PAPI 7.1.0.0
perf_event_uncore.c File Reference

Macros

#define PERF_EVENTS_OPENED   0x01
 
#define PERF_EVENTS_RUNNING   0x02
 
#define HANDLE_STRING_ERROR   {fprintf(stderr,"%s:%i unexpected string function error.\n",__FILE__,__LINE__); exit(-1);}
 
#define READ_BUFFER_SIZE   (3 + (2 * PERF_EVENT_MAX_MPX_COUNTERS))
 

Functions

static int _peu_set_domain (hwd_control_state_t *ctl, int domain)
 
static int _peu_shutdown_component (void)
 
static unsigned int get_read_format (unsigned int multiplex, unsigned int inherit, int format_group)
 
static long sys_perf_event_open (struct perf_event_attr *hw_event, pid_t pid, int cpu, int group_fd, unsigned long flags)
 
static int map_perf_event_errors_to_papi (int perf_event_error)
 
static int check_scheduability (pe_context_t *ctx, pe_control_t *ctl)
 
static int open_pe_events (pe_context_t *ctx, pe_control_t *ctl)
 
static int close_pe_events (pe_context_t *ctx, pe_control_t *ctl)
 
static int _peu_init_thread (hwd_context_t *hwd_ctx)
 
static int _peu_init_control_state (hwd_control_state_t *ctl)
 
static int _peu_init_component (int cidx)
 
int _peu_update_control_state (hwd_control_state_t *ctl, NativeInfo_t *native, int count, hwd_context_t *ctx)
 
static int _peu_shutdown_thread (hwd_context_t *ctx)
 
static int _peu_reset (hwd_context_t *ctx, hwd_control_state_t *ctl)
 
static int _peu_write (hwd_context_t *ctx, hwd_control_state_t *ctl, long long *from)
 
static int _peu_read (hwd_context_t *ctx, hwd_control_state_t *ctl, long long **events, int flags)
 
static int _peu_start (hwd_context_t *ctx, hwd_control_state_t *ctl)
 
static int _peu_stop (hwd_context_t *ctx, hwd_control_state_t *ctl)
 
static int _peu_ctl (hwd_context_t *ctx, int code, _papi_int_option_t *option)
 
static int _peu_ntv_enum_events (unsigned int *PapiEventCode, int modifier)
 
static int _peu_ntv_name_to_code (const char *name, unsigned int *event_code)
 
static int _peu_ntv_code_to_name (unsigned int EventCode, char *ntv_name, int len)
 
static int _peu_ntv_code_to_descr (unsigned int EventCode, char *ntv_descr, int len)
 
static int _peu_ntv_code_to_info (unsigned int EventCode, PAPI_event_info_t *info)
 

Variables

papi_vector_t _perf_event_uncore_vector
 
struct native_event_table_t uncore_native_event_table
 
static int our_cidx
 

Macro Definition Documentation

◆ HANDLE_STRING_ERROR

#define HANDLE_STRING_ERROR   {fprintf(stderr,"%s:%i unexpected string function error.\n",__FILE__,__LINE__); exit(-1);}

Definition at line 66 of file perf_event_uncore.c.

◆ PERF_EVENTS_OPENED

#define PERF_EVENTS_OPENED   0x01

Definition at line 60 of file perf_event_uncore.c.

◆ PERF_EVENTS_RUNNING

#define PERF_EVENTS_RUNNING   0x02

Definition at line 61 of file perf_event_uncore.c.

◆ READ_BUFFER_SIZE

#define READ_BUFFER_SIZE   (3 + (2 * PERF_EVENT_MAX_MPX_COUNTERS))

Definition at line 222 of file perf_event_uncore.c.
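
This size covers the largest answer the kernel can give for one read(2) of a counter group. With PERF_FORMAT_TOTAL_TIME_ENABLED, PERF_FORMAT_TOTAL_TIME_RUNNING, and PERF_FORMAT_GROUP set, a read returns an event count, two times, and one value per event; if PERF_FORMAT_ID were also requested, each value would become a value/id pair. A sketch of that worst-case layout (following the perf_event_open(2) man page; the struct name is illustrative):

#include <stdint.h>

/* Worst-case result of read(2) on a perf_event group fd. With
 * PERF_FORMAT_ID each entry of values[] doubles into a {value, id}
 * pair, giving 3 + 2*nr 64-bit words -- hence
 * READ_BUFFER_SIZE = 3 + (2 * PERF_EVENT_MAX_MPX_COUNTERS). */
struct read_format_group {
    uint64_t nr;            /* number of events in the group */
    uint64_t time_enabled;  /* PERF_FORMAT_TOTAL_TIME_ENABLED */
    uint64_t time_running;  /* PERF_FORMAT_TOTAL_TIME_RUNNING */
    uint64_t values[];      /* one word (or one pair) per event */
};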

Function Documentation

◆ _peu_ctl()

static int _peu_ctl ( hwd_context_t *  ctx,
int  code,
_papi_int_option_t *  option 
)
static

Definition at line 1147 of file perf_event_uncore.c.

1148{
1149 int ret;
1150 pe_context_t *pe_ctx = ( pe_context_t *) ctx;
1151 pe_control_t *pe_ctl = NULL;
1152
1153 switch ( code ) {
1154 case PAPI_MULTIPLEX:
1155 pe_ctl = ( pe_control_t * ) ( option->multiplex.ESI->ctl_state );
1156
1157 pe_ctl->multiplexed = 1;
1158 ret = _peu_update_control_state( pe_ctl, NULL,
1159 pe_ctl->num_events, pe_ctx );
1160 if (ret != PAPI_OK) {
1161 pe_ctl->multiplexed = 0;
1162 }
1163 return ret;
1164
1165 case PAPI_ATTACH:
1166 pe_ctl = ( pe_control_t * ) ( option->attach.ESI->ctl_state );
1167
1168 pe_ctl->tid = option->attach.tid;
1169
1170 /* If events have already been added, something may */
1171 /* have been done to the kernel, so update */
1172 ret =_peu_update_control_state( pe_ctl, NULL,
1173 pe_ctl->num_events, pe_ctx);
1174
1175 return ret;
1176
1177 case PAPI_DETACH:
1178 pe_ctl = ( pe_control_t *) ( option->attach.ESI->ctl_state );
1179
1180 pe_ctl->tid = 0;
1181 return PAPI_OK;
1182
1183 case PAPI_CPU_ATTACH:
1184 pe_ctl = ( pe_control_t *) ( option->cpu.ESI->ctl_state );
1185
1186 /* this tells the kernel not to count for a thread */
1187 /* should we warn if we try to set both? perf_event */
1188 /* will reject it. */
1189 pe_ctl->tid = -1;
1190
1191 pe_ctl->cpu = option->cpu.cpu_num;
1192
1193 return PAPI_OK;
1194
1195 case PAPI_DOMAIN:
1196 pe_ctl = ( pe_control_t *) ( option->domain.ESI->ctl_state );
1197
1198 /* looks like we are allowed, so set event set level counting domains */
1199 pe_ctl->domain = option->domain.domain;
1200 return PAPI_OK;
1201
1202 case PAPI_GRANUL:
1203 pe_ctl = (pe_control_t *) ( option->granularity.ESI->ctl_state );
1204
1205 /* FIXME: we really don't support this yet */
1206
1207 switch ( option->granularity.granularity ) {
1208 case PAPI_GRN_PROCG:
1209 case PAPI_GRN_SYS_CPU:
1210 case PAPI_GRN_PROC:
1211 return PAPI_ECMP;
1212
1213 /* Currently we only support thread and CPU granularity */
1214 case PAPI_GRN_SYS:
1215 pe_ctl->granularity=PAPI_GRN_SYS;
1216 break;
1217
1218 case PAPI_GRN_THR:
1219 pe_ctl->granularity=PAPI_GRN_THR;
1220 break;
1221
1222
1223 default:
1224 return PAPI_EINVAL;
1225 }
1226 return PAPI_OK;
1227
1228 case PAPI_INHERIT:
1229 pe_ctl = (pe_control_t *) ( option->inherit.ESI->ctl_state );
1230
1231 if (option->inherit.inherit) {
1232 /* children will inherit counters */
1233 pe_ctl->inherit = 1;
1234 } else {
1235 /* children won't inherit counters */
1236 pe_ctl->inherit = 0;
1237 }
1238 return PAPI_OK;
1239
1240 case PAPI_DATA_ADDRESS:
1241 return PAPI_ENOSUPP;
1242
1243 case PAPI_INSTR_ADDRESS:
1244 return PAPI_ENOSUPP;
1245
1246 case PAPI_DEF_ITIMER:
1247 return PAPI_ENOSUPP;
1248
1249 case PAPI_DEF_MPX_NS:
1250 return PAPI_ENOSUPP;
1251
1252 case PAPI_DEF_ITIMER_NS:
1253 return PAPI_ENOSUPP;
1254
1255 default:
1256 return PAPI_ENOSUPP;
1257 }
1258}
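These cases are reached through PAPI_set_opt(). A minimal sketch (assuming an already-created event set bound to this component; error checks omitted) of the two options most relevant to uncore counting, CPU attach and system granularity:

#include <string.h>
#include "papi.h"

void bind_uncore_eventset( int EventSet )
{
    PAPI_option_t opt;

    /* routes to "case PAPI_CPU_ATTACH" above: count on CPU 0, no thread */
    memset( &opt, 0, sizeof(opt) );
    opt.cpu.eventset = EventSet;
    opt.cpu.cpu_num = 0;
    PAPI_set_opt( PAPI_CPU_ATTACH, &opt );

    /* routes to "case PAPI_GRANUL" above: system-wide counting */
    memset( &opt, 0, sizeof(opt) );
    opt.granularity.eventset = EventSet;
    opt.granularity.granularity = PAPI_GRN_SYS;
    PAPI_set_opt( PAPI_GRANUL, &opt );
}

Uncore PMUs are per-package rather than per-thread, which is why the CPU-attach case sets tid to -1: the kernel is told to count on a CPU, not for a thread.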

◆ _peu_init_component()

static int _peu_init_component ( int  cidx)
static

Definition at line 593 of file perf_event_uncore.c.

594{
595
596 int retval;
597 int paranoid_level;
598
599 FILE *fff;
600 char *strCpy;
601
602 our_cidx=cidx;
603
604 /* This is the official way to detect if perf_event support exists */
605 /* The file is called perf_counter_paranoid on 2.6.31 */
606 /* currently we are lazy and do not support 2.6.31 kernels */
607
608 fff=fopen("/proc/sys/kernel/perf_event_paranoid","r");
609 if (fff==NULL) {
610 strCpy=strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
611 "perf_event support not detected",PAPI_MAX_STR_LEN);
612 if (strCpy == NULL) HANDLE_STRING_ERROR;
614 goto fn_fail;
615 }
616 retval=fscanf(fff,"%d",&paranoid_level);
617 if (retval!=1) fprintf(stderr,"Error reading paranoid level\n");
618 fclose(fff);
619
620
621 /* Run the libpfm4-specific setup */
622
623 retval = _papi_libpfm4_init(_papi_hwd[cidx]);
624 if (retval) {
625 strCpy=strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
626 "Error initializing libpfm4",PAPI_MAX_STR_LEN);
628 if (strCpy == NULL) HANDLE_STRING_ERROR;
630 goto fn_fail;
631 }
632
633
634 /* Run the uncore specific libpfm4 setup */
635
636 retval = _pe_libpfm4_init(_papi_hwd[cidx], cidx,
637 &uncore_native_event_table,
638 PMU_TYPE_UNCORE);
639 if (retval) {
640 strCpy=strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
641 "Error setting up libpfm4",PAPI_MAX_STR_LEN);
643 if (strCpy == NULL) HANDLE_STRING_ERROR;
645 goto fn_fail;
646 }
647
648 /* Check if no uncore events found */
649
650 if (_papi_hwd[cidx]->cmp_info.num_native_events==0) {
651 strCpy=strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
652 "No uncore PMUs or events found",PAPI_MAX_STR_LEN);
654 if (strCpy == NULL) HANDLE_STRING_ERROR;
656 goto fn_fail;
657 }
658
659 /* Check if we have enough permissions for uncore */
660
661 /* 2 means no kernel measurements allowed */
662 /* 1 means normal counter access */
663 /* 0 means you can access CPU-specific data */
664 /* -1 means no restrictions */
665
666 if ((paranoid_level>0) && (getuid()!=0)) {
667 strCpy=strncpy(_papi_hwd[cidx]->cmp_info.disabled_reason,
668 "Insufficient permissions for uncore access. Set /proc/sys/kernel/perf_event_paranoid to 0 or run as root.",
671 if (strCpy == NULL) HANDLE_STRING_ERROR;
673 goto fn_fail;
674 }
675
676 fn_exit:
677 _papi_hwd[cidx]->cmp_info.disabled = retval;
678 return retval;
679 fn_fail:
680 goto fn_exit;
681
682}
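The same detection logic can be reproduced standalone. A sketch mirroring the checks above (the helper name is illustrative):

#include <stdio.h>
#include <unistd.h>

/* 2 = no kernel measurements, 1 = normal counter access,
 * 0 = CPU-specific data allowed, -1 = no restrictions */
static int uncore_access_ok( void )
{
    FILE *f;
    int level;

    f = fopen( "/proc/sys/kernel/perf_event_paranoid", "r" );
    if (f == NULL) return 0;                        /* no perf_event support */
    if (fscanf( f, "%d", &level ) != 1) level = 2;  /* be conservative */
    fclose( f );

    /* same test as above: a paranoid level above 0 requires root for uncore */
    return (level <= 0) || (getuid() == 0);
}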

◆ _peu_init_control_state()

static int _peu_init_control_state ( hwd_control_state_t *  ctl)
static

Definition at line 568 of file perf_event_uncore.c.

569{
570 pe_control_t *pe_ctl = ( pe_control_t *) ctl;
571
572 /* clear the contents */
573 memset( pe_ctl, 0, sizeof ( pe_control_t ) );
574
575 /* Set the default domain */
576 _peu_set_domain( ctl, _perf_event_uncore_vector.cmp_info.default_domain );
577
578 /* Set the default granularity */
579 pe_ctl->granularity=_perf_event_uncore_vector.cmp_info.default_granularity;
580
581 pe_ctl->cidx=our_cidx;
582
583 /* Set cpu number in the control block to show events */
584 /* are not tied to specific cpu */
585 pe_ctl->cpu = -1;
586 return PAPI_OK;
587}

◆ _peu_init_thread()

static int _peu_init_thread ( hwd_context_t *  hwd_ctx)
static

Definition at line 551 of file perf_event_uncore.c.

552{
553
554 pe_context_t *pe_ctx = ( pe_context_t *) hwd_ctx;
555
556 /* clear the context structure and mark as initialized */
557 memset( pe_ctx, 0, sizeof ( pe_context_t ) );
558 pe_ctx->initialized=1;
559
560 pe_ctx->event_table=&uncore_native_event_table;
561 pe_ctx->cidx=our_cidx;
562
563 return PAPI_OK;
564}

◆ _peu_ntv_code_to_descr()

static int _peu_ntv_code_to_descr ( unsigned int  EventCode,
char *  ntv_descr,
int  len 
)
static

Definition at line 1293 of file perf_event_uncore.c.

1294 {
1295
1297
1298 return _pe_libpfm4_ntv_code_to_descr(EventCode,ntv_descr,len,
1299 &uncore_native_event_table);
1300}

◆ _peu_ntv_code_to_info()

static int _peu_ntv_code_to_info ( unsigned int  EventCode,
PAPI_event_info_t *  info 
)
static

Definition at line 1303 of file perf_event_uncore.c.

1304 {
1305
1307
1308 return _pe_libpfm4_ntv_code_to_info(EventCode, info,
1309 &uncore_native_event_table);
1310}

◆ _peu_ntv_code_to_name()

static int _peu_ntv_code_to_name ( unsigned int  EventCode,
char *  ntv_name,
int  len 
)
static

Definition at line 1282 of file perf_event_uncore.c.

1283 {
1284
1286
1287 return _pe_libpfm4_ntv_code_to_name(EventCode,
1288 ntv_name, len,
1289 &uncore_native_event_table);
1290}

◆ _peu_ntv_enum_events()

static int _peu_ntv_enum_events ( unsigned int *  PapiEventCode,
int  modifier 
)
static

Definition at line 1262 of file perf_event_uncore.c.

1263{
1264
1266
1267
1268 return _pe_libpfm4_ntv_enum_events(PapiEventCode, modifier, our_cidx,
1269 &uncore_native_event_table);
1270}

◆ _peu_ntv_name_to_code()

static int _peu_ntv_name_to_code ( const char *  name,
unsigned int *  event_code 
)
static

Definition at line 1273 of file perf_event_uncore.c.

1273 {
1274
1276
1277 return _pe_libpfm4_ntv_name_to_code(name,event_code, our_cidx,
1278 &uncore_native_event_table);
1279}
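The four conversions above back the public PAPI naming calls. A sketch of resolving and adding an uncore event by name (the event string is a placeholder; real names depend on the machine's PMUs and can be listed with papi_native_avail):

#include <stdio.h>
#include "papi.h"

void add_uncore_event( int EventSet )
{
    /* hypothetical event name, for illustration only */
    const char *name = "skx_unc_imc0::UNC_M_CAS_COUNT:ALL";
    int code;

    /* resolves through _peu_ntv_name_to_code() */
    if (PAPI_event_name_to_code( name, &code ) == PAPI_OK)
        printf( "%s -> %#x\n", name, code );

    if (PAPI_add_named_event( EventSet, name ) != PAPI_OK)
        fprintf( stderr, "could not add %s\n", name );
}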

◆ _peu_read()

static int _peu_read ( hwd_context_t *  ctx,
hwd_control_state_t *  ctl,
long long **  events,
int  flags 
)
static

Definition at line 911 of file perf_event_uncore.c.

913{
914 SUBDBG("ENTER: ctx: %p, ctl: %p, events: %p, flags: %#x\n", ctx, ctl, events, flags);
915
916 ( void ) flags; /*unused */
917 int i, ret = -1;
918 /* pe_context_t *pe_ctx = ( pe_context_t *) ctx; */
919 (void) ctx; /*unused*/
920 pe_control_t *pe_ctl = ( pe_control_t *) ctl;
921 long long papi_pe_buffer[READ_BUFFER_SIZE];
922 long long tot_time_running, tot_time_enabled, scale;
923
924 /* Handle case where we are multiplexing */
925 if (pe_ctl->multiplexed) {
926
927 /* currently we handle multiplexing by having individual events */
928 /* so we read from each in turn. */
929
930 for ( i = 0; i < pe_ctl->num_events; i++ ) {
931
932 ret = read( pe_ctl->events[i].event_fd, papi_pe_buffer,
933 sizeof ( papi_pe_buffer ) );
934 if ( ret == -1 ) {
935 PAPIERROR("read returned an error: %s", strerror( errno ));
936 SUBDBG("EXIT: PAPI_ESYS\n");
937 return PAPI_ESYS;
938 }
939
940 /* We should read 3 64-bit values from the counter */
941 if (ret<(signed)(3*sizeof(long long))) {
942 PAPIERROR("Error! short read!\n");
943 SUBDBG("EXIT: PAPI_ESYS\n");
944 return PAPI_ESYS;
945 }
946
947 SUBDBG("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
948 pe_ctl->events[i].event_fd,
949 (long)pe_ctl->tid, pe_ctl->events[i].cpu, ret);
950 SUBDBG("read: %lld %lld %lld\n",papi_pe_buffer[0],
951 papi_pe_buffer[1],papi_pe_buffer[2]);
952
953 tot_time_enabled = papi_pe_buffer[1];
954 tot_time_running = papi_pe_buffer[2];
955
956 SUBDBG("count[%d] = (papi_pe_buffer[%d] %lld * "
957 "tot_time_enabled %lld) / tot_time_running %lld\n",
958 i, 0,papi_pe_buffer[0],
959 tot_time_enabled,tot_time_running);
960
961 if (tot_time_running == tot_time_enabled) {
962 /* No scaling needed */
963 pe_ctl->counts[i] = papi_pe_buffer[0];
964 } else if (tot_time_running && tot_time_enabled) {
965 /* Scale factor of 100 to avoid overflows when computing */
966 /*enabled/running */
967
968 scale = (tot_time_enabled * 100LL) / tot_time_running;
969 scale = scale * papi_pe_buffer[0];
970 scale = scale / 100LL;
971 pe_ctl->counts[i] = scale;
972 } else {
973 /* This should not happen, but Phil reports it sometimes does. */
974 SUBDBG("perf_event kernel bug(?) count, enabled, "
975 "running: %lld, %lld, %lld\n",
976 papi_pe_buffer[0],tot_time_enabled,
977 tot_time_running);
978
979 pe_ctl->counts[i] = papi_pe_buffer[0];
980 }
981 }
982 }
983
984 /* Handle cases where we cannot use FORMAT GROUP */
985 else if (pe_ctl->inherit) {
986
987 /* we must read each counter individually */
988 for ( i = 0; i < pe_ctl->num_events; i++ ) {
989
990 ret = read( pe_ctl->events[i].event_fd, papi_pe_buffer,
991 sizeof ( papi_pe_buffer ) );
992 if ( ret == -1 ) {
993 PAPIERROR("read returned an error: %s", strerror( errno ));
994 SUBDBG("EXIT: PAPI_ESYS\n");
995 return PAPI_ESYS;
996 }
997
998 /* we should read one 64-bit value from each counter */
999 if (ret!=sizeof(long long)) {
1000 PAPIERROR("Error! short read!\n");
1001 PAPIERROR("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
1002 pe_ctl->events[i].event_fd,
1003 (long)pe_ctl->tid, pe_ctl->events[i].cpu, ret);
1004 SUBDBG("EXIT: PAPI_ESYS\n");
1005 return PAPI_ESYS;
1006 }
1007
1008 SUBDBG("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
1009 pe_ctl->events[i].event_fd, (long)pe_ctl->tid,
1010 pe_ctl->events[i].cpu, ret);
1011 SUBDBG("read: %lld\n",papi_pe_buffer[0]);
1012
1013 pe_ctl->counts[i] = papi_pe_buffer[0];
1014 }
1015 }
1016
1017
1018 /* Handle cases where we are using FORMAT_GROUP */
1019 /* We assume only one group leader, in position 0 */
1020
1021 else {
1022 if (pe_ctl->events[0].group_leader_fd!=-1) {
1023 PAPIERROR("Was expecting group leader!\n");
1024 }
1025
1026 ret = read( pe_ctl->events[0].event_fd, papi_pe_buffer,
1027 sizeof ( papi_pe_buffer ) );
1028
1029 if ( ret == -1 ) {
1030 PAPIERROR("read returned an error: %s", strerror( errno ));
1031 SUBDBG("EXIT: PAPI_ESYS\n");
1032 return PAPI_ESYS;
1033 }
1034
1035 /* we read 1 64-bit value (number of events) then */
1036 /* num_events more 64-bit values that hold the counts */
1037 if (ret<(signed)((1+pe_ctl->num_events)*sizeof(long long))) {
1038 PAPIERROR("Error! short read!\n");
1039 SUBDBG("EXIT: PAPI_ESYS\n");
1040 return PAPI_ESYS;
1041 }
1042
1043 SUBDBG("read: fd: %2d, tid: %ld, cpu: %d, ret: %d\n",
1044 pe_ctl->events[0].event_fd,
1045 (long)pe_ctl->tid, pe_ctl->events[0].cpu, ret);
1046 {
1047 int j;
1048 for(j=0;j<ret/8;j++) {
1049 SUBDBG("read %d: %lld\n",j,papi_pe_buffer[j]);
1050 }
1051 }
1052
1053 /* Make sure the kernel agrees with how many events we have */
1054 if (papi_pe_buffer[0]!=pe_ctl->num_events) {
1055 PAPIERROR("Error! Wrong number of events!\n");
1056 SUBDBG("EXIT: PAPI_ESYS\n");
1057 return PAPI_ESYS;
1058 }
1059
1060 /* put the count values in their proper location */
1061 for(i=0;i<pe_ctl->num_events;i++) {
1062 pe_ctl->counts[i] = papi_pe_buffer[1+i];
1063 }
1064 }
1065
1066 /* point PAPI to the values we read */
1067 *events = pe_ctl->counts;
1068
1069 SUBDBG("EXIT: PAPI_OK\n");
1070 return PAPI_OK;
1071}
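A worked example of the multiplex scaling above: suppose an event counted 400000 while the set was enabled for 1000000 ns but was only scheduled on hardware for 250000 ns. The estimate extrapolates the raw count by enabled/running, with the factor of 100 applied first so the integer division does not truncate the ratio too early:

/* worked example of the scaling branch above (values are illustrative) */
long long scaled_count( void )
{
    long long raw          = 400000LL;   /* papi_pe_buffer[0] */
    long long time_enabled = 1000000LL;  /* papi_pe_buffer[1], ns */
    long long time_running = 250000LL;   /* papi_pe_buffer[2], ns */

    long long scale = (time_enabled * 100LL) / time_running;  /* = 400 */
    return (raw * scale) / 100LL;                             /* = 1600000 */
}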

◆ _peu_reset()

static int _peu_reset ( hwd_context_t *  ctx,
hwd_control_state_t *  ctl 
)
static

Definition at line 856 of file perf_event_uncore.c.

857{
858 int i, ret;
859 pe_control_t *pe_ctl = ( pe_control_t *) ctl;
860
861 ( void ) ctx; /*unused */
862
863 /* We need to reset all of the events, not just the group leaders */
864 for( i = 0; i < pe_ctl->num_events; i++ ) {
865 ret = ioctl( pe_ctl->events[i].event_fd, PERF_EVENT_IOC_RESET, NULL );
866 if ( ret == -1 ) {
867 PAPIERROR("ioctl(%d, PERF_EVENT_IOC_RESET, NULL) "
868 "returned error, Linux says: %s",
869 pe_ctl->events[i].event_fd, strerror( errno ) );
870 return PAPI_ESYS;
871 }
872 }
873
874 return PAPI_OK;
875}

◆ _peu_set_domain()

static int _peu_set_domain ( hwd_control_state_t *  ctl,
int  domain 
)
static

Definition at line 829 of file perf_event_uncore.c.

830{
831 pe_control_t *pe_ctl = ( pe_control_t *) ctl;
832
833 SUBDBG("old control domain %d, new domain %d\n",
834 pe_ctl->domain,domain);
835
836 pe_ctl->domain = domain;
837 return PAPI_OK;
838}

◆ _peu_shutdown_component()

static int _peu_shutdown_component ( void  )
static

Definition at line 686 of file perf_event_uncore.c.

686 {
687
688 /* deallocate our event table */
689 _pe_libpfm4_shutdown(&_perf_event_uncore_vector,
690 &uncore_native_event_table);
691
692 /* Shutdown libpfm4 */
693 _papi_libpfm4_shutdown(&_perf_event_uncore_vector);
694
695 return PAPI_OK;
696}

◆ _peu_shutdown_thread()

static int _peu_shutdown_thread ( hwd_context_t *  ctx)
static

Definition at line 842 of file perf_event_uncore.c.

843{
844 pe_context_t *pe_ctx = ( pe_context_t *) ctx;
845
846 pe_ctx->initialized=0;
847
848 return PAPI_OK;
849}

◆ _peu_start()

static int _peu_start ( hwd_context_t *  ctx,
hwd_control_state_t *  ctl 
)
static

Definition at line 1075 of file perf_event_uncore.c.

1076{
1077 int ret;
1078 int i;
1079 int did_something = 0;
1080 pe_context_t *pe_ctx = ( pe_context_t *) ctx;
1081 pe_control_t *pe_ctl = ( pe_control_t *) ctl;
1082
1083 /* Reset the counters first. Is this necessary? */
1084 ret = _peu_reset( pe_ctx, pe_ctl );
1085 if ( ret ) {
1086 return ret;
1087 }
1088
1089 /* Enable all of the group leaders */
1090 /* All group leaders have a group_leader_fd of -1 */
1091 for( i = 0; i < pe_ctl->num_events; i++ ) {
1092 if (pe_ctl->events[i].group_leader_fd == -1) {
1093 SUBDBG("ioctl(enable): fd: %d\n", pe_ctl->events[i].event_fd);
1094 ret=ioctl( pe_ctl->events[i].event_fd, PERF_EVENT_IOC_ENABLE, NULL) ;
1095
1096 /* ioctls always return -1 on failure */
1097 if (ret == -1) {
1098 PAPIERROR("ioctl(PERF_EVENT_IOC_ENABLE) failed.\n");
1099 return PAPI_ESYS;
1100 }
1101
1102 did_something++;
1103 }
1104 }
1105
1106 if (!did_something) {
1107 PAPIERROR("Did not enable any counters.\n");
1108 return PAPI_EBUG;
1109 }
1110
1111 pe_ctx->state |= PERF_EVENTS_RUNNING;
1112
1113 return PAPI_OK;
1114
1115}

◆ _peu_stop()

static int _peu_stop ( hwd_context_t *  ctx,
hwd_control_state_t *  ctl 
)
static

Definition at line 1119 of file perf_event_uncore.c.

1120{
1121
1122 int ret;
1123 int i;
1124 pe_context_t *pe_ctx = ( pe_context_t *) ctx;
1125 pe_control_t *pe_ctl = ( pe_control_t *) ctl;
1126
1127 /* Just disable the group leaders */
1128 for ( i = 0; i < pe_ctl->num_events; i++ ) {
1129 if ( pe_ctl->events[i].group_leader_fd == -1 ) {
1130 ret=ioctl( pe_ctl->events[i].event_fd, PERF_EVENT_IOC_DISABLE, NULL);
1131 if ( ret == -1 ) {
1132 PAPIERROR( "ioctl(%d, PERF_EVENT_IOC_DISABLE, NULL) "
1133 "returned error, Linux says: %s",
1134 pe_ctl->events[i].event_fd, strerror( errno ) );
1135 return PAPI_EBUG;
1136 }
1137 }
1138 }
1139
1140 pe_ctx->state &= ~PERF_EVENTS_RUNNING;
1141
1142 return PAPI_OK;
1143}

◆ _peu_update_control_state()

int _peu_update_control_state ( hwd_control_state_t *  ctl,
NativeInfo_t *  native,
int  count,
hwd_context_t *  ctx 
)

Definition at line 703 of file perf_event_uncore.c.

706{
707 int i;
708 int j;
709 int ret;
710 int skipped_events=0;
711 struct native_event_t *ntv_evt;
712 pe_context_t *pe_ctx = ( pe_context_t *) ctx;
713 pe_control_t *pe_ctl = ( pe_control_t *) ctl;
714
715 /* close all of the existing fds and start over again */
716 /* In theory we could have finer-grained control and know if */
717 /* things were changed, but it's easier to tear things down and rebuild. */
718 close_pe_events( pe_ctx, pe_ctl );
719
720 /* Calling with count==0 should be OK, it's how things are deallocated */
721 /* when an eventset is destroyed. */
722 if ( count == 0 ) {
723 SUBDBG( "Called with count == 0\n" );
724 return PAPI_OK;
725 }
726
727 /* set up all the events */
728 for( i = 0; i < count; i++ ) {
729 if ( native ) {
730 // get the native event pointer used for this papi event
731 int ntv_idx = _papi_hwi_get_ntv_idx((unsigned)(native[i].ni_papi_code));
732 if (ntv_idx < -1) {
733 SUBDBG("papi_event_code: %#x known by papi but not by the component\n", native[i].ni_papi_code);
734 continue;
735 }
736 // if native index is -1, then we have an event without a mask and need to find the right native index to use
737 if (ntv_idx == -1) {
738 // find the native event index we want by matching for the right papi event code
739 for (j=0 ; j<pe_ctx->event_table->num_native_events ; j++) {
740 if (pe_ctx->event_table->native_events[j].papi_event_code == native[i].ni_papi_code) {
741 ntv_idx = j;
742 }
743 }
744 }
745
746 // if native index is still negative, we did not find event we wanted so just return error
747 if (ntv_idx < 0) {
748 SUBDBG("papi_event_code: %#x not found in native event tables\n", native[i].ni_papi_code);
749 continue;
750 }
751
752 // this native index is positive so there was a mask with the event, the ntv_idx identifies which native event to use
753 ntv_evt = (struct native_event_t *)(&(pe_ctx->event_table->native_events[ntv_idx]));
754
755 SUBDBG("ntv_evt: %p\n", ntv_evt);
756
757 SUBDBG("i: %d, pe_ctx->event_table->num_native_events: %d\n", i, pe_ctx->event_table->num_native_events);
758
759 // Move this event's hardware config values and other attributes to the perf_events attribute structure
760 memcpy (&pe_ctl->events[i].attr, &ntv_evt->attr, sizeof(perf_event_attr_t));
761
762 // may need to update the attribute structure with information from event set level domain settings (values set by PAPI_set_domain)
763 // only done if the event mask which controls each counting domain was not provided
764
765 // get pointer to allocated name, will be NULL when adding preset events to event set
766 char *aName = ntv_evt->allocated_name;
767 if ((aName == NULL) || (strstr(aName, ":u=") == NULL)) {
768 SUBDBG("set exclude_user attribute from eventset level domain flags, encode: %d, eventset: %d\n", pe_ctl->events[i].attr.exclude_user, !(pe_ctl->domain & PAPI_DOM_USER));
769 pe_ctl->events[i].attr.exclude_user = !(pe_ctl->domain & PAPI_DOM_USER);
770 }
771 if ((aName == NULL) || (strstr(aName, ":k=") == NULL)) {
772 SUBDBG("set exclude_kernel attribute from eventset level domain flags, encode: %d, eventset: %d\n", pe_ctl->events[i].attr.exclude_kernel, !(pe_ctl->domain & PAPI_DOM_KERNEL));
773 pe_ctl->events[i].attr.exclude_kernel = !(pe_ctl->domain & PAPI_DOM_KERNEL);
774 }
775
776 // set the cpu number provided with an event mask if there was one (will be -1 if mask not provided)
777 pe_ctl->events[i].cpu = ntv_evt->cpu;
778 // if cpu event mask not provided, then set the cpu to use to what may have been set on call to PAPI_set_opt (will still be -1 if not called)
779 if (pe_ctl->events[i].cpu == -1) {
780 pe_ctl->events[i].cpu = pe_ctl->cpu;
781 }
782 } else {
783 // This case happens when called from _pe_set_overflow and _pe_ctl
784 // Those callers put things directly into the pe_ctl structure so it is already set for the open call
785 }
786
787 // Copy the inherit flag into the attribute block that will be passed to the kernel
788 pe_ctl->events[i].attr.inherit = pe_ctl->inherit;
789
790 /* Set the position in the native structure */
791 /* We just set up events linearly */
792 if ( native ) {
793 native[i].ni_position = i;
794 SUBDBG( "&native[%d]: %p, ni_papi_code: %#x, ni_event: %#x, ni_position: %d, ni_owners: %d\n",
795 i, &(native[i]), native[i].ni_papi_code, native[i].ni_event, native[i].ni_position, native[i].ni_owners);
796 }
797 }
798
799 if (count <= skipped_events) {
800 SUBDBG("EXIT: No events to count, they all contained invalid umasks\n");
801 return PAPI_ENOEVNT;
802 }
803
804 pe_ctl->num_events = count - skipped_events;
805
806 /* actually open the events */
807 /* (why is this a separate function?) */
808 ret = open_pe_events( pe_ctx, pe_ctl );
809 if ( ret != PAPI_OK ) {
810 SUBDBG("open_pe_events failed\n");
811 /* Restore values ? */
812 return ret;
813 }
814
815 SUBDBG( "EXIT: PAPI_OK\n" );
816 return PAPI_OK;
817}
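The domain handling above inverts each requested PAPI domain bit into a perf_event exclude_* bit, but only when the event name carried no explicit :u= or :k= mask. A small standalone sketch of that mapping (the helper name is illustrative):

#include <linux/perf_event.h>
#include "papi.h"

static void set_domain_bits( struct perf_event_attr *attr, unsigned int domain )
{
    /* a domain that is NOT requested becomes an exclusion */
    attr->exclude_user   = !(domain & PAPI_DOM_USER);
    attr->exclude_kernel = !(domain & PAPI_DOM_KERNEL);
}

/* e.g. domain == PAPI_DOM_USER gives exclude_user = 0, exclude_kernel = 1 */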

◆ _peu_write()

static int _peu_write ( hwd_context_t *  ctx,
hwd_control_state_t *  ctl,
long long *  from 
)
static

Definition at line 881 of file perf_event_uncore.c.

883{
884 ( void ) ctx; /*unused */
885 ( void ) ctl; /*unused */
886 ( void ) from; /*unused */
887 /*
888 * Counters cannot be written. Do we need to virtualize the
889 * counters so that they can be written, or perhaps modify code so that
890 * they can be written? FIXME ?
891 */
892
893 return PAPI_ENOSUPP;
894}

◆ check_scheduability()

static int check_scheduability ( pe_context_t *  ctx,
pe_control_t *  ctl 
)
static

Definition at line 232 of file perf_event_uncore.c.

233{
234 SUBDBG("ENTER: ctx: %p, ctl: %p\n", ctx, ctl);
235 int retval = 0, cnt = -1;
236 ( void ) ctx; /*unused */
237 long long papi_pe_buffer[READ_BUFFER_SIZE];
238 int i;
239
240 /* If the kernel isn't tracking scheduability right */
241 /* Then we need to start/stop/read to force the event */
242 /* to be scheduled and see if an error condition happens. */
243
244 /* start all events */
245 for( i = 0; i < ctl->num_events; i++) {
246 retval = ioctl( ctl->events[i].event_fd, PERF_EVENT_IOC_ENABLE, NULL );
247 if (retval == -1) {
248 SUBDBG("EXIT: Enable failed event index: %d, num_events: %d, return PAPI_ESYS\n", i, ctl->num_events);
249 return PAPI_ESYS;
250 }
251 }
252
253 /* stop all events */
254 for( i = 0; i < ctl->num_events; i++) {
255 retval = ioctl(ctl->events[i].event_fd, PERF_EVENT_IOC_DISABLE, NULL );
256 if (retval == -1) {
257 SUBDBG("EXIT: Disable failed: event index: %d, num_events: %d, return PAPI_ESYS\n", i, ctl->num_events);
258 return PAPI_ESYS;
259 }
260 }
261
262 /* See if a read of each event returns results */
263 for( i = 0; i < ctl->num_events; i++) {
264 cnt = read( ctl->events[i].event_fd, papi_pe_buffer, sizeof(papi_pe_buffer));
265 if ( cnt == -1 ) {
266 SUBDBG( "EXIT: read failed: event index: %d, num_events: %d, return PAPI_ESYS. Should never happen.\n", i, ctl->num_events);
267 return PAPI_ESYS;
268 }
269
270 if ( cnt == 0 ) {
271 /* We read 0 bytes if we could not schedule the event */
272 /* The kernel should have detected this at open */
273 /* but various bugs (including NMI watchdog) */
274 /* result in this behavior */
275
276 SUBDBG( "EXIT: read returned 0: event index: %d, num_events: %d, return PAPI_ECNFLCT.\n", i, ctl->num_events);
277 return PAPI_ECNFLCT;
278 }
279 }
280
281 /* Reset all of the counters (opened so far) back to zero */
282 /* from the above brief enable/disable call pair. */
283
284 /* We have to reset all events because reset of group leader */
285 /* does not reset all. */
286 /* we assume that the events are being added one by one and that */
287 /* we do not need to reset higher events (doing so may reset ones */
288 /* that have not been initialized yet). */
289
290 /* Note... PERF_EVENT_IOC_RESET does not reset time running */
291 /* info if multiplexing, so we should avoid coming here if */
292 /* we are multiplexing the event. */
293 for( i = 0; i < ctl->num_events; i++) {
294 retval=ioctl( ctl->events[i].event_fd, PERF_EVENT_IOC_RESET, NULL );
295 if (retval == -1) {
296 SUBDBG("EXIT: Reset failed: event index: %d, num_events: %d, return PAPI_ESYS\n", i, ctl->num_events);
297 return PAPI_ESYS;
298 }
299 }
300 SUBDBG("EXIT: return PAPI_OK\n");
301 return PAPI_OK;
302}
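The probe pattern above can be read in isolation: briefly enable and disable the event, then read it; a read of 0 bytes means the kernel accepted the event at open time but never managed to put it on a hardware counter (the NMI watchdog stealing a counter is the classic cause). A standalone sketch (the helper name is illustrative):

#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int fd_is_schedulable( int fd )
{
    long long buffer[8];

    if (ioctl( fd, PERF_EVENT_IOC_ENABLE, NULL ) == -1) return 0;
    if (ioctl( fd, PERF_EVENT_IOC_DISABLE, NULL ) == -1) return 0;
    if (read( fd, buffer, sizeof(buffer) ) <= 0) return 0; /* 0 bytes = never scheduled */
    ioctl( fd, PERF_EVENT_IOC_RESET, NULL );
    return 1;
}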

◆ close_pe_events()

static int close_pe_events ( pe_context_t *  ctx,
pe_control_t *  ctl 
)
static

Definition at line 454 of file perf_event_uncore.c.

455{
456 int i;
457 int num_closed=0;
458 int events_not_opened=0;
459
460 /* should this be a more serious error? */
461 if ( ctx->state & PERF_EVENTS_RUNNING ) {
462 SUBDBG("Closing without stopping first\n");
463 }
464
465 /* Close child events first */
466 for( i=0; i<ctl->num_events; i++ ) {
467
468 if (ctl->events[i].event_opened) {
469
470 if (ctl->events[i].group_leader_fd!=-1) {
471 if ( ctl->events[i].mmap_buf ) {
472 if ( munmap ( ctl->events[i].mmap_buf,
473 ctl->events[i].nr_mmap_pages * getpagesize() ) ) {
474 PAPIERROR( "munmap of fd = %d returned error: %s",
475 ctl->events[i].event_fd, strerror( errno ) );
476 return PAPI_ESYS;
477 }
478 }
479
480 if ( close( ctl->events[i].event_fd ) ) {
481 PAPIERROR( "close of fd = %d returned error: %s",
482 ctl->events[i].event_fd, strerror( errno ) );
483 return PAPI_ESYS;
484 } else {
485 num_closed++;
486 }
487 ctl->events[i].event_opened=0;
488 }
489 }
490 else {
491 events_not_opened++;
492 }
493 }
494
495 /* Close the group leaders last */
496 for( i=0; i<ctl->num_events; i++ ) {
497
498 if (ctl->events[i].event_opened) {
499
500 if (ctl->events[i].group_leader_fd==-1) {
501 if ( ctl->events[i].mmap_buf ) {
502 if ( munmap ( ctl->events[i].mmap_buf,
503 ctl->events[i].nr_mmap_pages * getpagesize() ) ) {
504 PAPIERROR( "munmap of fd = %d returned error: %s",
505 ctl->events[i].event_fd, strerror( errno ) );
506 return PAPI_ESYS;
507 }
508 }
509
510
511 if ( close( ctl->events[i].event_fd ) ) {
512 PAPIERROR( "close of fd = %d returned error: %s",
513 ctl->events[i].event_fd, strerror( errno ) );
514 return PAPI_ESYS;
515 } else {
516 num_closed++;
517 }
518 ctl->events[i].event_opened=0;
519 }
520 }
521 }
522
523
524 if (ctl->num_events!=num_closed) {
525 if (ctl->num_events!=(num_closed+events_not_opened)) {
526 PAPIERROR("Didn't close all events: "
527 "Closed %d Not Opened: %d Expected %d\n",
528 num_closed,events_not_opened,ctl->num_events);
529 return PAPI_EBUG;
530 }
531 }
532
533 ctl->num_events=0;
534
535 ctx->state &= ~PERF_EVENTS_OPENED;
536
537 return PAPI_OK;
538}

◆ get_read_format()

static unsigned int get_read_format ( unsigned int  multiplex,
unsigned int  inherit,
int  format_group 
)
static

Definition at line 81 of file perf_event_uncore.c.

84{
85 unsigned int format = 0;
86
87 /* if we need read format options for multiplexing, add them now */
88 if (multiplex) {
89 format |= PERF_FORMAT_TOTAL_TIME_ENABLED;
90 format |= PERF_FORMAT_TOTAL_TIME_RUNNING;
91 }
92
93 /* If we are not using inherit, add the group read options */
94 if (!inherit) {
95 if (format_group) {
96 format |= PERF_FORMAT_GROUP;
97 }
98 }
99
100 SUBDBG("multiplex: %d, inherit: %d, group_leader: %d, format: %#x\n",
101 multiplex, inherit, format_group, format);
102
103 return format;
104}
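The combinations this helper can produce, replicated standalone for reference (flag values from linux/perf_event.h; which one is used depends on the GROUPIT build option discussed under open_pe_events below):

#include <linux/perf_event.h>

/* multiplexed: need time_enabled/time_running so counts can be scaled */
unsigned int fmt_multiplexed =
        PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING;

/* grouped, non-inherited leader (GROUPIT builds): one read per group */
unsigned int fmt_group_leader = PERF_FORMAT_GROUP;

/* inherited events: group reads are not allowed, so no format bits */
unsigned int fmt_inherit = 0;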

◆ map_perf_event_errors_to_papi()

static int map_perf_event_errors_to_papi ( int  perf_event_error)
static

Definition at line 181 of file perf_event_uncore.c.

181 {
182
183 int ret;
184
185 /* These mappings are approximate.
186 EINVAL in particular can mean lots of different things */
187 switch(perf_event_error) {
188 case EPERM:
189 case EACCES:
190 ret = PAPI_EPERM;
191 break;
192 case ENODEV:
193 case EOPNOTSUPP:
194 ret = PAPI_ENOSUPP;
195 break;
196 case ENOENT:
197 ret = PAPI_ENOEVNT;
198 break;
199 case ENOSYS:
200 case EAGAIN:
201 case EBUSY:
202 case E2BIG:
203 ret = PAPI_ESYS;
204 break;
205 case ENOMEM:
206 ret = PAPI_ENOMEM;
207 break;
208 case EINVAL:
209 default:
210 ret = PAPI_EINVAL;
211 break;
212 }
213 return ret;
214}
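The intended call site is immediately after a failed perf_event_open(), where errno (not the return value) carries the kernel's reason. A sketch (assuming a filled-in struct perf_event_attr; the wrapper name open_and_map is illustrative):

#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int open_and_map( struct perf_event_attr *attr )
{
    long fd = syscall( __NR_perf_event_open, attr, -1 /* pid */, 0 /* cpu */,
                       -1 /* group_fd */, 0 /* flags */ );
    if (fd == -1) {
        /* e.g. EACCES -> PAPI_EPERM, ENOENT -> PAPI_ENOEVNT */
        return map_perf_event_errors_to_papi( errno );
    }
    return PAPI_OK;
}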

◆ open_pe_events()

static int open_pe_events ( pe_context_t *  ctx,
pe_control_t *  ctl 
)
static

Definition at line 307 of file perf_event_uncore.c.

308{
309
310 int i, ret = PAPI_OK;
311 long pid;
312
313 if (ctl->granularity==PAPI_GRN_SYS) {
314 pid = -1;
315 }
316 else {
317 pid = ctl->tid;
318 }
319
320 for( i = 0; i < ctl->num_events; i++ ) {
321
322 ctl->events[i].event_opened=0;
323
324 /* set up the attr structure. We don't set up all fields here */
325 /* as some have already been set up previously. */
326
327/*
328 * The following code controls how the uncore component interfaces with the
329 * kernel for uncore events. The code inside the ifdef will use grouping of
330 * uncore events which can make the cost of reading the results more efficient.
331 * The problem with it is that the uncore component supports 20 different uncore
332 * PMU's. The kernel requires that all events in a group must be for the same PMU.
333 * This means that with grouping enabled papi applications can count events on only
334 * one of the 20 PMU's during a run.
335 *
336 * The code inside the else clause treats each event in the event set as
337 * independent. When running in this mode the kernel allows the papi
338 * application to use multiple uncore PMU's at the same time.
339 *
340 * Example:
341 * An application wants to measure all the L3 cache write requests.
342 * The event to do this is part of a cbox pmu (there are 8 cbox pmu's).
343 * When built with the code in the ifdef, the application would have to be
344 * run 8 times and count write requests from one pmu at a time.
345 * When built with the code in the else, the write requests in all 8 cbox
346 * pmu's could be counted in the same run.
347 *
348 */
349// #define GROUPIT 1 // remove the comment on this line to force event grouping
350#ifdef GROUPIT
351 /* group leader (event 0) is special */
352 /* If we're multiplexed, everyone is a group leader */
353 if (( i == 0 ) || (ctl->multiplexed)) {
354 ctl->events[i].attr.pinned = !ctl->multiplexed;
355 ctl->events[i].attr.disabled = 1;
356 ctl->events[i].group_leader_fd=-1;
357 ctl->events[i].attr.read_format = get_read_format(ctl->multiplexed,
358 ctl->inherit,
359 !ctl->multiplexed );
360 } else {
361 ctl->events[i].attr.pinned=0;
362 ctl->events[i].attr.disabled = 0;
363 ctl->events[i].group_leader_fd=ctl->events[0].event_fd,
364 ctl->events[i].attr.read_format = get_read_format(ctl->multiplexed,
365 ctl->inherit,
366 0 );
367 }
368#else
369 ctl->events[i].attr.pinned = !ctl->multiplexed;
370 ctl->events[i].attr.disabled = 1;
371 ctl->inherit = 1;
372 ctl->events[i].group_leader_fd=-1;
373 ctl->events[i].attr.read_format = get_read_format(ctl->multiplexed, ctl->inherit, 0 );
374#endif
375
376
377 /* try to open */
378 ctl->events[i].event_fd = sys_perf_event_open( &ctl->events[i].attr,
379 pid,
380 ctl->events[i].cpu,
381 ctl->events[i].group_leader_fd,
382 0 /* flags */
383 );
384
385 /* Try to match Linux errors to PAPI errors */
386 if ( ctl->events[i].event_fd == -1 ) {
387 SUBDBG("sys_perf_event_open returned error on event #%d."
388 " Error: %s\n",
389 i, strerror( errno ) );
390 ret = map_perf_event_errors_to_papi( errno );
391
392 goto open_peu_cleanup;
393 }
394
395 SUBDBG ("sys_perf_event_open: tid: %ld, cpu_num: %d,"
396 " group_leader/fd: %d, event_fd: %d,"
397 " read_format: %"PRIu64"\n",
398 pid, ctl->events[i].cpu, ctl->events[i].group_leader_fd,
399 ctl->events[i].event_fd, ctl->events[i].attr.read_format);
400
401 ctl->events[i].event_opened=1;
402 }
403
404
405 /* in many situations the kernel will indicate we opened fine */
406 /* yet things will fail later. So we need to double check */
407 /* we actually can use the events we've set up. */
408
409 /* This is not necessary if we are multiplexing, and in fact */
410 /* we cannot do this properly if multiplexed because */
411 /* PERF_EVENT_IOC_RESET does not reset the time running info */
412 if (!ctl->multiplexed) {
413 ret = check_scheduability( ctx, ctl);
414
415 if ( ret != PAPI_OK ) {
416 /* the last event did open, so we need to bump the counter */
417 /* before doing the cleanup */
418 i++;
419 goto open_peu_cleanup;
420 }
421 }
422
423 /* Now that we've successfully opened all of the events, do whatever */
424 /* "tune-up" is needed to attach the mmap'd buffers, signal handlers, */
425 /* and so on. */
426 for ( i = 0; i < ctl->num_events; i++ ) {
427
428 /* No sampling if uncore */
429 ctl->events[i].mmap_buf = NULL;
430 }
431
432 /* Set num_evts only if completely successful */
433 ctx->state |= PERF_EVENTS_OPENED;
434
435 return PAPI_OK;
436
437open_peu_cleanup:
438 /* We encountered an error, close up the fds we successfully opened. */
439 /* We go backward in an attempt to close group leaders last, although */
440 /* That's probably not strictly necessary. */
441 while ( i > 0 ) {
442 i--;
443 if (ctl->events[i].event_fd>=0) {
444 close( ctl->events[i].event_fd );
445 ctl->events[i].event_opened=0;
446 }
447 }
448
449 return ret;
450}

◆ sys_perf_event_open()

static long sys_perf_event_open ( struct perf_event_attr *  hw_event,
pid_t  pid,
int  cpu,
int  group_fd,
unsigned long  flags 
)
static

Definition at line 130 of file perf_event_uncore.c.

132{
133 int ret;
134
135 SUBDBG("sys_perf_event_open(hw_event: %p, pid: %d, cpu: %d, group_fd: %d, flags: %lx\n",hw_event,pid,cpu,group_fd,flags);
136 SUBDBG(" type: %d\n",hw_event->type);
137 SUBDBG(" size: %d\n",hw_event->size);
138 SUBDBG(" config: %#"PRIx64" (%"PRIu64")\n",hw_event->config,
139 hw_event->config);
140 SUBDBG(" sample_period: %"PRIu64"\n",hw_event->sample_period);
141 SUBDBG(" sample_type: %"PRIu64"\n",hw_event->sample_type);
142 SUBDBG(" read_format: %"PRIu64"\n",hw_event->read_format);
143 SUBDBG(" disabled: %d\n",hw_event->disabled);
144 SUBDBG(" inherit: %d\n",hw_event->inherit);
145 SUBDBG(" pinned: %d\n",hw_event->pinned);
146 SUBDBG(" exclusive: %d\n",hw_event->exclusive);
147 SUBDBG(" exclude_user: %d\n",hw_event->exclude_user);
148 SUBDBG(" exclude_kernel: %d\n",hw_event->exclude_kernel);
149 SUBDBG(" exclude_hv: %d\n",hw_event->exclude_hv);
150 SUBDBG(" exclude_idle: %d\n",hw_event->exclude_idle);
151 SUBDBG(" mmap: %d\n",hw_event->mmap);
152 SUBDBG(" comm: %d\n",hw_event->comm);
153 SUBDBG(" freq: %d\n",hw_event->freq);
154 SUBDBG(" inherit_stat: %d\n",hw_event->inherit_stat);
155 SUBDBG(" enable_on_exec: %d\n",hw_event->enable_on_exec);
156 SUBDBG(" task: %d\n",hw_event->task);
157 SUBDBG(" watermark: %d\n",hw_event->watermark);
158 SUBDBG(" precise_ip: %d\n",hw_event->precise_ip);
159 SUBDBG(" mmap_data: %d\n",hw_event->mmap_data);
160 SUBDBG(" sample_id_all: %d\n",hw_event->sample_id_all);
161 SUBDBG(" exclude_host: %d\n",hw_event->exclude_host);
162 SUBDBG(" exclude_guest: %d\n",hw_event->exclude_guest);
163 SUBDBG(" exclude_callchain_kernel: %d\n",hw_event->exclude_callchain_kernel);
164 SUBDBG(" exclude_callchain_user: %d\n",hw_event->exclude_callchain_user);
165 SUBDBG(" wakeup_watermark: %d\n",hw_event->wakeup_watermark);
166 SUBDBG(" bp_type: %d\n",hw_event->bp_type);
167 SUBDBG(" config1: %#lx (%lu)\n",hw_event->config1,hw_event->config1);
168 SUBDBG(" config2: %#lx (%lu)\n",hw_event->config2,hw_event->config2);
169 SUBDBG(" branch_sample_type: %lu\n",hw_event->branch_sample_type);
170 SUBDBG(" sample_regs_user: %lu\n",hw_event->sample_regs_user);
171 SUBDBG(" sample_stack_user: %d\n",hw_event->sample_stack_user);
172
173 ret = syscall( __NR_perf_event_open, hw_event, pid, cpu, group_fd, flags );
174 SUBDBG("Returned %d %d %s\n",ret,
175 ret<0?errno:0,
176 ret<0?strerror(errno):" ");
177 return ret;
178}
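A sketch of calling the wrapper the way this component does for uncore: system-wide (pid = -1), bound to one CPU, no group leader. The PMU type must be read from /sys/bus/event_source/devices/<pmu>/type on the target machine; the type and config values below are placeholders for illustration:

#include <string.h>
#include <linux/perf_event.h>

void open_one_uncore_event( void )
{
    struct perf_event_attr attr;
    long fd;

    memset( &attr, 0, sizeof(attr) );
    attr.size = sizeof(attr);
    attr.type = 10;       /* hypothetical uncore PMU type from sysfs */
    attr.config = 0x0304; /* hypothetical event encoding */
    attr.disabled = 1;    /* start stopped; enabled later via ioctl */

    fd = sys_perf_event_open( &attr, -1 /* pid: system-wide */,
                              0 /* cpu */, -1 /* no group */, 0 /* flags */ );
    (void)fd; /* a real caller checks for -1 and consults errno */
}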

Variable Documentation

◆ _perf_event_uncore_vector

papi_vector_t _perf_event_uncore_vector

Definition at line 49 of file perf_event_uncore.c.

◆ our_cidx

int our_cidx
static

Definition at line 53 of file perf_event_uncore.c.

◆ uncore_native_event_table

struct native_event_table_t uncore_native_event_table

Definition at line 52 of file perf_event_uncore.c.