PAPI 7.1.0.0
Loading...
Searching...
No Matches
libpfms.c File Reference
Include dependency graph for libpfms.c:

Go to the source code of this file.

Data Structures

struct  barrier_t
 
struct  pfms_cpu_t
 
struct  pfms_thread_t
 
struct  pfms_session_t
 

Macros

#define dprint(format, arg...)
 

Enumerations

enum  pfms_cmd_t {
  CMD_NONE , CMD_CTX , CMD_LOAD , CMD_UNLOAD ,
  CMD_WPMCS , CMD_WPMDS , CMD_RPMDS , CMD_STOP ,
  CMD_START , CMD_CLOSE
}
 

Functions

static int barrier_init (barrier_t *b, uint32_t count)
 
static void cleanup_barrier (void *arg)
 
static int barrier_wait (barrier_t *b)
 
static int pin_cpu (uint32_t cpu)
 
static void pfms_thread_mainloop (void *arg)
 
static int create_one_wthread (int cpu)
 
static int create_wthreads (uint64_t *cpu_list, uint32_t n)
 
int pfms_initialize (void)
 
int pfms_create (uint64_t *cpu_list, size_t n, pfarg_ctx_t *ctx, pfms_ovfl_t *ovfl, void **desc)
 
int pfms_load (void *desc)
 
static int __pfms_do_simple_cmd (pfms_cmd_t cmd, void *desc, void *data, uint32_t n)
 
int pfms_unload (void *desc)
 
int pfms_start (void *desc)
 
int pfms_stop (void *desc)
 
int pfms_write_pmcs (void *desc, pfarg_pmc_t *pmcs, uint32_t n)
 
int pfms_write_pmds (void *desc, pfarg_pmd_t *pmds, uint32_t n)
 
int pfms_close (void *desc)
 
int pfms_read_pmds (void *desc, pfarg_pmd_t *pmds, uint32_t n)
 

Variables

static uint32_t ncpus
 
static pfms_thread_t * tds
 
static pthread_mutex_t tds_lock = PTHREAD_MUTEX_INITIALIZER
 

Macro Definition Documentation

◆ dprint

#define dprint (   format,
  arg... 
)

Definition at line 18 of file libpfms.c.

Enumeration Type Documentation

◆ pfms_cmd_t

enum pfms_cmd_t
Enumerator
CMD_NONE 
CMD_CTX 
CMD_LOAD 
CMD_UNLOAD 
CMD_WPMCS 
CMD_WPMDS 
CMD_RPMDS 
CMD_STOP 
CMD_START 
CMD_CLOSE 

Definition at line 20 of file libpfms.c.

20 { CMD_NONE,
21 CMD_CTX,
pfms_cmd_t
Definition: libpfms.c:20
@ CMD_RPMDS
Definition: libpfms.c:26
@ CMD_STOP
Definition: libpfms.c:27
@ CMD_CTX
Definition: libpfms.c:21
@ CMD_WPMDS
Definition: libpfms.c:25
@ CMD_UNLOAD
Definition: libpfms.c:23
@ CMD_CLOSE
Definition: libpfms.c:29
@ CMD_LOAD
Definition: libpfms.c:22
@ CMD_NONE
Definition: libpfms.c:20
@ CMD_WPMCS
Definition: libpfms.c:24
@ CMD_START
Definition: libpfms.c:28

Function Documentation

◆ __pfms_do_simple_cmd()

static int __pfms_do_simple_cmd ( pfms_cmd_t  cmd,
void *  desc,
void *  data,
uint32_t  n 
)
static

Definition at line 528 of file libpfms.c.

529{
530 size_t k;
531 pfms_session_t *s;
532 int ret;
533
534 if (desc == NULL) {
535 dprint("invalid parameters\n");
536 return -1;
537 }
538 s = (pfms_session_t *)desc;
539
540 if (s->ncpus == 0) {
541 dprint("invalid session content 0 CPUS\n");
542 return -1;
543 }
544 /*
545 * send create context order
546 */
547 for(k=0; k < ncpus; k++) {
548 if (tds[k].barrier == &s->barrier) {
549 tds[k].cmd = cmd;
550 tds[k].data = data;
551 tds[k].ndata = n;
552 sem_post(&tds[k].cmd_sem);
553 }
554 }
555 barrier_wait(&s->barrier);
556
557 ret = 0;
558
559 /*
560 * check for errors
561 */
562 for(k=0; k < ncpus; k++) {
563 if (tds[k].barrier == &s->barrier) {
564 ret = tds[k].ret;
565 if (ret) {
566 dprint("failure on CPU%zu\n", k);
567 break;
568 }
569 }
570 }
571 /*
572 * simple commands cannot be undone
573 */
574 return ret ? -1 : 0;
575}
double s
Definition: byte_profile.c:36
static pfms_thread_t * tds
Definition: libpfms.c:64
static uint32_t ncpus
Definition: libpfms.c:63
static int barrier_wait(barrier_t *b)
Definition: libpfms.c:95
#define dprint(format, arg...)
Definition: libpfms.c:18
uint32_t ndata
Definition: libpfms.c:51
pfms_cmd_t cmd
Definition: libpfms.c:49
void * data
Definition: libpfms.c:50
Here is the call graph for this function:
Here is the caller graph for this function:

◆ barrier_init()

static int barrier_init ( barrier_t b,
uint32_t  count 
)
static

Definition at line 68 of file libpfms.c.

69{
70 int r;
71
72 r = pthread_mutex_init(&b->mutex, NULL);
73 if (r == -1) return -1;
74
75 r = pthread_cond_init(&b->cond, NULL);
76 if (r == -1) return -1;
77
78 b->max = b->counter = count;
79 b->generation = 0;
80
81 return 0;
82}
static long count
static double b[MATRIX_SIZE][MATRIX_SIZE]
Definition: libmsr_basic.c:39
Here is the caller graph for this function:

◆ barrier_wait()

static int barrier_wait ( barrier_t b)
static

Definition at line 95 of file libpfms.c.

96{
97 uint64_t generation;
98 int oldstate;
99
100 pthread_cleanup_push(cleanup_barrier, b);
101
102 pthread_mutex_lock(&b->mutex);
103
104 pthread_testcancel();
105
106 if (--b->counter == 0) {
107
108 /* reset barrier */
109 b->counter = b->max;
110 /*
111 * bump generation number, this avoids thread getting stuck in the
112 * wake up loop below in case a thread just out of the barrier goes
113 * back in right away before all the thread from the previous "round"
114 * have "escaped".
115 */
116 b->generation++;
117
118 pthread_cond_broadcast(&b->cond);
119 } else {
120
121 generation = b->generation;
122
123 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
124
125 while (b->counter != b->max && generation == b->generation) {
126 pthread_cond_wait(&b->cond, &b->mutex);
127 }
128
129 pthread_setcancelstate(oldstate, NULL);
130 }
131 pthread_mutex_unlock(&b->mutex);
132
133 pthread_cleanup_pop(0);
134
135 return 0;
136}
static void cleanup_barrier(void *arg)
Definition: libpfms.c:85
Here is the call graph for this function:
Here is the caller graph for this function:

◆ cleanup_barrier()

static void cleanup_barrier ( void *  arg)
static

Definition at line 85 of file libpfms.c.

86{
87 barrier_t *b = (barrier_t *)arg;
88 int r;
89 r = pthread_mutex_unlock(&b->mutex);
90 dprint("free barrier mutex r=%d\n", r);
91 (void) r;
92}
Here is the caller graph for this function:

◆ create_one_wthread()

static int create_one_wthread ( int  cpu)
static

Definition at line 260 of file libpfms.c.

261{
262 int ret;
263
264 sem_init(&tds[cpu].cmd_sem, 0, 0);
265
266 ret = pthread_create(&tds[cpu].tid,
267 NULL,
268 (void *(*)(void *))pfms_thread_mainloop,
269 (void *)(long)cpu);
270 return ret;
271}
static void pfms_thread_mainloop(void *arg)
Definition: libpfms.c:172
Here is the call graph for this function:
Here is the caller graph for this function:

◆ create_wthreads()

static int create_wthreads ( uint64_t *  cpu_list,
uint32_t  n 
)
static

Definition at line 277 of file libpfms.c.

278{
279 uint64_t v;
280 uint32_t i,k, cpu;
281 int ret = 0;
282
283 for(k=0, cpu = 0; k < n; k++, cpu+= 64) {
284 v = cpu_list[k];
285 for(i=0; v && i < 63; i++, v>>=1, cpu++) {
286 if ((v & 0x1) && tds[cpu].tid == 0) {
287 ret = create_one_wthread(cpu);
288 if (ret) break;
289 }
290 }
291 }
292
293 if (ret)
294 dprint("cannot create wthread on CPU%u\n", cpu);
295
296 return ret;
297}
int i
static int create_one_wthread(int cpu)
Definition: libpfms.c:260
Here is the call graph for this function:
Here is the caller graph for this function:

◆ pfms_close()

int pfms_close ( void *  desc)

Definition at line 608 of file libpfms.c.

609{
610 size_t k;
611 pfms_session_t *s;
612 int ret;
613
614 if (desc == NULL) {
615 dprint("invalid parameters\n");
616 return -1;
617 }
618 s = (pfms_session_t *)desc;
619
620 if (s->ncpus == 0) {
621 dprint("invalid session content 0 CPUS\n");
622 return -1;
623 }
624
625 for(k=0; k < ncpus; k++) {
626 if (tds[k].barrier == &s->barrier) {
627 tds[k].cmd = CMD_CLOSE;
628 sem_post(&tds[k].cmd_sem);
629 }
630 }
631 barrier_wait(&s->barrier);
632
633 ret = 0;
634
635 pthread_mutex_lock(&tds_lock);
636 /*
637 * check for errors
638 */
639 for(k=0; k < ncpus; k++) {
640 if (tds[k].barrier == &s->barrier) {
641 if (tds[k].ret) {
642 dprint("failure on CPU%zu\n", k);
643 }
644 ret |= tds[k].ret;
645 tds[k].barrier = NULL;
646 }
647 }
648
649 pthread_mutex_unlock(&tds_lock);
650
651 free(s);
652
653 /*
654 * XXX: we cannot undo close
655 */
656 return ret ? -1 : 0;
657}
static pthread_mutex_t tds_lock
Definition: libpfms.c:65
barrier_t * barrier
Definition: libpfms.c:55
Here is the call graph for this function:
Here is the caller graph for this function:

◆ pfms_create()

int pfms_create ( uint64_t *  cpu_list,
size_t  n,
pfarg_ctx_t ctx,
pfms_ovfl_t ovfl,
void **  desc 
)

Definition at line 327 of file libpfms.c.

328{
329 uint64_t v;
330 size_t k, i;
331 uint32_t num, cpu;
332 pfms_session_t *s;
333 int ret;
334
335 if (cpu_list == NULL || n == 0 || ctx == NULL || desc == NULL) {
336 dprint("invalid parameters\n");
337 return -1;
338 }
339
340 if ((ctx->ctx_flags & PFM_FL_SYSTEM_WIDE) == 0) {
341 dprint("only works for system wide\n");
342 return -1;
343 }
344
345 *desc = NULL;
346
347 /*
348 * XXX: assuming CPU are contiguously indexed
349 */
350 num = 0;
351 for(k=0, cpu = 0; k < n; k++, cpu+=64) {
352 v = cpu_list[k];
353 for(i=0; v && i < 63; i++, v>>=1, cpu++) {
354 if (v & 0x1) {
355 if (cpu >= ncpus) {
356 dprint("unavailable CPU%u\n", cpu);
357 return -1;
358 }
359 num++;
360 }
361 }
362 }
363
364 if (num == 0)
365 return 0;
366
367 s = calloc(1, sizeof(*s));
368 if (s == NULL) {
369 dprint("cannot allocate %u contexts\n", num);
370 return -1;
371 }
372 s->ncpus = num;
373
374 printf("%u-way session\n", num);
375
376 /*
377 * +1 to account for main thread waiting
378 */
379 ret = barrier_init(&s->barrier, num + 1);
380 if (ret) {
381 dprint("cannot init barrier\n");
382 goto error_free;
383 }
384
385 /*
386 * lock thread descriptor table, no other create_session, close_session
387 * can occur
388 */
389 pthread_mutex_lock(&tds_lock);
390
391 if (create_wthreads(cpu_list, n))
392 goto error_free_unlock;
393
394 /*
395 * check all needed threads are available
396 */
397 for(k=0, cpu = 0; k < n; k++, cpu += 64) {
398 v = cpu_list[k];
399 for(i=0; v && i < 63; i++, v>>=1, cpu++) {
400 if (v & 0x1) {
401 if (tds[cpu].barrier) {
402 dprint("CPU%u already managing a session\n", cpu);
403 goto error_free_unlock;
404 }
405
406 }
407 }
408 }
409
410 /*
411 * send create context order
412 */
413 for(k=0, cpu = 0; k < n; k++, cpu += 64) {
414 v = cpu_list[k];
415 for(i=0; v && i < 63; i++, v>>=1, cpu++) {
416 if (v & 0x1) {
417 tds[cpu].cmd = CMD_CTX;
418 tds[cpu].data = ctx;
419 tds[cpu].barrier = &s->barrier;
420 sem_post(&tds[cpu].cmd_sem);
421 }
422 }
423 }
424 barrier_wait(&s->barrier);
425
426 ret = 0;
427
428 /*
429 * check for errors
430 */
431 for(k=0; k < ncpus; k++) {
432 if (tds[k].barrier == &s->barrier) {
433 ret = tds[k].ret;
434 if (ret)
435 break;
436 }
437 }
438 /*
439 * undo if error found
440 */
441 if (k < ncpus) {
442 for(k=0; k < ncpus; k++) {
443 if (tds[k].barrier == &s->barrier) {
444 if (tds[k].ret == 0) {
445 tds[k].cmd = CMD_CLOSE;
446 sem_post(&tds[k].cmd_sem);
447 }
448 /* mark as free */
449 tds[k].barrier = NULL;
450 }
451 }
452 }
453 pthread_mutex_unlock(&tds_lock);
454
455 if (ret == 0) *desc = s;
456
457 return ret ? -1 : 0;
458
459error_free_unlock:
460 pthread_mutex_unlock(&tds_lock);
461
462error_free:
463 free(s);
464 return -1;
465}
#define PFM_FL_SYSTEM_WIDE
static int create_wthreads(uint64_t *cpu_list, uint32_t n)
Definition: libpfms.c:277
static int barrier_init(barrier_t *b, uint32_t count)
Definition: libpfms.c:68
uint32_t ctx_flags
Definition: perfmon_v2.h:19
Here is the call graph for this function:
Here is the caller graph for this function:

◆ pfms_initialize()

int pfms_initialize ( void  )

Definition at line 300 of file libpfms.c.

301{
302 printf("cpu_t=%zu thread=%zu session_t=%zu\n",
303 sizeof(pfms_cpu_t),
304 sizeof(pfms_thread_t),
305 sizeof(pfms_session_t));
306
307 ncpus = (uint32_t)sysconf(_SC_NPROCESSORS_ONLN);
308 if (ncpus == -1) {
309 dprint("cannot retrieve number of online processors\n");
310 return -1;
311 }
312
313 dprint("configured for %u CPUs\n", ncpus);
314
315 /*
316 * XXX: assuming CPU are contiguously indexed
317 */
318 tds = calloc(ncpus, sizeof(*tds));
319 if (tds == NULL) {
320 dprint("cannot allocate thread descriptors\n");
321 return -1;
322 }
323 return 0;
324}
Here is the caller graph for this function:

◆ pfms_load()

int pfms_load ( void *  desc)

Definition at line 468 of file libpfms.c.

469{
470 uint32_t k;
471 pfms_session_t *s;
472 int ret;
473
474 if (desc == NULL) {
475 dprint("invalid parameters\n");
476 return -1;
477 }
478 s = (pfms_session_t *)desc;
479
480 if (s->ncpus == 0) {
481 dprint("invalid session content 0 CPUS\n");
482 return -1;
483 }
484 /*
485 * send create context order
486 */
487 for(k=0; k < ncpus; k++) {
488 if (tds[k].barrier == &s->barrier) {
489 tds[k].cmd = CMD_LOAD;
490 sem_post(&tds[k].cmd_sem);
491 }
492 }
493
494 barrier_wait(&s->barrier);
495
496 ret = 0;
497
498 /*
499 * check for errors
500 */
501 for(k=0; k < ncpus; k++) {
502 if (tds[k].barrier == &s->barrier) {
503 ret = tds[k].ret;
504 if (ret) {
505 dprint("failure on CPU%u\n", k);
506 break;
507 }
508 }
509 }
510
511 /*
512 * if error, unload all others
513 */
514 if (k < ncpus) {
515 for(k=0; k < ncpus; k++) {
516 if (tds[k].barrier == &s->barrier) {
517 if (tds[k].ret == 0) {
518 tds[k].cmd = CMD_UNLOAD;
519 sem_post(&tds[k].cmd_sem);
520 }
521 }
522 }
523 }
524 return ret ? -1 : 0;
525}
Here is the call graph for this function:
Here is the caller graph for this function:

◆ pfms_read_pmds()

int pfms_read_pmds ( void *  desc,
pfarg_pmd_t pmds,
uint32_t  n 
)

Definition at line 660 of file libpfms.c.

661{
662 pfms_session_t *s;
663 uint32_t k, pmds_per_cpu;
664 int ret;
665
666 if (desc == NULL) {
667 dprint("invalid parameters\n");
668 return -1;
669 }
670 s = (pfms_session_t *)desc;
671
672 if (s->ncpus == 0) {
673 dprint("invalid session content 0 CPUS\n");
674 return -1;
675 }
676 if (n % s->ncpus) {
677 dprint("invalid number of pfarg_pmd_t provided, must be multiple of %u\n", s->ncpus);
678 return -1;
679 }
680 pmds_per_cpu = n / s->ncpus;
681
682 dprint("n=%u ncpus=%u per_cpu=%u\n", n, s->ncpus, pmds_per_cpu);
683
684 for(k=0; k < ncpus; k++) {
685 if (tds[k].barrier == &s->barrier) {
686 tds[k].cmd = CMD_RPMDS;
687 tds[k].data = pmds;
688 tds[k].ndata= pmds_per_cpu;
689 sem_post(&tds[k].cmd_sem);
690 pmds += pmds_per_cpu;
691 }
692 }
693 barrier_wait(&s->barrier);
694
695 ret = 0;
696
697 /*
698 * check for errors
699 */
700 for(k=0; k < ncpus; k++) {
701 if (tds[k].barrier == &s->barrier) {
702 ret = tds[k].ret;
703 if (ret) {
704 dprint("failure on CPU%u\n", k);
705 break;
706 }
707 }
708 }
709 /*
710 * cannot undo pfm_read_pmds
711 */
712 return ret ? -1 : 0;
713}
Here is the call graph for this function:
Here is the caller graph for this function:

◆ pfms_start()

int pfms_start ( void *  desc)

Definition at line 584 of file libpfms.c.

585{
586 return __pfms_do_simple_cmd(CMD_START, desc, NULL, 0);
587}
static int __pfms_do_simple_cmd(pfms_cmd_t cmd, void *desc, void *data, uint32_t n)
Definition: libpfms.c:528
Here is the call graph for this function:
Here is the caller graph for this function:

◆ pfms_stop()

int pfms_stop ( void *  desc)

Definition at line 590 of file libpfms.c.

591{
592 return __pfms_do_simple_cmd(CMD_STOP, desc, NULL, 0);
593}
Here is the call graph for this function:
Here is the caller graph for this function:

◆ pfms_thread_mainloop()

static void pfms_thread_mainloop ( void *  arg)
static

Definition at line 172 of file libpfms.c.

173{
174 long k = (long )arg;
175 uint32_t mycpu = (uint32_t)k;
176 pfarg_ctx_t myctx, *ctx;
177 pfarg_load_t load_args;
178 int fd = -1;
179 pfms_thread_t *td;
180 sem_t *cmd_sem;
181 int ret = 0;
182
183 memset(&load_args, 0, sizeof(load_args));
184 load_args.load_pid = mycpu;
185 td = tds+mycpu;
186
187 ret = pin_cpu(mycpu);
188 dprint("CPU%u wthread created and pinned ret=%d\n", mycpu, ret);
189
190 cmd_sem = &tds[mycpu].cmd_sem;
191
192 for(;;) {
193 dprint("CPU%u waiting for cmd\n", mycpu);
194
195 sem_wait(cmd_sem);
196
197 switch(td->cmd) {
198 case CMD_NONE:
199 ret = 0;
200 break;
201
202 case CMD_CTX:
203
204 /*
205 * copy context to get private fd
206 */
207 ctx = td->data;
208 myctx = *ctx;
209
210 fd = pfm_create_context(&myctx, NULL, NULL, 0);
211 ret = fd < 0 ? -1 : 0;
212 dprint("CPU%u CMD_CTX ret=%d errno=%d fd=%d\n", mycpu, ret, errno, fd);
213 break;
214
215 case CMD_LOAD:
216 ret = pfm_load_context(fd, &load_args);
217 dprint("CPU%u CMD_LOAD ret=%d errno=%d fd=%d\n", mycpu, ret, errno, fd);
218 break;
219 case CMD_UNLOAD:
220 ret = pfm_unload_context(fd);
221 dprint("CPU%u CMD_UNLOAD ret=%d errno=%d fd=%d\n", mycpu, ret, errno, fd);
222 break;
223 case CMD_START:
224 ret = pfm_start(fd, NULL);
225 dprint("CPU%u CMD_START ret=%d errno=%d fd=%d\n", mycpu, ret, errno, fd);
226 break;
227 case CMD_STOP:
228 ret = pfm_stop(fd);
229 dprint("CPU%u CMD_STOP ret=%d errno=%d fd=%d\n", mycpu, ret, errno, fd);
230 break;
231 case CMD_WPMCS:
232 ret = pfm_write_pmcs(fd,(pfarg_pmc_t *)td->data, td->ndata);
233 dprint("CPU%u CMD_WPMCS ret=%d errno=%d fd=%d\n", mycpu, ret, errno, fd);
234 break;
235 case CMD_WPMDS:
236 ret = pfm_write_pmds(fd,(pfarg_pmd_t *)td->data, td->ndata);
237 dprint("CPU%u CMD_WPMDS ret=%d errno=%d fd=%d\n", mycpu, ret, errno, fd);
238 break;
239 case CMD_RPMDS:
240 ret = pfm_read_pmds(fd,(pfarg_pmd_t *)td->data, td->ndata);
241 dprint("CPU%u CMD_RPMDS ret=%d errno=%d fd=%d\n", mycpu, ret, errno, fd);
242 break;
243 case CMD_CLOSE:
244 dprint("CPU%u CMD_CLOSE fd=%d\n", mycpu, fd);
245 ret = close(fd);
246 fd = -1;
247 break;
248 default:
249 break;
250 }
251 td->ret = ret;
252
253 dprint("CPU%u td->ret=%d\n", mycpu, ret);
254
255 barrier_wait(td->barrier);
256 }
257}
int close(int fd)
Definition: appio.c:179
int errno
static int pin_cpu(uint32_t cpu)
Definition: libpfms.c:146
os_err_t pfm_stop(int fd)
os_err_t pfm_write_pmds(int fd, pfarg_pmd_t *pmds, int count)
os_err_t pfm_unload_context(int fd)
os_err_t pfm_write_pmcs(int fd, pfarg_pmc_t *pmcs, int count)
os_err_t pfm_start(int fd, pfarg_start_t *start)
os_err_t pfm_create_context(pfarg_ctx_t *ctx, char *smpl_name, void *smpl_arg, size_t smpl_size)
os_err_t pfm_load_context(int fd, pfarg_load_t *load)
os_err_t pfm_read_pmds(int fd, pfarg_pmd_t *pmds, int count)
long long int long long
Definition: sde_internal.h:85
uint32_t load_pid
Definition: perfmon_v2.h:69
sem_t cmd_sem
Definition: libpfms.c:52
Here is the call graph for this function:
Here is the caller graph for this function:

◆ pfms_unload()

int pfms_unload ( void *  desc)

Definition at line 578 of file libpfms.c.

579{
580 return __pfms_do_simple_cmd(CMD_UNLOAD, desc, NULL, 0);
581}
Here is the call graph for this function:

◆ pfms_write_pmcs()

int pfms_write_pmcs ( void *  desc,
pfarg_pmc_t pmcs,
uint32_t  n 
)

Definition at line 596 of file libpfms.c.

597{
598 return __pfms_do_simple_cmd(CMD_WPMCS, desc, pmcs, n);
599}
Here is the call graph for this function:
Here is the caller graph for this function:

◆ pfms_write_pmds()

int pfms_write_pmds ( void *  desc,
pfarg_pmd_t pmds,
uint32_t  n 
)

Definition at line 602 of file libpfms.c.

603{
604 return __pfms_do_simple_cmd(CMD_WPMDS, desc, pmds, n);
605}
Here is the call graph for this function:
Here is the caller graph for this function:

◆ pin_cpu()

static int pin_cpu ( uint32_t  cpu)
static

Definition at line 146 of file libpfms.c.

147{
148 uint64_t *mask;
149 size_t size;
150 pid_t pid;
151 int ret;
152
153 pid = syscall(__NR_gettid);
154
155 size = ncpus * sizeof(uint64_t);
156
157 mask = calloc(1, size);
158 if (mask == NULL) {
159 dprint("CPU%u: cannot allocate bitvector\n", cpu);
160 return -1;
161 }
162 mask[cpu>>6] = 1ULL << (cpu & 63);
163
164 ret = syscall(__NR_sched_setaffinity, pid, size, mask);
165
166 free(mask);
167
168 return ret;
169}
static int pid
Here is the caller graph for this function:

Variable Documentation

◆ ncpus

uint32_t ncpus
static

Definition at line 63 of file libpfms.c.

◆ tds

pfms_thread_t* tds
static

Definition at line 64 of file libpfms.c.

◆ tds_lock

pthread_mutex_t tds_lock = PTHREAD_MUTEX_INITIALIZER
static

Definition at line 65 of file libpfms.c.