4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 * Copyright (c) 2016 by Delphix. All rights reserved.
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/processor.h>
#include <sys/procset.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <stdarg.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <errno.h>
#include <libintl.h>
#include <thread.h>
#include <synch.h>

#include "libcpc_impl.h"
43 #define MASK32 0xFFFFFFFF
46 * The library uses the cpc_lock field of the cpc_t struct to protect access to
47 * the linked lists inside the cpc_t, and only the linked lists. It is NOT used
48 * to protect against users shooting themselves in the foot (such as, for
49 * instance, destroying the same set at the same time from different threads.).
51 * SIGEMT needs to be blocked while holding the lock, to prevent deadlock among
52 * an app holding the lock and a signal handler attempting to sample or bind.
55 static char *cpc_get_list(int which
, int arg
);
56 static void cpc_err(cpc_t
*cpc
, const char *fn
, int subcode
, ...);
57 static int cpc_set_valid(cpc_t
*cpc
, cpc_set_t
*set
);
58 static int cpc_lock(cpc_t
*cpc
);
59 static void cpc_unlock(cpc_t
*cpc
, int blocked
);
60 static int cpc_valid_event(cpc_t
*cpc
, uint_t pic
, const char *ev
);
61 static int cpc_valid_attr(cpc_t
*cpc
, char *attr
);
62 static void cpc_invalidate_pctx(cpc_t
*cpc
, pctx_t
*pctx
);
73 if (ver
!= CPC_VER_CURRENT
) {
75 * v1 clients must stick to the v1 interface: cpc_version()
82 * Call the syscall with invalid parameters. If we get ENOSYS this CPU
83 * has no CPC support. We need to block SIGSYS because the syscall code
84 * will send the signal if the system call fails to load.
86 sigsaved
= signal(SIGSYS
, SIG_IGN
);
87 if (syscall(SYS_cpc
, -1, -1, -1, -1, -1) != -1) {
88 (void) signal(SIGSYS
, sigsaved
);
93 (void) signal(SIGSYS
, sigsaved
);
95 if (error
!= EINVAL
) {
100 if ((cpc
= malloc(sizeof (cpc_t
))) == NULL
) {
105 cpc
->cpc_npic
= syscall(SYS_cpc
, CPC_NPIC
, -1, 0, 0, 0);
106 cpc
->cpc_caps
= syscall(SYS_cpc
, CPC_CAPS
, -1, 0, 0, 0);
108 if (syscall(SYS_cpc
, CPC_IMPL_NAME
, -1, &cpc
->cpc_cciname
, 0, 0) != 0)
110 if (syscall(SYS_cpc
, CPC_CPUREF
, -1, &cpc
->cpc_cpuref
, 0, 0) != 0)
114 if ((cpc
->cpc_attrlist
= cpc_get_list(CPC_LIST_ATTRS
, 0)) == NULL
) {
119 if ((cpc
->cpc_evlist
= malloc(cpc
->cpc_npic
* sizeof (char *))) ==
121 free(cpc
->cpc_attrlist
);
126 for (i
= 0; i
< cpc
->cpc_npic
; i
++) {
127 if ((cpc
->cpc_evlist
[i
] = cpc_get_list(CPC_LIST_EVENTS
, i
)) ==
131 if (i
!= cpc
->cpc_npic
) {
132 for (j
= 0; j
< i
; j
++)
133 free(cpc
->cpc_evlist
[j
]);
134 free(cpc
->cpc_evlist
);
135 free(cpc
->cpc_attrlist
);
140 cpc
->cpc_sets
= NULL
;
141 cpc
->cpc_bufs
= NULL
;
142 cpc
->cpc_errfn
= NULL
;
143 (void) mutex_init(&cpc
->cpc_lock
, USYNC_THREAD
, NULL
);
144 __pctx_cpc_register_callback(cpc_invalidate_pctx
);
150 * Ensure state is cleaned up:
152 * - Hardware is unbound
153 * - Sets are all destroyed
154 * - Bufs are all freed
157 cpc_close(cpc_t
*cpc
)
159 while (cpc
->cpc_sets
!= NULL
) {
160 if (cpc
->cpc_sets
->cs_state
!= CS_UNBOUND
)
161 (void) cpc_unbind(cpc
, cpc
->cpc_sets
);
162 (void) cpc_set_destroy(cpc
, cpc
->cpc_sets
);
165 while (cpc
->cpc_bufs
!= NULL
)
166 (void) cpc_buf_destroy(cpc
, cpc
->cpc_bufs
);
173 * Terminate everything that runs in pctx_run
176 cpc_terminate(cpc_t
*cpc
)
181 sigblocked
= cpc_lock(cpc
);
182 for (csp
= cpc
->cpc_sets
; csp
!= NULL
; csp
= csp
->cs_next
) {
183 if (csp
->cs_pctx
!= NULL
)
184 pctx_terminate(csp
->cs_pctx
);
186 cpc_unlock(cpc
, sigblocked
);
190 cpc_set_create(cpc_t
*cpc
)
195 if ((set
= malloc(sizeof (*set
))) == NULL
) {
200 set
->cs_request
= NULL
;
202 set
->cs_state
= CS_UNBOUND
;
208 sigblocked
= cpc_lock(cpc
);
209 set
->cs_next
= cpc
->cpc_sets
;
211 cpc_unlock(cpc
, sigblocked
);
217 cpc_set_destroy(cpc_t
*cpc
, cpc_set_t
*set
)
219 cpc_set_t
*csp
, *prev
;
220 cpc_request_t
*req
, *next
;
224 * Remove this set from the cpc handle's list of sets.
226 sigblocked
= cpc_lock(cpc
);
227 for (csp
= prev
= cpc
->cpc_sets
; csp
!= NULL
; csp
= csp
->cs_next
) {
233 cpc_unlock(cpc
, sigblocked
);
237 if (csp
== cpc
->cpc_sets
)
238 cpc
->cpc_sets
= csp
->cs_next
;
239 prev
->cs_next
= csp
->cs_next
;
240 cpc_unlock(cpc
, sigblocked
);
242 if (csp
->cs_state
!= CS_UNBOUND
)
243 (void) cpc_unbind(cpc
, csp
);
246 * Detach from the process
248 if (csp
->cs_pctx
!= NULL
) {
249 pctx_release(csp
->cs_pctx
);
253 for (req
= csp
->cs_request
; req
!= NULL
; req
= next
) {
256 if (req
->cr_nattrs
!= 0)
270 cpc_set_add_request(cpc_t
*cpc
, cpc_set_t
*set
, const char *event
,
271 uint64_t preset
, uint_t flags
, uint_t nattrs
, const cpc_attr_t
*attrs
)
274 const char *fn
= "cpc_set_add_request";
276 int npics
= cpc_npic(cpc
);
278 if (cpc_set_valid(cpc
, set
) != 0 || set
->cs_state
!= CS_UNBOUND
) {
283 for (i
= 0; i
< npics
; i
++)
284 if (cpc_valid_event(cpc
, i
, event
))
287 cpc_err(cpc
, fn
, CPC_INVALID_EVENT
);
292 if ((req
= malloc(sizeof (*req
))) == NULL
) {
297 (void) strncpy(req
->cr_event
, event
, CPC_MAX_EVENT_LEN
);
298 req
->cr_preset
= preset
;
299 req
->cr_flags
= flags
;
300 req
->cr_nattrs
= nattrs
;
301 req
->cr_index
= set
->cs_nreqs
;
305 for (i
= 0; i
< nattrs
; i
++) {
307 * Verify that each attribute name is legal and valid.
309 if (attrs
[i
].ca_name
[0] == '\0' ||
310 cpc_valid_attr(cpc
, attrs
[i
].ca_name
) == 0) {
311 cpc_err(cpc
, fn
, CPC_INVALID_ATTRIBUTE
);
316 * If the user requested a specific picnum, ensure that
317 * the pic can count the requested event.
319 if (strncmp("picnum", attrs
[i
].ca_name
, 8) == 0) {
320 if (attrs
[i
].ca_val
>= npics
) {
321 cpc_err(cpc
, fn
, CPC_INVALID_PICNUM
);
325 if (cpc_valid_event(cpc
, attrs
[i
].ca_val
,
326 req
->cr_event
) == 0) {
327 cpc_err(cpc
, fn
, CPC_PIC_NOT_CAPABLE
);
333 if ((req
->cr_attr
= malloc(nattrs
* sizeof (kcpc_attr_t
)))
339 for (i
= 0; i
< nattrs
; i
++) {
340 req
->cr_attr
[i
].ka_val
= attrs
[i
].ca_val
;
341 (void) strncpy(req
->cr_attr
[i
].ka_name
,
342 attrs
[i
].ca_name
, CPC_MAX_ATTR_LEN
);
347 req
->cr_next
= set
->cs_request
;
348 set
->cs_request
= req
;
351 return (req
->cr_index
);
360 cpc_buf_create(cpc_t
*cpc
, cpc_set_t
*set
)
365 if (cpc_set_valid(cpc
, set
) != 0) {
370 if ((buf
= malloc(sizeof (*buf
))) == NULL
)
373 buf
->cb_size
= set
->cs_nreqs
* sizeof (uint64_t);
374 if ((buf
->cb_data
= malloc(buf
->cb_size
)) == NULL
) {
379 bzero(buf
->cb_data
, buf
->cb_size
);
384 sigblocked
= cpc_lock(cpc
);
385 buf
->cb_next
= cpc
->cpc_bufs
;
387 cpc_unlock(cpc
, sigblocked
);
393 cpc_buf_destroy(cpc_t
*cpc
, cpc_buf_t
*buf
)
395 cpc_buf_t
*cbp
, *prev
;
399 * Remove this buf from the cpc handle's list of bufs.
401 sigblocked
= cpc_lock(cpc
);
402 for (cbp
= prev
= cpc
->cpc_bufs
; cbp
!= NULL
; cbp
= cbp
->cb_next
) {
408 cpc_unlock(cpc
, sigblocked
);
412 if (cbp
== cpc
->cpc_bufs
)
413 cpc
->cpc_bufs
= cbp
->cb_next
;
414 prev
->cb_next
= cbp
->cb_next
;
416 cpc_unlock(cpc
, sigblocked
);
425 cpc_bind_curlwp(cpc_t
*cpc
, cpc_set_t
*set
, uint_t flags
)
433 * We don't bother checking cpc_set_valid() here, because this is in the
434 * fast path of an app doing SIGEMT-based profiling as they restart the
435 * counters from their signal handler.
437 if (CPC_SET_VALID_FLAGS(flags
) == 0 || set
->cs_nreqs
<= 0) {
442 if ((packed_set
= __cpc_pack_set(set
, flags
, &packsize
)) == NULL
) {
447 ret
= syscall(SYS_cpc
, CPC_BIND
, -1, packed_set
, packsize
, &subcode
);
452 cpc_err(cpc
, "cpc_bind_curlwp", subcode
);
456 set
->cs_thr
= thr_self();
457 set
->cs_state
= CS_BOUND_CURLWP
;
463 cpc_bind_pctx(cpc_t
*cpc
, pctx_t
*pctx
, id_t id
, cpc_set_t
*set
, uint_t flags
)
471 * cpc_bind_pctx() currently has no valid flags.
473 if (flags
!= 0 || cpc_set_valid(cpc
, set
) != 0 || set
->cs_nreqs
<= 0) {
478 if ((packed_set
= __cpc_pack_set(set
, flags
, &packsize
)) == NULL
) {
483 ret
= __pctx_cpc(pctx
, cpc
, CPC_BIND
, id
, packed_set
, (void *)packsize
,
484 (void *)&subcode
, -1);
491 set
->cs_state
= CS_BOUND_PCTX
;
492 } else if (subcode
!= -1)
493 cpc_err(cpc
, "cpc_bind_pctx", subcode
);
500 cpc_bind_cpu(cpc_t
*cpc
, processorid_t id
, cpc_set_t
*set
, uint_t flags
)
505 __cpc_args_t cpc_args
;
507 const char *fn
= "cpc_bind_cpu";
511 * cpc_bind_cpu() currently has no valid flags.
513 if (flags
!= 0 || cpc_set_valid(cpc
, set
) != 0 || set
->cs_nreqs
<= 0) {
518 if (processor_bind(P_LWPID
, P_MYID
, id
, &set
->cs_obind
) == -1) {
519 cpc_err(cpc
, fn
, CPC_PBIND_FAILED
);
523 if ((fd
= open(CPUDRV_SHARED
, O_RDWR
)) < 0) {
525 (void) processor_bind(P_LWPID
, P_MYID
, set
->cs_obind
, NULL
);
531 * To avoid leaking file descriptors, if we find an existing fd here we
532 * just close it. This is only a problem if a user attempts to bind the
533 * same set to different CPUs without first unbinding it.
535 if (set
->cs_fd
!= -1)
536 (void) close(set
->cs_fd
);
539 if ((packed_set
= __cpc_pack_set(set
, flags
, &packsize
)) == NULL
) {
541 (void) processor_bind(P_LWPID
, P_MYID
, set
->cs_obind
, NULL
);
546 cpc_args
.udata1
= packed_set
;
547 cpc_args
.udata2
= (void *)packsize
;
548 cpc_args
.udata3
= (void *)&subcode
;
550 if (ioctl(fd
, CPCIO_BIND
, &cpc_args
) != 0) {
554 (void) processor_bind(P_LWPID
, P_MYID
, set
->cs_obind
, NULL
);
556 cpc_err(cpc
, fn
, subcode
);
563 set
->cs_thr
= thr_self();
564 set
->cs_state
= CS_BOUND_CPU
;
571 cpc_request_preset(cpc_t
*cpc
, int index
, uint64_t preset
)
573 return (syscall(SYS_cpc
, CPC_PRESET
, -1, index
,
574 (uint32_t)(preset
>> 32), (uint32_t)(preset
& MASK32
)));
579 cpc_set_restart(cpc_t
*cpc
, cpc_set_t
*set
)
581 return (syscall(SYS_cpc
, CPC_RESTART
, -1, 0, 0, 0));
586 cpc_unbind(cpc_t
*cpc
, cpc_set_t
*set
)
591 if (cpc_set_valid(cpc
, set
) != 0) {
596 switch (set
->cs_state
) {
600 case CS_BOUND_CURLWP
:
601 ret
= syscall(SYS_cpc
, CPC_RELE
, -1, 0, 0, 0);
605 ret
= ioctl(set
->cs_fd
, CPCIO_RELE
, NULL
);
607 (void) close(set
->cs_fd
);
609 (void) processor_bind(P_LWPID
, P_MYID
, set
->cs_obind
, NULL
);
612 if (set
->cs_pctx
!= NULL
) {
613 ret
= __pctx_cpc(set
->cs_pctx
, cpc
, CPC_RELE
,
614 set
->cs_id
, 0, 0, 0, 0);
622 set
->cs_state
= CS_UNBOUND
;
630 cpc_set_sample(cpc_t
*cpc
, cpc_set_t
*set
, cpc_buf_t
*buf
)
635 * The following check ensures that only the most recently bound set
636 * can be sampled, as binding a set invalidates all other sets in the
639 if (set
->cs_state
== CS_UNBOUND
||
640 buf
->cb_size
!= set
->cs_nreqs
* sizeof (uint64_t)) {
645 switch (set
->cs_state
) {
646 case CS_BOUND_CURLWP
:
647 return (syscall(SYS_cpc
, CPC_SAMPLE
, -1, buf
->cb_data
,
648 &buf
->cb_hrtime
, &buf
->cb_tick
));
650 args
.udata1
= buf
->cb_data
;
651 args
.udata2
= &buf
->cb_hrtime
;
652 args
.udata3
= &buf
->cb_tick
;
653 return (ioctl(set
->cs_fd
, CPCIO_SAMPLE
, &args
));
655 return (__pctx_cpc(set
->cs_pctx
, cpc
, CPC_SAMPLE
, set
->cs_id
,
656 buf
->cb_data
, &buf
->cb_hrtime
, &buf
->cb_tick
,
666 cpc_buf_sub(cpc_t
*cpc
, cpc_buf_t
*ds
, cpc_buf_t
*a
, cpc_buf_t
*b
)
670 if (a
->cb_size
!= ds
->cb_size
|| b
->cb_size
!= ds
->cb_size
)
673 ds
->cb_hrtime
= (a
->cb_hrtime
> b
->cb_hrtime
) ?
674 a
->cb_hrtime
: b
->cb_hrtime
;
675 ds
->cb_tick
= a
->cb_tick
- b
->cb_tick
;
677 for (i
= 0; i
< ds
->cb_size
/ sizeof (uint64_t); i
++)
678 ds
->cb_data
[i
] = a
->cb_data
[i
] - b
->cb_data
[i
];
683 cpc_buf_add(cpc_t
*cpc
, cpc_buf_t
*ds
, cpc_buf_t
*a
, cpc_buf_t
*b
)
687 if (a
->cb_size
!= ds
->cb_size
|| b
->cb_size
!= ds
->cb_size
)
690 ds
->cb_hrtime
= (a
->cb_hrtime
> b
->cb_hrtime
) ?
691 a
->cb_hrtime
: b
->cb_hrtime
;
692 ds
->cb_tick
= a
->cb_tick
+ b
->cb_tick
;
694 for (i
= 0; i
< ds
->cb_size
/ sizeof (uint64_t); i
++)
695 ds
->cb_data
[i
] = a
->cb_data
[i
] + b
->cb_data
[i
];
700 cpc_buf_copy(cpc_t
*cpc
, cpc_buf_t
*ds
, cpc_buf_t
*src
)
702 if (ds
->cb_size
!= src
->cb_size
)
705 bcopy(src
->cb_data
, ds
->cb_data
, ds
->cb_size
);
706 ds
->cb_hrtime
= src
->cb_hrtime
;
707 ds
->cb_tick
= src
->cb_tick
;
712 cpc_buf_zero(cpc_t
*cpc
, cpc_buf_t
*buf
)
714 bzero(buf
->cb_data
, buf
->cb_size
);
720 * Gets or sets the value of the request specified by index.
724 cpc_buf_get(cpc_t
*cpc
, cpc_buf_t
*buf
, int index
, uint64_t *val
)
726 *val
= buf
->cb_data
[index
];
733 cpc_buf_set(cpc_t
*cpc
, cpc_buf_t
*buf
, int index
, uint64_t val
)
735 buf
->cb_data
[index
] = val
;
742 cpc_buf_hrtime(cpc_t
*cpc
, cpc_buf_t
*buf
)
744 return (buf
->cb_hrtime
);
749 cpc_buf_tick(cpc_t
*cpc
, cpc_buf_t
*buf
)
751 return (buf
->cb_tick
);
755 cpc_get_list(int which
, int arg
)
761 if (which
== CPC_LIST_ATTRS
)
762 szcmd
= CPC_ATTRLIST_SIZE
;
764 szcmd
= CPC_EVLIST_SIZE
;
766 if (syscall(SYS_cpc
, szcmd
, -1, &size
, arg
, 0) != 0)
769 if ((list
= malloc(size
)) == NULL
)
772 if (syscall(SYS_cpc
, which
, -1, list
, arg
, 0) != 0) {
782 cpc_walk_requests(cpc_t
*cpc
, cpc_set_t
*set
, void *arg
,
783 void (*action
)(void *arg
, int index
, const char *event
, uint64_t preset
,
784 uint_t flags
, int nattrs
, const cpc_attr_t
*attrs
))
787 cpc_attr_t
*attrs
= NULL
;
790 for (rp
= set
->cs_request
; rp
!= NULL
; rp
= rp
->cr_next
) {
792 * Need to reconstruct a temporary cpc_attr_t array for req.
794 if (rp
->cr_nattrs
!= 0)
795 if ((attrs
= malloc(rp
->cr_nattrs
*
796 sizeof (cpc_attr_t
))) == NULL
)
798 for (i
= 0; i
< rp
->cr_nattrs
; i
++) {
799 attrs
[i
].ca_name
= rp
->cr_attr
[i
].ka_name
;
800 attrs
[i
].ca_val
= rp
->cr_attr
[i
].ka_val
;
803 action(arg
, rp
->cr_index
, rp
->cr_event
, rp
->cr_preset
,
804 rp
->cr_flags
, rp
->cr_nattrs
, attrs
);
806 if (rp
->cr_nattrs
!= 0)
813 cpc_walk_events_impl(cpc_t
*cpc
, void *arg
,
814 void (*action
)(void *arg
, const char *event
), int is_generic
)
820 int ncounters
= cpc_npic(cpc
);
823 if ((list
= malloc(ncounters
* sizeof (char *))) == NULL
)
826 if ((hash
= __cpc_strhash_alloc()) == NULL
) {
831 for (i
= 0; i
< ncounters
; i
++) {
832 if ((list
[i
] = strdup(cpc
->cpc_evlist
[i
])) == NULL
)
835 while ((e
= strchr(p
, ',')) != NULL
) {
839 * Based on is_generic flag, skip appropriate
842 is_papi
= (strncmp(p
, "PAPI", 4) == 0);
843 if (is_generic
!= is_papi
) {
848 if (__cpc_strhash_add(hash
, p
) == -1)
854 is_papi
= (strncmp(p
, "PAPI", 4) == 0);
855 if (is_generic
== is_papi
) {
856 if (__cpc_strhash_add(hash
, p
) == -1)
861 while ((p
= __cpc_strhash_next(hash
)) != NULL
)
865 __cpc_strhash_free(hash
);
866 for (i
= 0; i
< ncounters
; i
++)
873 cpc_walk_events_all(cpc_t
*cpc
, void *arg
,
874 void (*action
)(void *arg
, const char *event
))
876 cpc_walk_events_impl(cpc
, arg
, action
, 0);
882 cpc_walk_generic_events_all(cpc_t
*cpc
, void *arg
,
883 void (*action
)(void *arg
, const char *event
))
885 cpc_walk_events_impl(cpc
, arg
, action
, 1);
890 cpc_walk_events_pic_impl(cpc_t
*cpc
, uint_t picno
, void *arg
,
891 void (*action
)(void *arg
, uint_t picno
, const char *event
), int is_generic
)
898 if (picno
>= cpc
->cpc_npic
) {
903 if ((list
= strdup(cpc
->cpc_evlist
[picno
])) == NULL
)
907 * List now points to a comma-separated list of events supported by
908 * the designated pic.
911 while ((e
= strchr(p
, ',')) != NULL
) {
915 * Based on is_generic flag, skip appropriate
918 is_papi
= (strncmp(p
, "PAPI", 4) == 0);
919 if (is_generic
!= is_papi
) {
924 action(arg
, picno
, p
);
928 is_papi
= (strncmp(p
, "PAPI", 4) == 0);
929 if (is_generic
== is_papi
)
930 action(arg
, picno
, p
);
937 cpc_walk_events_pic(cpc_t
*cpc
, uint_t picno
, void *arg
,
938 void (*action
)(void *arg
, uint_t picno
, const char *event
))
940 cpc_walk_events_pic_impl(cpc
, picno
, arg
, action
, 0);
945 cpc_walk_generic_events_pic(cpc_t
*cpc
, uint_t picno
, void *arg
,
946 void (*action
)(void *arg
, uint_t picno
, const char *event
))
948 cpc_walk_events_pic_impl(cpc
, picno
, arg
, action
, 1);
953 cpc_walk_attrs(cpc_t
*cpc
, void *arg
,
954 void (*action
)(void *arg
, const char *attr
))
960 if ((list
= strdup(cpc
->cpc_attrlist
)) == NULL
)
964 * Platforms with no attributes will return an empty string.
970 * List now points to a comma-separated list of attributes supported by
971 * the underlying platform.
974 while ((e
= strchr(p
, ',')) != NULL
) {
986 cpc_enable(cpc_t
*cpc
)
988 return (syscall(SYS_cpc
, CPC_ENABLE
, -1, 0, 0, 0));
993 cpc_disable(cpc_t
*cpc
)
995 return (syscall(SYS_cpc
, CPC_DISABLE
, -1, 0, 0, 0));
1000 cpc_npic(cpc_t
*cpc
)
1002 return (cpc
->cpc_npic
);
1007 cpc_caps(cpc_t
*cpc
)
1009 return (cpc
->cpc_caps
);
1013 cpc_cciname(cpc_t
*cpc
)
1015 return (cpc
->cpc_cciname
);
1019 cpc_cpuref(cpc_t
*cpc
)
1021 return (cpc
->cpc_cpuref
);
1025 cpc_seterrhndlr(cpc_t
*cpc
, cpc_errhndlr_t
*fn
)
1027 cpc
->cpc_errfn
= fn
;
1032 * These strings may contain printf() conversion specifiers.
1034 static const char *errstr
[] = {
1035 "", /* zero slot filler */
1036 "Unknown event\n", /* CPC_INVALID_EVENT */
1037 "Invalid counter number\n", /* CPC_INVALID_PICNUM */
1038 "Unknown attribute\n", /* CPC_INVALID_ATTRIBUTE */
1039 "Attribute out of range\n", /* CPC_ATTRIBUTE_OUT_OF_RANGE */
1040 "Hardware resource unavailable\n", /* CPC_RESOURCE_UNAVAIL */
1041 "Counter cannot count requested event\n", /* CPC_PIC_NOT_CAPABLE */
1042 "Invalid flags in a request\n", /* CPC_REQ_INVALID_FLAGS */
1043 "Requests conflict with each other\n", /* CPC_CONFLICTING_REQS */
1044 "Attribute requires the cpc_cpu privilege\n", /* CPC_ATTR_REQUIRES_PRIVILEGE */
1045 "Couldn't bind LWP to requested processor\n", /* CPC_PBIND_FAILED */
1046 "Hypervisor event access denied\n" /* CPC_HV_NO_ACCESS */
1051 cpc_err(cpc_t
*cpc
, const char *fn
, int subcode
, ...)
1058 * If subcode is -1, there is no specific description for this error.
1064 * We need to preserve errno across calls to this function to prevent it
1065 * from being clobbered while here, or in the user's error handler.
1069 str
= dgettext(TEXT_DOMAIN
, errstr
[subcode
]);
1071 va_start(ap
, subcode
);
1072 if (cpc
->cpc_errfn
!= NULL
)
1073 cpc
->cpc_errfn(fn
, subcode
, str
, ap
);
1076 * If printf() conversion specifiers are added to the errstr[]
1077 * table, this call needs to be changed to vfprintf().
1079 (void) fprintf(stderr
, "libcpc: %s: %s", fn
, str
);
1087 * Hook used by libpctx to alert libcpc when a pctx handle is going away.
1088 * This is necessary to prevent libcpc from attempting a libpctx operation on a
1089 * stale and invalid pctx_t handle. Since pctx_t's are cached by libcpc, we need
1090 * to be notified when they go away.
1093 cpc_invalidate_pctx(cpc_t
*cpc
, pctx_t
*pctx
)
1098 sigblocked
= cpc_lock(cpc
);
1099 for (set
= cpc
->cpc_sets
; set
!= NULL
; set
= set
->cs_next
)
1100 if (set
->cs_pctx
== pctx
)
1101 set
->cs_pctx
= NULL
;
1102 cpc_unlock(cpc
, sigblocked
);
1106 * Check that the set is valid; if so it will be in the cpc handle's
1107 * list of sets. The lock protects the list of sets, but not the set
1111 cpc_set_valid(cpc_t
*cpc
, cpc_set_t
*set
)
1116 sigblocked
= cpc_lock(cpc
);
1117 for (csp
= cpc
->cpc_sets
; csp
!= NULL
; csp
= csp
->cs_next
)
1120 cpc_unlock(cpc
, sigblocked
);
1127 cpc_lock(cpc_t
*cpc
)
1129 int ret
= (sigset(SIGEMT
, SIG_HOLD
) == SIG_HOLD
);
1130 (void) mutex_lock(&cpc
->cpc_lock
);
1135 cpc_unlock(cpc_t
*cpc
, int sigblocked
)
1137 (void) mutex_unlock(&cpc
->cpc_lock
);
1138 if (sigblocked
== 0)
1139 (void) sigrelse(SIGEMT
);
1149 ev_walker(void *arg
, uint_t picno
, const char *ev
)
1151 if (strcmp(((struct priv
*)arg
)->name
, ev
) == 0)
1152 ((struct priv
*)arg
)->found
= 1;
1156 at_walker(void *arg
, const char *at
)
1158 if (strcmp(((struct priv
*)arg
)->name
, at
) == 0)
1159 ((struct priv
*)arg
)->found
= 1;
1163 cpc_valid_event(cpc_t
*cpc
, uint_t pic
, const char *ev
)
1165 struct priv pr
= { NULL
, 0 };
1170 cpc_walk_events_pic(cpc
, pic
, &pr
, ev_walker
);
1174 cpc_walk_generic_events_pic(cpc
, pic
, &pr
, ev_walker
);
1179 * Before assuming this is an invalid event, see if we have been given
1181 * Check the second argument of strtol() to ensure invalid events
1182 * beginning with number do not go through.
1186 (void) strtol(ev
, &end_ev
, 0);
1187 if ((errno
== 0) && (*end_ev
== '\0')) {
1189 * Success - this is a valid raw code in hex, decimal, or octal.
1200 cpc_valid_attr(cpc_t
*cpc
, char *attr
)
1202 struct priv pr
= { NULL
, 0 };
1205 cpc_walk_attrs(cpc
, &pr
, at_walker
);