/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
#include <sys/types.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <fcntl.h>
#include <unistd.h>
#include <libintl.h>
#include <synch.h>
#include <sys/cpc_impl.h>

#include "libcpc.h"
#include "libcpc_impl.h"
/*
 * CPC library handle for use by CPCv1 implementation.
 */
mutex_t __cpc_lock;		/* protects __cpc handle */
int __cpc_v1_cpuver;		/* CPU version in use by CPCv1 client */
uint32_t __cpc_v1_pes[2];	/* last bound %pes values */
int
__cpc_init(void)
{
	const char *fn = "__cpc_init";
	extern cpc_t *__cpc;	/* CPC handle for obsolete clients to share */

	(void) mutex_lock(&__cpc_lock);
	if (__cpc == NULL && (__cpc = cpc_open(CPC_VER_CURRENT)) == NULL) {
		__cpc_error(fn, dgettext(TEXT_DOMAIN,
		    "Couldn't open CPC library handle\n"));
		(void) mutex_unlock(&__cpc_lock);
		return (-1);
	}
	(void) mutex_unlock(&__cpc_lock);

	return (0);
}
int
cpc_bind_event(cpc_event_t *this, int flags)
{
	cpc_set_t	*set;
	cpc_request_t	*rp;
	int		ret;

	if (__cpc_init() != 0) {
		return (-1);
	}

	/*
	 * The cpuver and control fields of the cpc_event_t must be saved off
	 * for later. The user may call cpc_take_sample(), expecting these to
	 * be copied into a different cpc_event_t struct by the kernel. We have
	 * to fake that behavior for CPCv1 clients.
	 */
	__cpc_v1_cpuver = this->ce_cpuver;
	__cpc_v1_pes[0] = this->ce_pes[0];
	__cpc_v1_pes[1] = this->ce_pes[1];

	if ((set = __cpc_eventtoset(__cpc, this, flags)) == NULL) {
		return (-1);
	}

	/*
	 * Convert flags to CPC2.
	 */
	if (flags & CPC_BIND_EMT_OVF) {
		for (rp = set->cs_request; rp != NULL; rp = rp->cr_next)
			rp->cr_flags |= CPC_OVF_NOTIFY_EMT;
		flags &= ~CPC_BIND_EMT_OVF;
	}

	ret = cpc_bind_curlwp(__cpc, set, flags);

	(void) cpc_set_destroy(__cpc, set);

	return (ret);
}
int
cpc_take_sample(cpc_event_t *this)
{
	this->ce_cpuver = __cpc_v1_cpuver;
	this->ce_pes[0] = __cpc_v1_pes[0];
	this->ce_pes[1] = __cpc_v1_pes[1];

	return (syscall(SYS_cpc, CPC_SAMPLE, -1, this->ce_pic, &this->ce_hrt,
	    &CPC_TICKREG(this), 0));
}
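
/*
 * Illustrative usage sketch (not part of the library): a CPCv1 client
 * binds an event to the current LWP and later samples it into a
 * *different* cpc_event_t; the ce_cpuver and ce_pes[] values cached by
 * cpc_bind_event() above are faked back into that struct by
 * cpc_take_sample().  The cpc_getcpuver() and cpc_strtoevent() calls
 * are assumed from the CPCv1 API, and the event specification string
 * is hypothetical.
 *
 *	cpc_event_t ev, sample;
 *	int cpuver = cpc_getcpuver();
 *
 *	if (cpc_strtoevent(cpuver, "pic0=...,pic1=...", &ev) != 0)
 *		return (-1);
 *	if (cpc_bind_event(&ev, 0) == -1)
 *		return (-1);
 *	... run the code being measured ...
 *	if (cpc_take_sample(&sample) == -1)
 *		return (-1);
 */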
int
cpc_count_usr_events(int enable)
{
	return (syscall(SYS_cpc, CPC_USR_EVENTS, -1, enable, 0));
}
int
cpc_count_sys_events(int enable)
{
	return (syscall(SYS_cpc, CPC_SYS_EVENTS, -1, enable, 0));
}
int
cpc_rele(void)
{
	return (syscall(SYS_cpc, CPC_RELE, -1, NULL, 0));
}
/*
 * See if the system call is working and installed.
 *
 * We invoke the system call with nonsense arguments - if it's
 * there and working correctly, it will return EINVAL.
 *
 * (This avoids the user getting a SIGSYS core dump when they attempt
 * to bind on older hardware)
 */
int
cpc_access(void)
{
	void (*handler)(int);
	int error = 0;
	const char fn[] = "access";

	handler = signal(SIGSYS, SIG_IGN);
	if (syscall(SYS_cpc, -1, -1, NULL, 0) == -1 &&
	    errno != EINVAL)
		error = errno;
	(void) signal(SIGSYS, handler);

	if (error == 0)
		return (0);

	switch (error) {
	case EAGAIN:
		__cpc_error(fn, dgettext(TEXT_DOMAIN, "Another process may be "
		    "sampling system-wide CPU statistics\n"));
		break;
	case ENOSYS:
		__cpc_error(fn,
		    dgettext(TEXT_DOMAIN, "CPU performance counters "
		    "are inaccessible on this machine\n"));
		break;
	default:
		__cpc_error(fn, "%s\n", strerror(errno));
		break;
	}

	return (-1);
}
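
/*
 * Illustrative usage sketch (not part of the library): clients are
 * expected to call cpc_access() once before binding, so that missing
 * hardware or a busy shared device is reported up front through
 * __cpc_error()/cpc_seterrfn() rather than as a later bind failure.
 *
 *	if (cpc_access() == -1)
 *		exit(1);
 */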
/*
 * To look at the system-wide counters, we have to open the
 * 'shared' device. Once that device is open, no further contexts
 * can be installed (though one open is needed per CPU)
 */
int
cpc_shared_open(void)
{
	const char driver[] = CPUDRV_SHARED;

	return (open(driver, O_RDWR));
}

void
cpc_shared_close(int fd)
{
	(void) cpc_shared_rele(fd);
	(void) close(fd);
}
int
cpc_shared_bind_event(int fd, cpc_event_t *this, int flags)
{
	cpc_set_t	*set;
	char		*packed_set;
	size_t		packsize;
	int		subcode;
	int		ret;
	__cpc_args_t	cpc_args;

	if (this == NULL) {
		(void) cpc_shared_rele(fd);
		return (0);
	} else if (flags != 0) {
		return (-1);
	}

	if (__cpc_init() != 0) {
		return (-1);
	}

	if ((set = __cpc_eventtoset(__cpc, this, flags)) == NULL) {
		return (-1);
	}

	__cpc_v1_cpuver = this->ce_cpuver;

	if ((packed_set = __cpc_pack_set(set, flags, &packsize)) == NULL) {
		return (-1);
	}

	cpc_args.udata1 = packed_set;
	cpc_args.udata2 = (void *)packsize;
	cpc_args.udata3 = (void *)&subcode;

	ret = ioctl(fd, CPCIO_BIND, &cpc_args);

	(void) cpc_set_destroy(__cpc, set);

	return (ret);
}
int
cpc_shared_take_sample(int fd, cpc_event_t *this)
{
	__cpc_args_t args;

	args.udata1 = this->ce_pic;
	args.udata2 = &this->ce_hrt;
	args.udata3 = &CPC_TICKREG(this);

	this->ce_cpuver = __cpc_v1_cpuver;

	return (ioctl(fd, CPCIO_SAMPLE, &args));
}
int
cpc_shared_rele(int fd)
{
	return (ioctl(fd, CPCIO_RELE, 0));
}
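
/*
 * Illustrative usage sketch (not part of the library): system-wide
 * sampling through the 'shared' device, using the routines above.  One
 * descriptor is opened per CPU to be measured, and flags must be 0 for
 * cpc_shared_bind_event().  Initializing ev with cpc_strtoevent() is
 * assumed from the CPCv1 API and not shown.
 *
 *	cpc_event_t ev, sample;
 *	int fd;
 *
 *	if ((fd = cpc_shared_open()) == -1)
 *		return (-1);
 *	if (cpc_shared_bind_event(fd, &ev, 0) == -1 ||
 *	    cpc_shared_take_sample(fd, &sample) == -1) {
 *		cpc_shared_close(fd);
 *		return (-1);
 *	}
 *	cpc_shared_close(fd);
 *	return (0);
 */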
int
cpc_pctx_bind_event(pctx_t *pctx, id_t lwpid, cpc_event_t *event, int flags)
{
	cpc_set_t	*set;
	int		ret;

	if (event == NULL)
		return (cpc_pctx_rele(pctx, lwpid));

	if (__cpc_init() != 0) {
		return (-1);
	} else if (flags != 0) {
		return (-1);
	}

	if ((set = __cpc_eventtoset(__cpc, event, flags)) == NULL) {
		return (-1);
	}

	/*
	 * The cpuver and control fields of the cpc_event_t must be saved off
	 * for later. The user may call cpc_take_sample(), expecting these to
	 * be copied into a different cpc_event_t struct by the kernel. We have
	 * to fake that behavior for CPCv1 clients.
	 */
	__cpc_v1_cpuver = event->ce_cpuver;

	ret = cpc_bind_pctx(__cpc, pctx, lwpid, set, 0);

	(void) cpc_set_destroy(__cpc, set);

	return (ret);
}
int
cpc_pctx_take_sample(pctx_t *pctx, id_t lwpid, cpc_event_t *event)
{
	event->ce_cpuver = __cpc_v1_cpuver;

	return (__pctx_cpc(pctx, __cpc, CPC_SAMPLE, lwpid, event->ce_pic,
	    &event->ce_hrt, &CPC_TICKREG(event), CPC1_BUFSIZE));
}
/*
 * Given a process context and an lwpid, mark the CPU performance
 * counter context as invalid.
 */
int
cpc_pctx_invalidate(pctx_t *pctx, id_t lwpid)
{
	return (__pctx_cpc(pctx, __cpc, CPC_INVALIDATE, lwpid, 0, 0, 0, 0));
}
/*
 * Given a process context and an lwpid, remove all our
 * hardware context from it.
 */
int
cpc_pctx_rele(pctx_t *pctx, id_t lwpid)
{
	return (__pctx_cpc(pctx, __cpc, CPC_RELE, lwpid, 0, 0, 0, 0));
}
static cpc_errfn_t *__cpc_uerrfn;

void
__cpc_error(const char *fn, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (__cpc_uerrfn != NULL) {
		__cpc_uerrfn(fn, fmt, ap);
	} else {
		(void) fprintf(stderr, "libcpc: %s: ", fn);
		(void) vfprintf(stderr, fmt, ap);
	}
	va_end(ap);
}
void
cpc_seterrfn(cpc_errfn_t *errfn)
{
	__cpc_uerrfn = errfn;
}
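
/*
 * Illustrative usage sketch (not part of the library): a client can
 * capture the library's error messages by installing its own
 * cpc_errfn_t.  As __cpc_error() above shows, the handler receives the
 * failing function's name, a printf-style format string and a va_list.
 *
 *	static void
 *	my_errfn(const char *fn, const char *fmt, va_list ap)
 *	{
 *		(void) fprintf(stderr, "myapp: cpc %s: ", fn);
 *		(void) vfprintf(stderr, fmt, ap);
 *	}
 *
 *	...
 *	cpc_seterrfn(my_errfn);
 */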
/*
 * cpc_version() is only for CPC1 clients.
 */
uint_t __cpc_workver = CPC_VER_1;

uint_t
cpc_version(uint_t ver)
{
	__cpc_workver = CPC_VER_1;

	switch (ver) {
	case CPC_VER_CURRENT:
		return (CPC_VER_CURRENT);
	case CPC_VER_1:
		/*
		 * As long as the client is using cpc_version() at all, it is
		 * a CPCv1 client. We still allow CPCv1 clients to compile on
		 * systems that support the newer CPC interfaces.
		 */
		return (CPC_VER_1);
	}

	return (CPC_VER_NONE);
}