Merge remote-tracking branch 'origin/master'
[unleashed/lotheac.git] / usr / src / uts / common / dtrace / dtrace.c
blob383a61bd89fc32660b735ec2530847a81920e3a9
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2017, Joyent, Inc.
25 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
29 * DTrace - Dynamic Tracing for Solaris
31 * This is the implementation of the Solaris Dynamic Tracing framework
32 * (DTrace). The user-visible interface to DTrace is described at length in
33 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace
34 * library, the in-kernel DTrace framework, and the DTrace providers are
35 * described in the block comments in the <sys/dtrace.h> header file. The
36 * internal architecture of DTrace is described in the block comments in the
37 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace
38 * implementation very much assume mastery of all of these sources; if one has
39 * an unanswered question about the implementation, one should consult them
40 * first.
42 * The functions here are ordered roughly as follows:
44 * - Probe context functions
45 * - Probe hashing functions
46 * - Non-probe context utility functions
47 * - Matching functions
48 * - Provider-to-Framework API functions
49 * - Probe management functions
50 * - DIF object functions
51 * - Format functions
52 * - Predicate functions
53 * - ECB functions
54 * - Buffer functions
55 * - Enabling functions
56 * - DOF functions
57 * - Anonymous enabling functions
58 * - Consumer state functions
59 * - Helper functions
60 * - Hook functions
61 * - Driver cookbook functions
63 * Each group of functions begins with a block comment labelled the "DTrace
64 * [Group] Functions", allowing one to find each block by searching forward
65 * on capital-f functions.
67 #include <sys/errno.h>
68 #include <sys/stat.h>
69 #include <sys/modctl.h>
70 #include <sys/conf.h>
71 #include <sys/systm.h>
72 #include <sys/ddi.h>
73 #include <sys/sunddi.h>
74 #include <sys/cpuvar.h>
75 #include <sys/kmem.h>
76 #include <sys/strsubr.h>
77 #include <sys/sysmacros.h>
78 #include <sys/dtrace_impl.h>
79 #include <sys/atomic.h>
80 #include <sys/cmn_err.h>
81 #include <sys/mutex_impl.h>
82 #include <sys/rwlock_impl.h>
83 #include <sys/ctf_api.h>
84 #include <sys/panic.h>
85 #include <sys/priv_impl.h>
86 #include <sys/policy.h>
87 #include <sys/cred_impl.h>
88 #include <sys/procfs_isa.h>
89 #include <sys/taskq.h>
90 #include <sys/mkdev.h>
91 #include <sys/kdi.h>
92 #include <sys/zone.h>
93 #include <sys/socket.h>
94 #include <netinet/in.h>
95 #include "strtolctype.h"
98 * DTrace Tunable Variables
100 * The following variables may be tuned by adding a line to /etc/system that
101 * includes both the name of the DTrace module ("dtrace") and the name of the
102 * variable. For example:
104 * set dtrace:dtrace_destructive_disallow = 1
106 * In general, the only variables that one should be tuning this way are those
107 * that affect system-wide DTrace behavior, and for which the default behavior
108 * is undesirable. Most of these variables are tunable on a per-consumer
109 * basis using DTrace options, and need not be tuned on a system-wide basis.
110 * When tuning these variables, avoid pathological values; while some attempt
111 * is made to verify the integrity of these variables, they are not considered
112 * part of the supported interface to DTrace, and they are therefore not
113 * checked comprehensively. Further, these variables should not be tuned
114 * dynamically via "mdb -kw" or other means; they should only be tuned via
115 * /etc/system.
117 int dtrace_destructive_disallow = 0;
118 dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024);
119 size_t dtrace_difo_maxsize = (256 * 1024);
120 dtrace_optval_t dtrace_dof_maxsize = (8 * 1024 * 1024);
121 size_t dtrace_statvar_maxsize = (16 * 1024);
122 size_t dtrace_actions_max = (16 * 1024);
123 size_t dtrace_retain_max = 1024;
124 dtrace_optval_t dtrace_helper_actions_max = 1024;
125 dtrace_optval_t dtrace_helper_providers_max = 32;
126 dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024);
127 size_t dtrace_strsize_default = 256;
128 dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */
129 dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */
130 dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */
131 dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */
132 dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */
133 dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */
134 dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */
135 dtrace_optval_t dtrace_nspec_default = 1;
136 dtrace_optval_t dtrace_specsize_default = 32 * 1024;
137 dtrace_optval_t dtrace_stackframes_default = 20;
138 dtrace_optval_t dtrace_ustackframes_default = 20;
139 dtrace_optval_t dtrace_jstackframes_default = 50;
140 dtrace_optval_t dtrace_jstackstrsize_default = 512;
141 int dtrace_msgdsize_max = 128;
142 hrtime_t dtrace_chill_max = MSEC2NSEC(500); /* 500 ms */
143 hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */
144 int dtrace_devdepth_max = 32;
145 int dtrace_err_verbose;
146 hrtime_t dtrace_deadman_interval = NANOSEC;
147 hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
148 hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
149 hrtime_t dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC;
152 * DTrace External Variables
154 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
155 * available to DTrace consumers via the backtick (`) syntax. One of these,
156 * dtrace_zero, is made deliberately so: it is provided as a source of
157 * well-known, zero-filled memory. While this variable is not documented,
158 * it is used by some translators as an implementation detail.
160 const char dtrace_zero[256] = { 0 }; /* zero-filled memory */
163 * DTrace Internal Variables
165 static dev_info_t *dtrace_devi; /* device info */
166 static vmem_t *dtrace_arena; /* probe ID arena */
167 static vmem_t *dtrace_minor; /* minor number arena */
168 static taskq_t *dtrace_taskq; /* task queue */
169 static dtrace_probe_t **dtrace_probes; /* array of all probes */
170 static int dtrace_nprobes; /* number of probes */
171 static dtrace_provider_t *dtrace_provider; /* provider list */
172 static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */
173 static int dtrace_opens; /* number of opens */
174 static int dtrace_helpers; /* number of helpers */
175 static int dtrace_getf; /* number of unpriv getf()s */
176 static void *dtrace_softstate; /* softstate pointer */
177 static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */
178 static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */
179 static dtrace_hash_t *dtrace_byname; /* probes hashed by name */
180 static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */
181 static int dtrace_toxranges; /* number of toxic ranges */
182 static int dtrace_toxranges_max; /* size of toxic range array */
183 static dtrace_anon_t dtrace_anon; /* anonymous enabling */
184 static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */
185 static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */
186 static kthread_t *dtrace_panicked; /* panicking thread */
187 static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */
188 static dtrace_genid_t dtrace_probegen; /* current probe generation */
189 static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */
190 static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */
191 static dtrace_genid_t dtrace_retained_gen; /* current retained enab gen */
192 static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */
193 static int dtrace_dynvar_failclean; /* dynvars failed to clean */
196 * DTrace Locking
197 * DTrace is protected by three (relatively coarse-grained) locks:
199 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
200 * including enabling state, probes, ECBs, consumer state, helper state,
201 * etc. Importantly, dtrace_lock is _not_ required when in probe context;
202 * probe context is lock-free -- synchronization is handled via the
203 * dtrace_sync() cross call mechanism.
205 * (2) dtrace_provider_lock is required when manipulating provider state, or
206 * when provider state must be held constant.
208 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
209 * when meta provider state must be held constant.
211 * The lock ordering between these three locks is dtrace_meta_lock before
212 * dtrace_provider_lock before dtrace_lock. (In particular, there are
213 * several places where dtrace_provider_lock is held by the framework as it
214 * calls into the providers -- which then call back into the framework,
215 * grabbing dtrace_lock.)
217 * There are two other locks in the mix: mod_lock and cpu_lock. With respect
218 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
219 * role as a coarse-grained lock; it is acquired before both of these locks.
220 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must
221 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
222 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
223 * acquired _between_ dtrace_provider_lock and dtrace_lock.
225 static kmutex_t dtrace_lock; /* probe state lock */
226 static kmutex_t dtrace_provider_lock; /* provider state lock */
227 static kmutex_t dtrace_meta_lock; /* meta-provider state lock */
230 * DTrace Provider Variables
232 * These are the variables relating to DTrace as a provider (that is, the
233 * provider of the BEGIN, END, and ERROR probes).
235 static dtrace_pattr_t dtrace_provider_attr = {
236 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
237 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
238 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
239 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
240 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
243 static void
244 dtrace_nullop(void)
247 static int
248 dtrace_enable_nullop(void)
250 return (0);
253 static dtrace_pops_t dtrace_provider_ops = {
254 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
255 (void (*)(void *, struct modctl *))dtrace_nullop,
256 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop,
257 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
258 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
259 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
260 NULL,
261 NULL,
262 NULL,
263 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop
266 static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */
267 static dtrace_id_t dtrace_probeid_end; /* special END probe */
268 dtrace_id_t dtrace_probeid_error; /* special ERROR probe */
271 * DTrace Helper Tracing Variables
273 * These variables should be set dynamically to enable helper tracing. The
274 * only variables that should be set are dtrace_helptrace_enable (which should
275 * be set to a non-zero value to allocate helper tracing buffers on the next
276 * open of /dev/dtrace) and dtrace_helptrace_disable (which should be set to a
277 * non-zero value to deallocate helper tracing buffers on the next close of
278 * /dev/dtrace). When (and only when) helper tracing is disabled, the
279 * buffer size may also be set via dtrace_helptrace_bufsize.
281 int dtrace_helptrace_enable = 0;
282 int dtrace_helptrace_disable = 0;
283 int dtrace_helptrace_bufsize = 16 * 1024 * 1024;
284 uint32_t dtrace_helptrace_nlocals;
285 static dtrace_helptrace_t *dtrace_helptrace_buffer;
286 static uint32_t dtrace_helptrace_next = 0;
287 static int dtrace_helptrace_wrapped = 0;
290 * DTrace Error Hashing
292 * On DEBUG kernels, DTrace will track the errors that has seen in a hash
293 * table. This is very useful for checking coverage of tests that are
294 * expected to induce DIF or DOF processing errors, and may be useful for
295 * debugging problems in the DIF code generator or in DOF generation . The
296 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
298 #ifdef DEBUG
299 static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ];
300 static const char *dtrace_errlast;
301 static kthread_t *dtrace_errthread;
302 static kmutex_t dtrace_errlock;
303 #endif
306 * DTrace Macros and Constants
308 * These are various macros that are useful in various spots in the
309 * implementation, along with a few random constants that have no meaning
310 * outside of the implementation. There is no real structure to this cpp
311 * mishmash -- but is there ever?
313 #define DTRACE_HASHSTR(hash, probe) \
314 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))
316 #define DTRACE_HASHNEXT(hash, probe) \
317 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)
319 #define DTRACE_HASHPREV(hash, probe) \
320 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)
322 #define DTRACE_HASHEQ(hash, lhs, rhs) \
323 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
324 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)
326 #define DTRACE_AGGHASHSIZE_SLEW 17
328 #define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3)
331 * The key for a thread-local variable consists of the lower 61 bits of the
332 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
333 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
334 * equal to a variable identifier. This is necessary (but not sufficient) to
335 * assure that global associative arrays never collide with thread-local
336 * variables. To guarantee that they cannot collide, we must also define the
337 * order for keying dynamic variables. That order is:
339 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
341 * Because the variable-key and the tls-key are in orthogonal spaces, there is
342 * no way for a global variable key signature to match a thread-local key
343 * signature.
345 #define DTRACE_TLS_THRKEY(where) { \
346 uint_t intr = 0; \
347 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
348 for (; actv; actv >>= 1) \
349 intr++; \
350 ASSERT(intr < (1 << 3)); \
351 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
352 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
355 #define DT_BSWAP_8(x) ((x) & 0xff)
356 #define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
357 #define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
358 #define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))
360 #define DT_MASK_LO 0x00000000FFFFFFFFULL
362 #define DTRACE_STORE(type, tomax, offset, what) \
363 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);
365 #ifndef __x86
366 #define DTRACE_ALIGNCHECK(addr, size, flags) \
367 if (addr & (size - 1)) { \
368 *flags |= CPU_DTRACE_BADALIGN; \
369 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
370 return (0); \
372 #else
373 #define DTRACE_ALIGNCHECK(addr, size, flags)
374 #endif
377 * Test whether a range of memory starting at testaddr of size testsz falls
378 * within the range of memory described by addr, sz. We take care to avoid
379 * problems with overflow and underflow of the unsigned quantities, and
380 * disallow all negative sizes. Ranges of size 0 are allowed.
382 #define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
383 ((testaddr) - (uintptr_t)(baseaddr) < (basesz) && \
384 (testaddr) + (testsz) - (uintptr_t)(baseaddr) <= (basesz) && \
385 (testaddr) + (testsz) >= (testaddr))
387 #define DTRACE_RANGE_REMAIN(remp, addr, baseaddr, basesz) \
388 do { \
389 if ((remp) != NULL) { \
390 *(remp) = (uintptr_t)(baseaddr) + (basesz) - (addr); \
392 _NOTE(CONSTCOND) } while (0)
396 * Test whether alloc_sz bytes will fit in the scratch region. We isolate
397 * alloc_sz on the righthand side of the comparison in order to avoid overflow
398 * or underflow in the comparison with it. This is simpler than the INRANGE
399 * check above, because we know that the dtms_scratch_ptr is valid in the
400 * range. Allocations of size zero are allowed.
402 #define DTRACE_INSCRATCH(mstate, alloc_sz) \
403 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
404 (mstate)->dtms_scratch_ptr >= (alloc_sz))
406 #define DTRACE_LOADFUNC(bits) \
407 /*CSTYLED*/ \
408 uint##bits##_t \
409 dtrace_load##bits(uintptr_t addr) \
411 size_t size = bits / NBBY; \
412 /*CSTYLED*/ \
413 uint##bits##_t rval; \
414 int i; \
415 volatile uint16_t *flags = (volatile uint16_t *) \
416 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; \
418 DTRACE_ALIGNCHECK(addr, size, flags); \
420 for (i = 0; i < dtrace_toxranges; i++) { \
421 if (addr >= dtrace_toxrange[i].dtt_limit) \
422 continue; \
424 if (addr + size <= dtrace_toxrange[i].dtt_base) \
425 continue; \
427 /* \
428 * This address falls within a toxic region; return 0. \
429 */ \
430 *flags |= CPU_DTRACE_BADADDR; \
431 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
432 return (0); \
435 *flags |= CPU_DTRACE_NOFAULT; \
436 /*CSTYLED*/ \
437 rval = *((volatile uint##bits##_t *)addr); \
438 *flags &= ~CPU_DTRACE_NOFAULT; \
440 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \
443 #ifdef _LP64
444 #define dtrace_loadptr dtrace_load64
445 #else
446 #define dtrace_loadptr dtrace_load32
447 #endif
449 #define DTRACE_DYNHASH_FREE 0
450 #define DTRACE_DYNHASH_SINK 1
451 #define DTRACE_DYNHASH_VALID 2
453 #define DTRACE_MATCH_FAIL -1
454 #define DTRACE_MATCH_NEXT 0
455 #define DTRACE_MATCH_DONE 1
456 #define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0')
457 #define DTRACE_STATE_ALIGN 64
459 #define DTRACE_FLAGS2FLT(flags) \
460 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \
461 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \
462 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \
463 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \
464 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \
465 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \
466 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \
467 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \
468 ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \
469 DTRACEFLT_UNKNOWN)
471 #define DTRACEACT_ISSTRING(act) \
472 ((act)->dta_kind == DTRACEACT_DIFEXPR && \
473 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)
475 static size_t dtrace_strlen(const char *, size_t);
476 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
477 static void dtrace_enabling_provide(dtrace_provider_t *);
478 static int dtrace_enabling_match(dtrace_enabling_t *, int *);
479 static void dtrace_enabling_matchall(void);
480 static void dtrace_enabling_reap(void);
481 static dtrace_state_t *dtrace_anon_grab(void);
482 static uint64_t dtrace_helper(int, dtrace_mstate_t *,
483 dtrace_state_t *, uint64_t, uint64_t);
484 static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
485 static void dtrace_buffer_drop(dtrace_buffer_t *);
486 static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
487 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
488 dtrace_state_t *, dtrace_mstate_t *);
489 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
490 dtrace_optval_t);
491 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
492 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
493 static int dtrace_priv_proc(dtrace_state_t *, dtrace_mstate_t *);
494 static void dtrace_getf_barrier(void);
495 static int dtrace_canload_remains(uint64_t, size_t, size_t *,
496 dtrace_mstate_t *, dtrace_vstate_t *);
497 static int dtrace_canstore_remains(uint64_t, size_t, size_t *,
498 dtrace_mstate_t *, dtrace_vstate_t *);
501 * DTrace Probe Context Functions
503 * These functions are called from probe context. Because probe context is
504 * any context in which C may be called, arbitrarily locks may be held,
505 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
506 * As a result, functions called from probe context may only call other DTrace
507 * support functions -- they may not interact at all with the system at large.
508 * (Note that the ASSERT macro is made probe-context safe by redefining it in
509 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
510 * loads are to be performed from probe context, they _must_ be in terms of
511 * the safe dtrace_load*() variants.
513 * Some functions in this block are not actually called from probe context;
514 * for these functions, there will be a comment above the function reading
515 * "Note: not called from probe context."
517 void
518 dtrace_panic(const char *format, ...)
520 va_list alist;
522 va_start(alist, format);
523 dtrace_vpanic(format, alist);
524 va_end(alist);
528 dtrace_assfail(const char *a, const char *f, int l)
530 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);
533 * We just need something here that even the most clever compiler
534 * cannot optimize away.
536 return (a[(uintptr_t)f]);
540 * Atomically increment a specified error counter from probe context.
542 static void
543 dtrace_error(uint32_t *counter)
546 * Most counters stored to in probe context are per-CPU counters.
547 * However, there are some error conditions that are sufficiently
548 * arcane that they don't merit per-CPU storage. If these counters
549 * are incremented concurrently on different CPUs, scalability will be
550 * adversely affected -- but we don't expect them to be white-hot in a
551 * correctly constructed enabling...
553 uint32_t oval, nval;
555 do {
556 oval = *counter;
558 if ((nval = oval + 1) == 0) {
560 * If the counter would wrap, set it to 1 -- assuring
561 * that the counter is never zero when we have seen
562 * errors. (The counter must be 32-bits because we
563 * aren't guaranteed a 64-bit compare&swap operation.)
564 * To save this code both the infamy of being fingered
565 * by a priggish news story and the indignity of being
566 * the target of a neo-puritan witch trial, we're
567 * carefully avoiding any colorful description of the
568 * likelihood of this condition -- but suffice it to
569 * say that it is only slightly more likely than the
570 * overflow of predicate cache IDs, as discussed in
571 * dtrace_predicate_create().
573 nval = 1;
575 } while (dtrace_cas32(counter, oval, nval) != oval);
579 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
580 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
582 /* BEGIN CSTYLED */
583 DTRACE_LOADFUNC(8)
584 DTRACE_LOADFUNC(16)
585 DTRACE_LOADFUNC(32)
586 DTRACE_LOADFUNC(64)
587 /* END CSTYLED */
589 static int
590 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
592 if (dest < mstate->dtms_scratch_base)
593 return (0);
595 if (dest + size < dest)
596 return (0);
598 if (dest + size > mstate->dtms_scratch_ptr)
599 return (0);
601 return (1);
604 static int
605 dtrace_canstore_statvar(uint64_t addr, size_t sz, size_t *remain,
606 dtrace_statvar_t **svars, int nsvars)
608 int i;
609 size_t maxglobalsize, maxlocalsize;
611 if (nsvars == 0)
612 return (0);
614 maxglobalsize = dtrace_statvar_maxsize + sizeof (uint64_t);
615 maxlocalsize = maxglobalsize * NCPU;
617 for (i = 0; i < nsvars; i++) {
618 dtrace_statvar_t *svar = svars[i];
619 uint8_t scope;
620 size_t size;
622 if (svar == NULL || (size = svar->dtsv_size) == 0)
623 continue;
625 scope = svar->dtsv_var.dtdv_scope;
628 * We verify that our size is valid in the spirit of providing
629 * defense in depth: we want to prevent attackers from using
630 * DTrace to escalate an orthogonal kernel heap corruption bug
631 * into the ability to store to arbitrary locations in memory.
633 VERIFY((scope == DIFV_SCOPE_GLOBAL && size <= maxglobalsize) ||
634 (scope == DIFV_SCOPE_LOCAL && size <= maxlocalsize));
636 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data,
637 svar->dtsv_size)) {
638 DTRACE_RANGE_REMAIN(remain, addr, svar->dtsv_data,
639 svar->dtsv_size);
640 return (1);
644 return (0);
648 * Check to see if the address is within a memory region to which a store may
649 * be issued. This includes the DTrace scratch areas, and any DTrace variable
650 * region. The caller of dtrace_canstore() is responsible for performing any
651 * alignment checks that are needed before stores are actually executed.
653 static int
654 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
655 dtrace_vstate_t *vstate)
657 return (dtrace_canstore_remains(addr, sz, NULL, mstate, vstate));
661 * Implementation of dtrace_canstore which communicates the upper bound of the
662 * allowed memory region.
664 static int
665 dtrace_canstore_remains(uint64_t addr, size_t sz, size_t *remain,
666 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
669 * First, check to see if the address is in scratch space...
671 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
672 mstate->dtms_scratch_size)) {
673 DTRACE_RANGE_REMAIN(remain, addr, mstate->dtms_scratch_base,
674 mstate->dtms_scratch_size);
675 return (1);
679 * Now check to see if it's a dynamic variable. This check will pick
680 * up both thread-local variables and any global dynamically-allocated
681 * variables.
683 if (DTRACE_INRANGE(addr, sz, vstate->dtvs_dynvars.dtds_base,
684 vstate->dtvs_dynvars.dtds_size)) {
685 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
686 uintptr_t base = (uintptr_t)dstate->dtds_base +
687 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
688 uintptr_t chunkoffs;
689 dtrace_dynvar_t *dvar;
692 * Before we assume that we can store here, we need to make
693 * sure that it isn't in our metadata -- storing to our
694 * dynamic variable metadata would corrupt our state. For
695 * the range to not include any dynamic variable metadata,
696 * it must:
698 * (1) Start above the hash table that is at the base of
699 * the dynamic variable space
701 * (2) Have a starting chunk offset that is beyond the
702 * dtrace_dynvar_t that is at the base of every chunk
704 * (3) Not span a chunk boundary
706 * (4) Not be in the tuple space of a dynamic variable
709 if (addr < base)
710 return (0);
712 chunkoffs = (addr - base) % dstate->dtds_chunksize;
714 if (chunkoffs < sizeof (dtrace_dynvar_t))
715 return (0);
717 if (chunkoffs + sz > dstate->dtds_chunksize)
718 return (0);
720 dvar = (dtrace_dynvar_t *)((uintptr_t)addr - chunkoffs);
722 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE)
723 return (0);
725 if (chunkoffs < sizeof (dtrace_dynvar_t) +
726 ((dvar->dtdv_tuple.dtt_nkeys - 1) * sizeof (dtrace_key_t)))
727 return (0);
729 DTRACE_RANGE_REMAIN(remain, addr, dvar, dstate->dtds_chunksize);
730 return (1);
734 * Finally, check the static local and global variables. These checks
735 * take the longest, so we perform them last.
737 if (dtrace_canstore_statvar(addr, sz, remain,
738 vstate->dtvs_locals, vstate->dtvs_nlocals))
739 return (1);
741 if (dtrace_canstore_statvar(addr, sz, remain,
742 vstate->dtvs_globals, vstate->dtvs_nglobals))
743 return (1);
745 return (0);
750 * Convenience routine to check to see if the address is within a memory
751 * region in which a load may be issued given the user's privilege level;
752 * if not, it sets the appropriate error flags and loads 'addr' into the
753 * illegal value slot.
755 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
756 * appropriate memory access protection.
758 static int
759 dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
760 dtrace_vstate_t *vstate)
762 return (dtrace_canload_remains(addr, sz, NULL, mstate, vstate));
766 * Implementation of dtrace_canload which communicates the upper bound of the
767 * allowed memory region.
769 static int
770 dtrace_canload_remains(uint64_t addr, size_t sz, size_t *remain,
771 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
773 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
774 file_t *fp;
777 * If we hold the privilege to read from kernel memory, then
778 * everything is readable.
780 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
781 DTRACE_RANGE_REMAIN(remain, addr, addr, sz);
782 return (1);
786 * You can obviously read that which you can store.
788 if (dtrace_canstore_remains(addr, sz, remain, mstate, vstate))
789 return (1);
792 * We're allowed to read from our own string table.
794 if (DTRACE_INRANGE(addr, sz, mstate->dtms_difo->dtdo_strtab,
795 mstate->dtms_difo->dtdo_strlen)) {
796 DTRACE_RANGE_REMAIN(remain, addr,
797 mstate->dtms_difo->dtdo_strtab,
798 mstate->dtms_difo->dtdo_strlen);
799 return (1);
802 if (vstate->dtvs_state != NULL &&
803 dtrace_priv_proc(vstate->dtvs_state, mstate)) {
804 proc_t *p;
807 * When we have privileges to the current process, there are
808 * several context-related kernel structures that are safe to
809 * read, even absent the privilege to read from kernel memory.
810 * These reads are safe because these structures contain only
811 * state that (1) we're permitted to read, (2) is harmless or
812 * (3) contains pointers to additional kernel state that we're
813 * not permitted to read (and as such, do not present an
814 * opportunity for privilege escalation). Finally (and
815 * critically), because of the nature of their relation with
816 * the current thread context, the memory associated with these
817 * structures cannot change over the duration of probe context,
818 * and it is therefore impossible for this memory to be
819 * deallocated and reallocated as something else while it's
820 * being operated upon.
822 if (DTRACE_INRANGE(addr, sz, curthread, sizeof (kthread_t))) {
823 DTRACE_RANGE_REMAIN(remain, addr, curthread,
824 sizeof (kthread_t));
825 return (1);
828 if ((p = curthread->t_procp) != NULL && DTRACE_INRANGE(addr,
829 sz, curthread->t_procp, sizeof (proc_t))) {
830 DTRACE_RANGE_REMAIN(remain, addr, curthread->t_procp,
831 sizeof (proc_t));
832 return (1);
835 if (curthread->t_cred != NULL && DTRACE_INRANGE(addr, sz,
836 curthread->t_cred, sizeof (cred_t))) {
837 DTRACE_RANGE_REMAIN(remain, addr, curthread->t_cred,
838 sizeof (cred_t));
839 return (1);
842 if (p != NULL && p->p_pidp != NULL && DTRACE_INRANGE(addr, sz,
843 &(p->p_pidp->pid_id), sizeof (pid_t))) {
844 DTRACE_RANGE_REMAIN(remain, addr, &(p->p_pidp->pid_id),
845 sizeof (pid_t));
846 return (1);
849 if (curthread->t_cpu != NULL && DTRACE_INRANGE(addr, sz,
850 curthread->t_cpu, offsetof(cpu_t, cpu_pause_thread))) {
851 DTRACE_RANGE_REMAIN(remain, addr, curthread->t_cpu,
852 offsetof(cpu_t, cpu_pause_thread));
853 return (1);
857 if ((fp = mstate->dtms_getf) != NULL) {
858 uintptr_t psz = sizeof (void *);
859 const struct vnodeops *op;
860 struct vnode *vp;
863 * When getf() returns a file_t, the enabling is implicitly
864 * granted the (transient) right to read the returned file_t
865 * as well as the v_path and v_op->vnop_name of the underlying
866 * vnode. These accesses are allowed after a successful
867 * getf() because the members that they refer to cannot change
868 * once set -- and the barrier logic in the kernel's closef()
869 * path assures that the file_t and its referenced vode_t
870 * cannot themselves be stale (that is, it impossible for
871 * either dtms_getf itself or its f_vnode member to reference
872 * freed memory).
874 if (DTRACE_INRANGE(addr, sz, fp, sizeof (file_t))) {
875 DTRACE_RANGE_REMAIN(remain, addr, fp, sizeof (file_t));
876 return (1);
879 if ((vp = fp->f_vnode) != NULL) {
880 size_t slen;
882 if (DTRACE_INRANGE(addr, sz, &vp->v_path, psz)) {
883 DTRACE_RANGE_REMAIN(remain, addr, &vp->v_path,
884 psz);
885 return (1);
888 slen = strlen(vp->v_path) + 1;
889 if (DTRACE_INRANGE(addr, sz, vp->v_path, slen)) {
890 DTRACE_RANGE_REMAIN(remain, addr, vp->v_path,
891 slen);
892 return (1);
895 if (DTRACE_INRANGE(addr, sz, &vp->v_op, psz)) {
896 DTRACE_RANGE_REMAIN(remain, addr, &vp->v_op,
897 psz);
898 return (1);
901 if ((op = vp->v_op) != NULL &&
902 DTRACE_INRANGE(addr, sz, &op->vnop_name, psz)) {
903 DTRACE_RANGE_REMAIN(remain, addr,
904 &op->vnop_name, psz);
905 return (1);
908 if (op != NULL && op->vnop_name != NULL &&
909 DTRACE_INRANGE(addr, sz, op->vnop_name,
910 (slen = strlen(op->vnop_name) + 1))) {
911 DTRACE_RANGE_REMAIN(remain, addr,
912 op->vnop_name, slen);
913 return (1);
918 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
919 *illval = addr;
920 return (0);
924 * Convenience routine to check to see if a given string is within a memory
925 * region in which a load may be issued given the user's privilege level;
926 * this exists so that we don't need to issue unnecessary dtrace_strlen()
927 * calls in the event that the user has all privileges.
929 static int
930 dtrace_strcanload(uint64_t addr, size_t sz, size_t *remain,
931 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
933 size_t rsize;
936 * If we hold the privilege to read from kernel memory, then
937 * everything is readable.
939 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
940 DTRACE_RANGE_REMAIN(remain, addr, addr, sz);
941 return (1);
945 * Even if the caller is uninterested in querying the remaining valid
946 * range, it is required to ensure that the access is allowed.
948 if (remain == NULL) {
949 remain = &rsize;
951 if (dtrace_canload_remains(addr, 0, remain, mstate, vstate)) {
952 size_t strsz;
954 * Perform the strlen after determining the length of the
955 * memory region which is accessible. This prevents timing
956 * information from being used to find NULs in memory which is
957 * not accessible to the caller.
959 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr,
960 MIN(sz, *remain));
961 if (strsz <= *remain) {
962 return (1);
966 return (0);
970 * Convenience routine to check to see if a given variable is within a memory
971 * region in which a load may be issued given the user's privilege level.
973 static int
974 dtrace_vcanload(void *src, dtrace_diftype_t *type, size_t *remain,
975 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
977 size_t sz;
978 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
981 * Calculate the max size before performing any checks since even
982 * DTRACE_ACCESS_KERNEL-credentialed callers expect that this function
983 * return the max length via 'remain'.
985 if (type->dtdt_kind == DIF_TYPE_STRING) {
986 dtrace_state_t *state = vstate->dtvs_state;
988 if (state != NULL) {
989 sz = state->dts_options[DTRACEOPT_STRSIZE];
990 } else {
992 * In helper context, we have a NULL state; fall back
993 * to using the system-wide default for the string size
994 * in this case.
996 sz = dtrace_strsize_default;
998 } else {
999 sz = type->dtdt_size;
1003 * If we hold the privilege to read from kernel memory, then
1004 * everything is readable.
1006 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
1007 DTRACE_RANGE_REMAIN(remain, (uintptr_t)src, src, sz);
1008 return (1);
1011 if (type->dtdt_kind == DIF_TYPE_STRING) {
1012 return (dtrace_strcanload((uintptr_t)src, sz, remain, mstate,
1013 vstate));
1015 return (dtrace_canload_remains((uintptr_t)src, sz, remain, mstate,
1016 vstate));
1020 * Convert a string to a signed integer using safe loads.
1022 * NOTE: This function uses various macros from strtolctype.h to manipulate
1023 * digit values, etc -- these have all been checked to ensure they make
1024 * no additional function calls.
1026 static int64_t
1027 dtrace_strtoll(char *input, int base, size_t limit)
1029 uintptr_t pos = (uintptr_t)input;
1030 int64_t val = 0;
1031 int x;
1032 boolean_t neg = B_FALSE;
1033 char c, cc, ccc;
1034 uintptr_t end = pos + limit;
1037 * Consume any whitespace preceding digits.
1039 while ((c = dtrace_load8(pos)) == ' ' || c == '\t')
1040 pos++;
1043 * Handle an explicit sign if one is present.
1045 if (c == '-' || c == '+') {
1046 if (c == '-')
1047 neg = B_TRUE;
1048 c = dtrace_load8(++pos);
1052 * Check for an explicit hexadecimal prefix ("0x" or "0X") and skip it
1053 * if present.
1055 if (base == 16 && c == '0' && ((cc = dtrace_load8(pos + 1)) == 'x' ||
1056 cc == 'X') && isxdigit(ccc = dtrace_load8(pos + 2))) {
1057 pos += 2;
1058 c = ccc;
1062 * Read in contiguous digits until the first non-digit character.
1064 for (; pos < end && c != '\0' && lisalnum(c) && (x = DIGIT(c)) < base;
1065 c = dtrace_load8(++pos))
1066 val = val * base + x;
1068 return (neg ? -val : val);
1072 * Compare two strings using safe loads.
1074 static int
1075 dtrace_strncmp(char *s1, char *s2, size_t limit)
1077 uint8_t c1, c2;
1078 volatile uint16_t *flags;
1080 if (s1 == s2 || limit == 0)
1081 return (0);
1083 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
1085 do {
1086 if (s1 == NULL) {
1087 c1 = '\0';
1088 } else {
1089 c1 = dtrace_load8((uintptr_t)s1++);
1092 if (s2 == NULL) {
1093 c2 = '\0';
1094 } else {
1095 c2 = dtrace_load8((uintptr_t)s2++);
1098 if (c1 != c2)
1099 return (c1 - c2);
1100 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));
1102 return (0);
1106 * Compute strlen(s) for a string using safe memory accesses. The additional
1107 * len parameter is used to specify a maximum length to ensure completion.
1109 static size_t
1110 dtrace_strlen(const char *s, size_t lim)
1112 uint_t len;
1114 for (len = 0; len != lim; len++) {
1115 if (dtrace_load8((uintptr_t)s++) == '\0')
1116 break;
1119 return (len);
1123 * Check if an address falls within a toxic region.
1125 static int
1126 dtrace_istoxic(uintptr_t kaddr, size_t size)
1128 uintptr_t taddr, tsize;
1129 int i;
1131 for (i = 0; i < dtrace_toxranges; i++) {
1132 taddr = dtrace_toxrange[i].dtt_base;
1133 tsize = dtrace_toxrange[i].dtt_limit - taddr;
1135 if (kaddr - taddr < tsize) {
1136 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1137 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr;
1138 return (1);
1141 if (taddr - kaddr < size) {
1142 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1143 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr;
1144 return (1);
1148 return (0);
1152 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe
1153 * memory specified by the DIF program. The dst is assumed to be safe memory
1154 * that we can store to directly because it is managed by DTrace. As with
1155 * standard bcopy, overlapping copies are handled properly.
1157 static void
1158 dtrace_bcopy(const void *src, void *dst, size_t len)
1160 if (len != 0) {
1161 uint8_t *s1 = dst;
1162 const uint8_t *s2 = src;
1164 if (s1 <= s2) {
1165 do {
1166 *s1++ = dtrace_load8((uintptr_t)s2++);
1167 } while (--len != 0);
1168 } else {
1169 s2 += len;
1170 s1 += len;
1172 do {
1173 *--s1 = dtrace_load8((uintptr_t)--s2);
1174 } while (--len != 0);
1180 * Copy src to dst using safe memory accesses, up to either the specified
1181 * length, or the point that a nul byte is encountered. The src is assumed to
1182 * be unsafe memory specified by the DIF program. The dst is assumed to be
1183 * safe memory that we can store to directly because it is managed by DTrace.
1184 * Unlike dtrace_bcopy(), overlapping regions are not handled.
1186 static void
1187 dtrace_strcpy(const void *src, void *dst, size_t len)
1189 if (len != 0) {
1190 uint8_t *s1 = dst, c;
1191 const uint8_t *s2 = src;
1193 do {
1194 *s1++ = c = dtrace_load8((uintptr_t)s2++);
1195 } while (--len != 0 && c != '\0');
1200 * Copy src to dst, deriving the size and type from the specified (BYREF)
1201 * variable type. The src is assumed to be unsafe memory specified by the DIF
1202 * program. The dst is assumed to be DTrace variable memory that is of the
1203 * specified type; we assume that we can store to directly.
1205 static void
1206 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type, size_t limit)
1208 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
1210 if (type->dtdt_kind == DIF_TYPE_STRING) {
1211 dtrace_strcpy(src, dst, MIN(type->dtdt_size, limit));
1212 } else {
1213 dtrace_bcopy(src, dst, MIN(type->dtdt_size, limit));
1218 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be
1219 * unsafe memory specified by the DIF program. The s2 data is assumed to be
1220 * safe memory that we can access directly because it is managed by DTrace.
1222 static int
1223 dtrace_bcmp(const void *s1, const void *s2, size_t len)
1225 volatile uint16_t *flags;
1227 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
1229 if (s1 == s2)
1230 return (0);
1232 if (s1 == NULL || s2 == NULL)
1233 return (1);
1235 if (s1 != s2 && len != 0) {
1236 const uint8_t *ps1 = s1;
1237 const uint8_t *ps2 = s2;
1239 do {
1240 if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
1241 return (1);
1242 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
1244 return (0);
1248 * Zero the specified region using a simple byte-by-byte loop. Note that this
1249 * is for safe DTrace-managed memory only.
1251 static void
1252 dtrace_bzero(void *dst, size_t len)
1254 uchar_t *cp;
1256 for (cp = dst; len != 0; len--)
1257 *cp++ = 0;
1260 static void
1261 dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
1263 uint64_t result[2];
1265 result[0] = addend1[0] + addend2[0];
1266 result[1] = addend1[1] + addend2[1] +
1267 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);
1269 sum[0] = result[0];
1270 sum[1] = result[1];
1274 * Shift the 128-bit value in a by b. If b is positive, shift left.
1275 * If b is negative, shift right.
1277 static void
1278 dtrace_shift_128(uint64_t *a, int b)
1280 uint64_t mask;
1282 if (b == 0)
1283 return;
1285 if (b < 0) {
1286 b = -b;
1287 if (b >= 64) {
1288 a[0] = a[1] >> (b - 64);
1289 a[1] = 0;
1290 } else {
1291 a[0] >>= b;
1292 mask = 1LL << (64 - b);
1293 mask -= 1;
1294 a[0] |= ((a[1] & mask) << (64 - b));
1295 a[1] >>= b;
1297 } else {
1298 if (b >= 64) {
1299 a[1] = a[0] << (b - 64);
1300 a[0] = 0;
1301 } else {
1302 a[1] <<= b;
1303 mask = a[0] >> (64 - b);
1304 a[1] |= mask;
1305 a[0] <<= b;
1311 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
1312 * use native multiplication on those, and then re-combine into the
1313 * resulting 128-bit value.
1315 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
1316 * hi1 * hi2 << 64 +
1317 * hi1 * lo2 << 32 +
1318 * hi2 * lo1 << 32 +
1319 * lo1 * lo2
1321 static void
1322 dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
1324 uint64_t hi1, hi2, lo1, lo2;
1325 uint64_t tmp[2];
1327 hi1 = factor1 >> 32;
1328 hi2 = factor2 >> 32;
1330 lo1 = factor1 & DT_MASK_LO;
1331 lo2 = factor2 & DT_MASK_LO;
1333 product[0] = lo1 * lo2;
1334 product[1] = hi1 * hi2;
1336 tmp[0] = hi1 * lo2;
1337 tmp[1] = 0;
1338 dtrace_shift_128(tmp, 32);
1339 dtrace_add_128(product, tmp, product);
1341 tmp[0] = hi2 * lo1;
1342 tmp[1] = 0;
1343 dtrace_shift_128(tmp, 32);
1344 dtrace_add_128(product, tmp, product);
1348 * This privilege check should be used by actions and subroutines to
1349 * verify that the user credentials of the process that enabled the
1350 * invoking ECB match the target credentials
1352 static int
1353 dtrace_priv_proc_common_user(dtrace_state_t *state)
1355 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1358 * We should always have a non-NULL state cred here, since if cred
1359 * is null (anonymous tracing), we fast-path bypass this routine.
1361 ASSERT(s_cr != NULL);
1363 if ((cr = CRED()) != NULL &&
1364 s_cr->cr_uid == cr->cr_uid &&
1365 s_cr->cr_uid == cr->cr_ruid &&
1366 s_cr->cr_uid == cr->cr_suid &&
1367 s_cr->cr_gid == cr->cr_gid &&
1368 s_cr->cr_gid == cr->cr_rgid &&
1369 s_cr->cr_gid == cr->cr_sgid)
1370 return (1);
1372 return (0);
1376 * This privilege check should be used by actions and subroutines to
1377 * verify that the zone of the process that enabled the invoking ECB
1378 * matches the target credentials
1380 static int
1381 dtrace_priv_proc_common_zone(dtrace_state_t *state)
1383 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1386 * We should always have a non-NULL state cred here, since if cred
1387 * is null (anonymous tracing), we fast-path bypass this routine.
1389 ASSERT(s_cr != NULL);
1391 if ((cr = CRED()) != NULL && s_cr->cr_zone == cr->cr_zone)
1392 return (1);
1394 return (0);
1398 * This privilege check should be used by actions and subroutines to
1399 * verify that the process has not setuid or changed credentials.
1401 static int
1402 dtrace_priv_proc_common_nocd()
1404 proc_t *proc;
1406 if ((proc = ttoproc(curthread)) != NULL &&
1407 !(proc->p_flag & SNOCD))
1408 return (1);
1410 return (0);
1413 static int
1414 dtrace_priv_proc_destructive(dtrace_state_t *state, dtrace_mstate_t *mstate)
1416 int action = state->dts_cred.dcr_action;
1418 if (!(mstate->dtms_access & DTRACE_ACCESS_PROC))
1419 goto bad;
1421 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
1422 dtrace_priv_proc_common_zone(state) == 0)
1423 goto bad;
1425 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
1426 dtrace_priv_proc_common_user(state) == 0)
1427 goto bad;
1429 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
1430 dtrace_priv_proc_common_nocd() == 0)
1431 goto bad;
1433 return (1);
1435 bad:
1436 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1438 return (0);
1441 static int
1442 dtrace_priv_proc_control(dtrace_state_t *state, dtrace_mstate_t *mstate)
1444 if (mstate->dtms_access & DTRACE_ACCESS_PROC) {
1445 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
1446 return (1);
1448 if (dtrace_priv_proc_common_zone(state) &&
1449 dtrace_priv_proc_common_user(state) &&
1450 dtrace_priv_proc_common_nocd())
1451 return (1);
1454 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1456 return (0);
1459 static int
1460 dtrace_priv_proc(dtrace_state_t *state, dtrace_mstate_t *mstate)
1462 if ((mstate->dtms_access & DTRACE_ACCESS_PROC) &&
1463 (state->dts_cred.dcr_action & DTRACE_CRA_PROC))
1464 return (1);
1466 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1468 return (0);
1471 static int
1472 dtrace_priv_kernel(dtrace_state_t *state)
1474 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
1475 return (1);
1477 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1479 return (0);
1482 static int
1483 dtrace_priv_kernel_destructive(dtrace_state_t *state)
1485 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
1486 return (1);
1488 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1490 return (0);
1494 * Determine if the dte_cond of the specified ECB allows for processing of
1495 * the current probe to continue. Note that this routine may allow continued
1496 * processing, but with access(es) stripped from the mstate's dtms_access
1497 * field.
1499 static int
1500 dtrace_priv_probe(dtrace_state_t *state, dtrace_mstate_t *mstate,
1501 dtrace_ecb_t *ecb)
1503 dtrace_probe_t *probe = ecb->dte_probe;
1504 dtrace_provider_t *prov = probe->dtpr_provider;
1505 dtrace_pops_t *pops = &prov->dtpv_pops;
1506 int mode = DTRACE_MODE_NOPRIV_DROP;
1508 ASSERT(ecb->dte_cond);
1510 if (pops->dtps_mode != NULL) {
1511 mode = pops->dtps_mode(prov->dtpv_arg,
1512 probe->dtpr_id, probe->dtpr_arg);
1514 ASSERT(mode & (DTRACE_MODE_USER | DTRACE_MODE_KERNEL));
1515 ASSERT(mode & (DTRACE_MODE_NOPRIV_RESTRICT |
1516 DTRACE_MODE_NOPRIV_DROP));
1520 * If the dte_cond bits indicate that this consumer is only allowed to
1521 * see user-mode firings of this probe, check that the probe was fired
1522 * while in a user context. If that's not the case, use the policy
1523 * specified by the provider to determine if we drop the probe or
1524 * merely restrict operation.
1526 if (ecb->dte_cond & DTRACE_COND_USERMODE) {
1527 ASSERT(mode != DTRACE_MODE_NOPRIV_DROP);
1529 if (!(mode & DTRACE_MODE_USER)) {
1530 if (mode & DTRACE_MODE_NOPRIV_DROP)
1531 return (0);
1533 mstate->dtms_access &= ~DTRACE_ACCESS_ARGS;
1538 * This is more subtle than it looks. We have to be absolutely certain
1539 * that CRED() isn't going to change out from under us so it's only
1540 * legit to examine that structure if we're in constrained situations.
1541 * Currently, the only times we'll this check is if a non-super-user
1542 * has enabled the profile or syscall providers -- providers that
1543 * allow visibility of all processes. For the profile case, the check
1544 * above will ensure that we're examining a user context.
1546 if (ecb->dte_cond & DTRACE_COND_OWNER) {
1547 cred_t *cr;
1548 cred_t *s_cr = state->dts_cred.dcr_cred;
1549 proc_t *proc;
1551 ASSERT(s_cr != NULL);
1553 if ((cr = CRED()) == NULL ||
1554 s_cr->cr_uid != cr->cr_uid ||
1555 s_cr->cr_uid != cr->cr_ruid ||
1556 s_cr->cr_uid != cr->cr_suid ||
1557 s_cr->cr_gid != cr->cr_gid ||
1558 s_cr->cr_gid != cr->cr_rgid ||
1559 s_cr->cr_gid != cr->cr_sgid ||
1560 (proc = ttoproc(curthread)) == NULL ||
1561 (proc->p_flag & SNOCD)) {
1562 if (mode & DTRACE_MODE_NOPRIV_DROP)
1563 return (0);
1565 mstate->dtms_access &= ~DTRACE_ACCESS_PROC;
1570 * If our dte_cond is set to DTRACE_COND_ZONEOWNER and we are not
1571 * in our zone, check to see if our mode policy is to restrict rather
1572 * than to drop; if to restrict, strip away both DTRACE_ACCESS_PROC
1573 * and DTRACE_ACCESS_ARGS
1575 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
1576 cred_t *cr;
1577 cred_t *s_cr = state->dts_cred.dcr_cred;
1579 ASSERT(s_cr != NULL);
1581 if ((cr = CRED()) == NULL ||
1582 s_cr->cr_zone->zone_id != cr->cr_zone->zone_id) {
1583 if (mode & DTRACE_MODE_NOPRIV_DROP)
1584 return (0);
1586 mstate->dtms_access &=
1587 ~(DTRACE_ACCESS_PROC | DTRACE_ACCESS_ARGS);
1592 * By merits of being in this code path at all, we have limited
1593 * privileges. If the provider has indicated that limited privileges
1594 * are to denote restricted operation, strip off the ability to access
1595 * arguments.
1597 if (mode & DTRACE_MODE_LIMITEDPRIV_RESTRICT)
1598 mstate->dtms_access &= ~DTRACE_ACCESS_ARGS;
1600 return (1);
1604 * Note: not called from probe context. This function is called
1605 * asynchronously (and at a regular interval) from outside of probe context to
1606 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable
1607 * cleaning is explained in detail in <sys/dtrace_impl.h>.
1609 void
1610 dtrace_dynvar_clean(dtrace_dstate_t *dstate)
1612 dtrace_dynvar_t *dirty;
1613 dtrace_dstate_percpu_t *dcpu;
1614 dtrace_dynvar_t **rinsep;
1615 int i, j, work = 0;
1617 for (i = 0; i < NCPU; i++) {
1618 dcpu = &dstate->dtds_percpu[i];
1619 rinsep = &dcpu->dtdsc_rinsing;
1622 * If the dirty list is NULL, there is no dirty work to do.
1624 if (dcpu->dtdsc_dirty == NULL)
1625 continue;
1627 if (dcpu->dtdsc_rinsing != NULL) {
1629 * If the rinsing list is non-NULL, then it is because
1630 * this CPU was selected to accept another CPU's
1631 * dirty list -- and since that time, dirty buffers
1632 * have accumulated. This is a highly unlikely
1633 * condition, but we choose to ignore the dirty
1634 * buffers -- they'll be picked up a future cleanse.
1636 continue;
1639 if (dcpu->dtdsc_clean != NULL) {
1641 * If the clean list is non-NULL, then we're in a
1642 * situation where a CPU has done deallocations (we
1643 * have a non-NULL dirty list) but no allocations (we
1644 * also have a non-NULL clean list). We can't simply
1645 * move the dirty list into the clean list on this
1646 * CPU, yet we also don't want to allow this condition
1647 * to persist, lest a short clean list prevent a
1648 * massive dirty list from being cleaned (which in
1649 * turn could lead to otherwise avoidable dynamic
1650 * drops). To deal with this, we look for some CPU
1651 * with a NULL clean list, NULL dirty list, and NULL
1652 * rinsing list -- and then we borrow this CPU to
1653 * rinse our dirty list.
1655 for (j = 0; j < NCPU; j++) {
1656 dtrace_dstate_percpu_t *rinser;
1658 rinser = &dstate->dtds_percpu[j];
1660 if (rinser->dtdsc_rinsing != NULL)
1661 continue;
1663 if (rinser->dtdsc_dirty != NULL)
1664 continue;
1666 if (rinser->dtdsc_clean != NULL)
1667 continue;
1669 rinsep = &rinser->dtdsc_rinsing;
1670 break;
1673 if (j == NCPU) {
1675 * We were unable to find another CPU that
1676 * could accept this dirty list -- we are
1677 * therefore unable to clean it now.
1679 dtrace_dynvar_failclean++;
1680 continue;
1684 work = 1;
1687 * Atomically move the dirty list aside.
1689 do {
1690 dirty = dcpu->dtdsc_dirty;
1693 * Before we zap the dirty list, set the rinsing list.
1694 * (This allows for a potential assertion in
1695 * dtrace_dynvar(): if a free dynamic variable appears
1696 * on a hash chain, either the dirty list or the
1697 * rinsing list for some CPU must be non-NULL.)
1699 *rinsep = dirty;
1700 dtrace_membar_producer();
1701 } while (dtrace_casptr(&dcpu->dtdsc_dirty,
1702 dirty, NULL) != dirty);
1705 if (!work) {
1707 * We have no work to do; we can simply return.
1709 return;
1712 dtrace_sync();
1714 for (i = 0; i < NCPU; i++) {
1715 dcpu = &dstate->dtds_percpu[i];
1717 if (dcpu->dtdsc_rinsing == NULL)
1718 continue;
1721 * We are now guaranteed that no hash chain contains a pointer
1722 * into this dirty list; we can make it clean.
1724 ASSERT(dcpu->dtdsc_clean == NULL);
1725 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1726 dcpu->dtdsc_rinsing = NULL;
1730 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1731 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1732 * This prevents a race whereby a CPU incorrectly decides that
1733 * the state should be something other than DTRACE_DSTATE_CLEAN
1734 * after dtrace_dynvar_clean() has completed.
1736 dtrace_sync();
1738 dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1742 * Depending on the value of the op parameter, this function looks-up,
1743 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an
1744 * allocation is requested, this function will return a pointer to a
1745 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1746 * variable can be allocated. If NULL is returned, the appropriate counter
1747 * will be incremented.
1749 dtrace_dynvar_t *
1750 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1751 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
1752 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1754 uint64_t hashval = DTRACE_DYNHASH_VALID;
1755 dtrace_dynhash_t *hash = dstate->dtds_hash;
1756 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1757 processorid_t me = CPU->cpu_id, cpu = me;
1758 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
1759 size_t bucket, ksize;
1760 size_t chunksize = dstate->dtds_chunksize;
1761 uintptr_t kdata, lock, nstate;
1762 uint_t i;
1764 ASSERT(nkeys != 0);
1767 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time"
1768 * algorithm. For the by-value portions, we perform the algorithm in
1769 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a
1770 * bit, and seems to have only a minute effect on distribution. For
1771 * the by-reference data, we perform "One-at-a-time" iterating (safely)
1772 * over each referenced byte. It's painful to do this, but it's much
1773 * better than pathological hash distribution. The efficacy of the
1774 * hashing algorithm (and a comparison with other algorithms) may be
1775 * found by running the ::dtrace_dynstat MDB dcmd.
1777 for (i = 0; i < nkeys; i++) {
1778 if (key[i].dttk_size == 0) {
1779 uint64_t val = key[i].dttk_value;
1781 hashval += (val >> 48) & 0xffff;
1782 hashval += (hashval << 10);
1783 hashval ^= (hashval >> 6);
1785 hashval += (val >> 32) & 0xffff;
1786 hashval += (hashval << 10);
1787 hashval ^= (hashval >> 6);
1789 hashval += (val >> 16) & 0xffff;
1790 hashval += (hashval << 10);
1791 hashval ^= (hashval >> 6);
1793 hashval += val & 0xffff;
1794 hashval += (hashval << 10);
1795 hashval ^= (hashval >> 6);
1796 } else {
1798 * This is incredibly painful, but it beats the hell
1799 * out of the alternative.
1801 uint64_t j, size = key[i].dttk_size;
1802 uintptr_t base = (uintptr_t)key[i].dttk_value;
1804 if (!dtrace_canload(base, size, mstate, vstate))
1805 break;
1807 for (j = 0; j < size; j++) {
1808 hashval += dtrace_load8(base + j);
1809 hashval += (hashval << 10);
1810 hashval ^= (hashval >> 6);
1815 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
1816 return (NULL);
1818 hashval += (hashval << 3);
1819 hashval ^= (hashval >> 11);
1820 hashval += (hashval << 15);
1823 * There is a remote chance (ideally, 1 in 2^31) that our hashval
1824 * comes out to be one of our two sentinel hash values. If this
1825 * actually happens, we set the hashval to be a value known to be a
1826 * non-sentinel value.
1828 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
1829 hashval = DTRACE_DYNHASH_VALID;
1832 * Yes, it's painful to do a divide here. If the cycle count becomes
1833 * important here, tricks can be pulled to reduce it. (However, it's
1834 * critical that hash collisions be kept to an absolute minimum;
1835 * they're much more painful than a divide.) It's better to have a
1836 * solution that generates few collisions and still keeps things
1837 * relatively simple.
1839 bucket = hashval % dstate->dtds_hashsize;
1841 if (op == DTRACE_DYNVAR_DEALLOC) {
1842 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;
1844 for (;;) {
1845 while ((lock = *lockp) & 1)
1846 continue;
1848 if (dtrace_casptr((void *)lockp,
1849 (void *)lock, (void *)(lock + 1)) == (void *)lock)
1850 break;
1853 dtrace_membar_producer();
1856 top:
1857 prev = NULL;
1858 lock = hash[bucket].dtdh_lock;
1860 dtrace_membar_consumer();
1862 start = hash[bucket].dtdh_chain;
1863 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
1864 start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
1865 op != DTRACE_DYNVAR_DEALLOC));
1867 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
1868 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
1869 dtrace_key_t *dkey = &dtuple->dtt_key[0];
1871 if (dvar->dtdv_hashval != hashval) {
1872 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
1874 * We've reached the sink, and therefore the
1875 * end of the hash chain; we can kick out of
1876 * the loop knowing that we have seen a valid
1877 * snapshot of state.
1879 ASSERT(dvar->dtdv_next == NULL);
1880 ASSERT(dvar == &dtrace_dynhash_sink);
1881 break;
1884 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
1886 * We've gone off the rails: somewhere along
1887 * the line, one of the members of this hash
1888 * chain was deleted. Note that we could also
1889 * detect this by simply letting this loop run
1890 * to completion, as we would eventually hit
1891 * the end of the dirty list. However, we
1892 * want to avoid running the length of the
1893 * dirty list unnecessarily (it might be quite
1894 * long), so we catch this as early as
1895 * possible by detecting the hash marker. In
1896 * this case, we simply set dvar to NULL and
1897 * break; the conditional after the loop will
1898 * send us back to top.
1900 dvar = NULL;
1901 break;
1904 goto next;
1907 if (dtuple->dtt_nkeys != nkeys)
1908 goto next;
1910 for (i = 0; i < nkeys; i++, dkey++) {
1911 if (dkey->dttk_size != key[i].dttk_size)
1912 goto next; /* size or type mismatch */
1914 if (dkey->dttk_size != 0) {
1915 if (dtrace_bcmp(
1916 (void *)(uintptr_t)key[i].dttk_value,
1917 (void *)(uintptr_t)dkey->dttk_value,
1918 dkey->dttk_size))
1919 goto next;
1920 } else {
1921 if (dkey->dttk_value != key[i].dttk_value)
1922 goto next;
1926 if (op != DTRACE_DYNVAR_DEALLOC)
1927 return (dvar);
1929 ASSERT(dvar->dtdv_next == NULL ||
1930 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);
1932 if (prev != NULL) {
1933 ASSERT(hash[bucket].dtdh_chain != dvar);
1934 ASSERT(start != dvar);
1935 ASSERT(prev->dtdv_next == dvar);
1936 prev->dtdv_next = dvar->dtdv_next;
1937 } else {
1938 if (dtrace_casptr(&hash[bucket].dtdh_chain,
1939 start, dvar->dtdv_next) != start) {
1941 * We have failed to atomically swing the
1942 * hash table head pointer, presumably because
1943 * of a conflicting allocation on another CPU.
1944 * We need to reread the hash chain and try
1945 * again.
1947 goto top;
1951 dtrace_membar_producer();
1954 * Now set the hash value to indicate that it's free.
1956 ASSERT(hash[bucket].dtdh_chain != dvar);
1957 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1959 dtrace_membar_producer();
1962 * Set the next pointer to point at the dirty list, and
1963 * atomically swing the dirty pointer to the newly freed dvar.
1965 do {
1966 next = dcpu->dtdsc_dirty;
1967 dvar->dtdv_next = next;
1968 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);
1971 * Finally, unlock this hash bucket.
1973 ASSERT(hash[bucket].dtdh_lock == lock);
1974 ASSERT(lock & 1);
1975 hash[bucket].dtdh_lock++;
1977 return (NULL);
1978 next:
1979 prev = dvar;
1980 continue;
1983 if (dvar == NULL) {
1985 * If dvar is NULL, it is because we went off the rails:
1986 * one of the elements that we traversed in the hash chain
1987 * was deleted while we were traversing it. In this case,
1988 * we assert that we aren't doing a dealloc (deallocs lock
1989 * the hash bucket to prevent themselves from racing with
1990 * one another), and retry the hash chain traversal.
1992 ASSERT(op != DTRACE_DYNVAR_DEALLOC);
1993 goto top;
1996 if (op != DTRACE_DYNVAR_ALLOC) {
1998 * If we are not to allocate a new variable, we want to
1999 * return NULL now. Before we return, check that the value
2000 * of the lock word hasn't changed. If it has, we may have
2001 * seen an inconsistent snapshot.
2003 if (op == DTRACE_DYNVAR_NOALLOC) {
2004 if (hash[bucket].dtdh_lock != lock)
2005 goto top;
2006 } else {
2007 ASSERT(op == DTRACE_DYNVAR_DEALLOC);
2008 ASSERT(hash[bucket].dtdh_lock == lock);
2009 ASSERT(lock & 1);
2010 hash[bucket].dtdh_lock++;
2013 return (NULL);
2017 * We need to allocate a new dynamic variable. The size we need is the
2018 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
2019 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
2020 * the size of any referred-to data (dsize). We then round the final
2021 * size up to the chunksize for allocation.
2023 for (ksize = 0, i = 0; i < nkeys; i++)
2024 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
2027 * This should be pretty much impossible, but could happen if, say,
2028 * strange DIF specified the tuple. Ideally, this should be an
2029 * assertion and not an error condition -- but that requires that the
2030 * chunksize calculation in dtrace_difo_chunksize() be absolutely
2031 * bullet-proof. (That is, it must not be able to be fooled by
2032 * malicious DIF.) Given the lack of backwards branches in DIF,
2033 * solving this would presumably not amount to solving the Halting
2034 * Problem -- but it still seems awfully hard.
2036 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
2037 ksize + dsize > chunksize) {
2038 dcpu->dtdsc_drops++;
2039 return (NULL);
2042 nstate = DTRACE_DSTATE_EMPTY;
2044 do {
2045 retry:
2046 free = dcpu->dtdsc_free;
2048 if (free == NULL) {
2049 dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
2050 void *rval;
2052 if (clean == NULL) {
2054 * We're out of dynamic variable space on
2055 * this CPU. Unless we have tried all CPUs,
2056 * we'll try to allocate from a different
2057 * CPU.
2059 switch (dstate->dtds_state) {
2060 case DTRACE_DSTATE_CLEAN: {
2061 void *sp = &dstate->dtds_state;
2063 if (++cpu >= NCPU)
2064 cpu = 0;
2066 if (dcpu->dtdsc_dirty != NULL &&
2067 nstate == DTRACE_DSTATE_EMPTY)
2068 nstate = DTRACE_DSTATE_DIRTY;
2070 if (dcpu->dtdsc_rinsing != NULL)
2071 nstate = DTRACE_DSTATE_RINSING;
2073 dcpu = &dstate->dtds_percpu[cpu];
2075 if (cpu != me)
2076 goto retry;
2078 (void) dtrace_cas32(sp,
2079 DTRACE_DSTATE_CLEAN, nstate);
2082 * To increment the correct bean
2083 * counter, take another lap.
2085 goto retry;
2088 case DTRACE_DSTATE_DIRTY:
2089 dcpu->dtdsc_dirty_drops++;
2090 break;
2092 case DTRACE_DSTATE_RINSING:
2093 dcpu->dtdsc_rinsing_drops++;
2094 break;
2096 case DTRACE_DSTATE_EMPTY:
2097 dcpu->dtdsc_drops++;
2098 break;
2101 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
2102 return (NULL);
2106 * The clean list appears to be non-empty. We want to
2107 * move the clean list to the free list; we start by
2108 * moving the clean pointer aside.
2110 if (dtrace_casptr(&dcpu->dtdsc_clean,
2111 clean, NULL) != clean) {
2113 * We are in one of two situations:
2115 * (a) The clean list was switched to the
2116 * free list by another CPU.
2118 * (b) The clean list was added to by the
2119 * cleansing cyclic.
2121 * In either of these situations, we can
2122 * just reattempt the free list allocation.
2124 goto retry;
2127 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
2130 * Now we'll move the clean list to our free list.
2131 * It's impossible for this to fail: the only way
2132 * the free list can be updated is through this
2133 * code path, and only one CPU can own the clean list.
2134 * Thus, it would only be possible for this to fail if
2135 * this code were racing with dtrace_dynvar_clean().
2136 * (That is, if dtrace_dynvar_clean() updated the clean
2137 * list, and we ended up racing to update the free
2138 * list.) This race is prevented by the dtrace_sync()
2139 * in dtrace_dynvar_clean() -- which flushes the
2140 * owners of the clean lists out before resetting
2141 * the clean lists.
2143 dcpu = &dstate->dtds_percpu[me];
2144 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
2145 ASSERT(rval == NULL);
2146 goto retry;
2149 dvar = free;
2150 new_free = dvar->dtdv_next;
2151 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
2154 * We have now allocated a new chunk. We copy the tuple keys into the
2155 * tuple array and copy any referenced key data into the data space
2156 * following the tuple array. As we do this, we relocate dttk_value
2157 * in the final tuple to point to the key data address in the chunk.
2159 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
2160 dvar->dtdv_data = (void *)(kdata + ksize);
2161 dvar->dtdv_tuple.dtt_nkeys = nkeys;
2163 for (i = 0; i < nkeys; i++) {
2164 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
2165 size_t kesize = key[i].dttk_size;
2167 if (kesize != 0) {
2168 dtrace_bcopy(
2169 (const void *)(uintptr_t)key[i].dttk_value,
2170 (void *)kdata, kesize);
2171 dkey->dttk_value = kdata;
2172 kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
2173 } else {
2174 dkey->dttk_value = key[i].dttk_value;
2177 dkey->dttk_size = kesize;
2180 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
2181 dvar->dtdv_hashval = hashval;
2182 dvar->dtdv_next = start;
2184 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
2185 return (dvar);
2188 * The cas has failed. Either another CPU is adding an element to
2189 * this hash chain, or another CPU is deleting an element from this
2190 * hash chain. The simplest way to deal with both of these cases
2191 * (though not necessarily the most efficient) is to free our
2192 * allocated block and re-attempt it all. Note that the free is
2193 * to the dirty list and _not_ to the free list. This is to prevent
2194 * races with allocators, above.
2196 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
2198 dtrace_membar_producer();
2200 do {
2201 free = dcpu->dtdsc_dirty;
2202 dvar->dtdv_next = free;
2203 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
2205 goto top;
2208 /*ARGSUSED*/
2209 static void
2210 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
2212 if ((int64_t)nval < (int64_t)*oval)
2213 *oval = nval;
2216 /*ARGSUSED*/
2217 static void
2218 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
2220 if ((int64_t)nval > (int64_t)*oval)
2221 *oval = nval;
2224 static void
2225 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
2227 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
2228 int64_t val = (int64_t)nval;
2230 if (val < 0) {
2231 for (i = 0; i < zero; i++) {
2232 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
2233 quanta[i] += incr;
2234 return;
2237 } else {
2238 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
2239 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
2240 quanta[i - 1] += incr;
2241 return;
2245 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
2246 return;
2249 ASSERT(0);
2252 static void
2253 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
2255 uint64_t arg = *lquanta++;
2256 int32_t base = DTRACE_LQUANTIZE_BASE(arg);
2257 uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
2258 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
2259 int32_t val = (int32_t)nval, level;
2261 ASSERT(step != 0);
2262 ASSERT(levels != 0);
2264 if (val < base) {
2266 * This is an underflow.
2268 lquanta[0] += incr;
2269 return;
2272 level = (val - base) / step;
2274 if (level < levels) {
2275 lquanta[level + 1] += incr;
2276 return;
2280 * This is an overflow.
2282 lquanta[levels + 1] += incr;
2285 static int
2286 dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low,
2287 uint16_t high, uint16_t nsteps, int64_t value)
2289 int64_t this = 1, last, next;
2290 int base = 1, order;
2292 ASSERT(factor <= nsteps);
2293 ASSERT(nsteps % factor == 0);
2295 for (order = 0; order < low; order++)
2296 this *= factor;
2299 * If our value is less than our factor taken to the power of the
2300 * low order of magnitude, it goes into the zeroth bucket.
2302 if (value < (last = this))
2303 return (0);
2305 for (this *= factor; order <= high; order++) {
2306 int nbuckets = this > nsteps ? nsteps : this;
2308 if ((next = this * factor) < this) {
2310 * We should not generally get log/linear quantizations
2311 * with a high magnitude that allows 64-bits to
2312 * overflow, but we nonetheless protect against this
2313 * by explicitly checking for overflow, and clamping
2314 * our value accordingly.
2316 value = this - 1;
2319 if (value < this) {
2321 * If our value lies within this order of magnitude,
2322 * determine its position by taking the offset within
2323 * the order of magnitude, dividing by the bucket
2324 * width, and adding to our (accumulated) base.
2326 return (base + (value - last) / (this / nbuckets));
2329 base += nbuckets - (nbuckets / factor);
2330 last = this;
2331 this = next;
2335 * Our value is greater than or equal to our factor taken to the
2336 * power of one plus the high magnitude -- return the top bucket.
2338 return (base);
2341 static void
2342 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr)
2344 uint64_t arg = *llquanta++;
2345 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg);
2346 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg);
2347 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg);
2348 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);
2350 llquanta[dtrace_aggregate_llquantize_bucket(factor,
2351 low, high, nsteps, nval)] += incr;
2354 /*ARGSUSED*/
2355 static void
2356 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
2358 data[0]++;
2359 data[1] += nval;
2362 /*ARGSUSED*/
2363 static void
2364 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
2366 int64_t snval = (int64_t)nval;
2367 uint64_t tmp[2];
2369 data[0]++;
2370 data[1] += nval;
2373 * What we want to say here is:
2375 * data[2] += nval * nval;
2377 * But given that nval is 64-bit, we could easily overflow, so
2378 * we do this as 128-bit arithmetic.
2380 if (snval < 0)
2381 snval = -snval;
2383 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
2384 dtrace_add_128(data + 2, tmp, data + 2);
2387 /*ARGSUSED*/
2388 static void
2389 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
2391 *oval = *oval + 1;
2394 /*ARGSUSED*/
2395 static void
2396 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
2398 *oval += nval;
2402 * Aggregate given the tuple in the principal data buffer, and the aggregating
2403 * action denoted by the specified dtrace_aggregation_t. The aggregation
2404 * buffer is specified as the buf parameter. This routine does not return
2405 * failure; if there is no space in the aggregation buffer, the data will be
2406 * dropped, and a corresponding counter incremented.
2408 static void
2409 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
2410 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
2412 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
2413 uint32_t i, ndx, size, fsize;
2414 uint32_t align = sizeof (uint64_t) - 1;
2415 dtrace_aggbuffer_t *agb;
2416 dtrace_aggkey_t *key;
2417 uint32_t hashval = 0, limit, isstr;
2418 caddr_t tomax, data, kdata;
2419 dtrace_actkind_t action;
2420 dtrace_action_t *act;
2421 uintptr_t offs;
2423 if (buf == NULL)
2424 return;
2426 if (!agg->dtag_hasarg) {
2428 * Currently, only quantize() and lquantize() take additional
2429 * arguments, and they have the same semantics: an increment
2430 * value that defaults to 1 when not present. If additional
2431 * aggregating actions take arguments, the setting of the
2432 * default argument value will presumably have to become more
2433 * sophisticated...
2435 arg = 1;
2438 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2439 size = rec->dtrd_offset - agg->dtag_base;
2440 fsize = size + rec->dtrd_size;
2442 ASSERT(dbuf->dtb_tomax != NULL);
2443 data = dbuf->dtb_tomax + offset + agg->dtag_base;
2445 if ((tomax = buf->dtb_tomax) == NULL) {
2446 dtrace_buffer_drop(buf);
2447 return;
2451 * The metastructure is always at the bottom of the buffer.
2453 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2454 sizeof (dtrace_aggbuffer_t));
2456 if (buf->dtb_offset == 0) {
2458 * We just kludge up approximately 1/8th of the size to be
2459 * buckets. If this guess ends up being routinely
2460 * off-the-mark, we may need to dynamically readjust this
2461 * based on past performance.
2463 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2465 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2466 (uintptr_t)tomax || hashsize == 0) {
2468 * We've been given a ludicrously small buffer;
2469 * increment our drop count and leave.
2471 dtrace_buffer_drop(buf);
2472 return;
2476 * And now, a pathetic attempt to try to get a an odd (or
2477 * perchance, a prime) hash size for better hash distribution.
2479 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2480 hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2482 agb->dtagb_hashsize = hashsize;
2483 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2484 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2485 agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2487 for (i = 0; i < agb->dtagb_hashsize; i++)
2488 agb->dtagb_hash[i] = NULL;
2491 ASSERT(agg->dtag_first != NULL);
2492 ASSERT(agg->dtag_first->dta_intuple);
2495 * Calculate the hash value based on the key. Note that we _don't_
2496 * include the aggid in the hashing (but we will store it as part of
2497 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time"
2498 * algorithm: a simple, quick algorithm that has no known funnels, and
2499 * gets good distribution in practice. The efficacy of the hashing
2500 * algorithm (and a comparison with other algorithms) may be found by
2501 * running the ::dtrace_aggstat MDB dcmd.
2503 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2504 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2505 limit = i + act->dta_rec.dtrd_size;
2506 ASSERT(limit <= size);
2507 isstr = DTRACEACT_ISSTRING(act);
2509 for (; i < limit; i++) {
2510 hashval += data[i];
2511 hashval += (hashval << 10);
2512 hashval ^= (hashval >> 6);
2514 if (isstr && data[i] == '\0')
2515 break;
2519 hashval += (hashval << 3);
2520 hashval ^= (hashval >> 11);
2521 hashval += (hashval << 15);
2524 * Yes, the divide here is expensive -- but it's generally the least
2525 * of the performance issues given the amount of data that we iterate
2526 * over to compute hash values, compare data, etc.
2528 ndx = hashval % agb->dtagb_hashsize;
2530 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
2531 ASSERT((caddr_t)key >= tomax);
2532 ASSERT((caddr_t)key < tomax + buf->dtb_size);
2534 if (hashval != key->dtak_hashval || key->dtak_size != size)
2535 continue;
2537 kdata = key->dtak_data;
2538 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
2540 for (act = agg->dtag_first; act->dta_intuple;
2541 act = act->dta_next) {
2542 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2543 limit = i + act->dta_rec.dtrd_size;
2544 ASSERT(limit <= size);
2545 isstr = DTRACEACT_ISSTRING(act);
2547 for (; i < limit; i++) {
2548 if (kdata[i] != data[i])
2549 goto next;
2551 if (isstr && data[i] == '\0')
2552 break;
2556 if (action != key->dtak_action) {
2558 * We are aggregating on the same value in the same
2559 * aggregation with two different aggregating actions.
2560 * (This should have been picked up in the compiler,
2561 * so we may be dealing with errant or devious DIF.)
2562 * This is an error condition; we indicate as much,
2563 * and return.
2565 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2566 return;
2570 * This is a hit: we need to apply the aggregator to
2571 * the value at this key.
2573 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
2574 return;
2575 next:
2576 continue;
2580 * We didn't find it. We need to allocate some zero-filled space,
2581 * link it into the hash table appropriately, and apply the aggregator
2582 * to the (zero-filled) value.
2584 offs = buf->dtb_offset;
2585 while (offs & (align - 1))
2586 offs += sizeof (uint32_t);
2589 * If we don't have enough room to both allocate a new key _and_
2590 * its associated data, increment the drop count and return.
2592 if ((uintptr_t)tomax + offs + fsize >
2593 agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
2594 dtrace_buffer_drop(buf);
2595 return;
2598 /*CONSTCOND*/
2599 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
2600 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
2601 agb->dtagb_free -= sizeof (dtrace_aggkey_t);
2603 key->dtak_data = kdata = tomax + offs;
2604 buf->dtb_offset = offs + fsize;
2607 * Now copy the data across.
2609 *((dtrace_aggid_t *)kdata) = agg->dtag_id;
2611 for (i = sizeof (dtrace_aggid_t); i < size; i++)
2612 kdata[i] = data[i];
2615 * Because strings are not zeroed out by default, we need to iterate
2616 * looking for actions that store strings, and we need to explicitly
2617 * pad these strings out with zeroes.
2619 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2620 int nul;
2622 if (!DTRACEACT_ISSTRING(act))
2623 continue;
2625 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2626 limit = i + act->dta_rec.dtrd_size;
2627 ASSERT(limit <= size);
2629 for (nul = 0; i < limit; i++) {
2630 if (nul) {
2631 kdata[i] = '\0';
2632 continue;
2635 if (data[i] != '\0')
2636 continue;
2638 nul = 1;
2642 for (i = size; i < fsize; i++)
2643 kdata[i] = 0;
2645 key->dtak_hashval = hashval;
2646 key->dtak_size = size;
2647 key->dtak_action = action;
2648 key->dtak_next = agb->dtagb_hash[ndx];
2649 agb->dtagb_hash[ndx] = key;
2652 * Finally, apply the aggregator.
2654 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
2655 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
2659 * Given consumer state, this routine finds a speculation in the INACTIVE
2660 * state and transitions it into the ACTIVE state. If there is no speculation
2661 * in the INACTIVE state, 0 is returned. In this case, no error counter is
2662 * incremented -- it is up to the caller to take appropriate action.
2664 static int
2665 dtrace_speculation(dtrace_state_t *state)
2667 int i = 0;
2668 dtrace_speculation_state_t current;
2669 uint32_t *stat = &state->dts_speculations_unavail, count;
2671 while (i < state->dts_nspeculations) {
2672 dtrace_speculation_t *spec = &state->dts_speculations[i];
2674 current = spec->dtsp_state;
2676 if (current != DTRACESPEC_INACTIVE) {
2677 if (current == DTRACESPEC_COMMITTINGMANY ||
2678 current == DTRACESPEC_COMMITTING ||
2679 current == DTRACESPEC_DISCARDING)
2680 stat = &state->dts_speculations_busy;
2681 i++;
2682 continue;
2685 if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2686 current, DTRACESPEC_ACTIVE) == current)
2687 return (i + 1);
2691 * We couldn't find a speculation. If we found as much as a single
2692 * busy speculation buffer, we'll attribute this failure as "busy"
2693 * instead of "unavail".
2695 do {
2696 count = *stat;
2697 } while (dtrace_cas32(stat, count, count + 1) != count);
2699 return (0);
2703 * This routine commits an active speculation. If the specified speculation
2704 * is not in a valid state to perform a commit(), this routine will silently do
2705 * nothing. The state of the specified speculation is transitioned according
2706 * to the state transition diagram outlined in <sys/dtrace_impl.h>
2708 static void
2709 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2710 dtrace_specid_t which)
2712 dtrace_speculation_t *spec;
2713 dtrace_buffer_t *src, *dest;
2714 uintptr_t daddr, saddr, dlimit, slimit;
2715 dtrace_speculation_state_t current, new;
2716 intptr_t offs;
2717 uint64_t timestamp;
2719 if (which == 0)
2720 return;
2722 if (which > state->dts_nspeculations) {
2723 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2724 return;
2727 spec = &state->dts_speculations[which - 1];
2728 src = &spec->dtsp_buffer[cpu];
2729 dest = &state->dts_buffer[cpu];
2731 do {
2732 current = spec->dtsp_state;
2734 if (current == DTRACESPEC_COMMITTINGMANY)
2735 break;
2737 switch (current) {
2738 case DTRACESPEC_INACTIVE:
2739 case DTRACESPEC_DISCARDING:
2740 return;
2742 case DTRACESPEC_COMMITTING:
2744 * This is only possible if we are (a) commit()'ing
2745 * without having done a prior speculate() on this CPU
2746 * and (b) racing with another commit() on a different
2747 * CPU. There's nothing to do -- we just assert that
2748 * our offset is 0.
2750 ASSERT(src->dtb_offset == 0);
2751 return;
2753 case DTRACESPEC_ACTIVE:
2754 new = DTRACESPEC_COMMITTING;
2755 break;
2757 case DTRACESPEC_ACTIVEONE:
2759 * This speculation is active on one CPU. If our
2760 * buffer offset is non-zero, we know that the one CPU
2761 * must be us. Otherwise, we are committing on a
2762 * different CPU from the speculate(), and we must
2763 * rely on being asynchronously cleaned.
2765 if (src->dtb_offset != 0) {
2766 new = DTRACESPEC_COMMITTING;
2767 break;
2769 /*FALLTHROUGH*/
2771 case DTRACESPEC_ACTIVEMANY:
2772 new = DTRACESPEC_COMMITTINGMANY;
2773 break;
2775 default:
2776 ASSERT(0);
2778 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2779 current, new) != current);
2782 * We have set the state to indicate that we are committing this
2783 * speculation. Now reserve the necessary space in the destination
2784 * buffer.
2786 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2787 sizeof (uint64_t), state, NULL)) < 0) {
2788 dtrace_buffer_drop(dest);
2789 goto out;
2793 * We have sufficient space to copy the speculative buffer into the
2794 * primary buffer. First, modify the speculative buffer, filling
2795 * in the timestamp of all entries with the current time. The data
2796 * must have the commit() time rather than the time it was traced,
2797 * so that all entries in the primary buffer are in timestamp order.
2799 timestamp = dtrace_gethrtime();
2800 saddr = (uintptr_t)src->dtb_tomax;
2801 slimit = saddr + src->dtb_offset;
2802 while (saddr < slimit) {
2803 size_t size;
2804 dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr;
2806 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) {
2807 saddr += sizeof (dtrace_epid_t);
2808 continue;
2810 ASSERT3U(dtrh->dtrh_epid, <=, state->dts_necbs);
2811 size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size;
2813 ASSERT3U(saddr + size, <=, slimit);
2814 ASSERT3U(size, >=, sizeof (dtrace_rechdr_t));
2815 ASSERT3U(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh), ==, UINT64_MAX);
2817 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp);
2819 saddr += size;
2823 * Copy the buffer across. (Note that this is a
2824 * highly subobtimal bcopy(); in the unlikely event that this becomes
2825 * a serious performance issue, a high-performance DTrace-specific
2826 * bcopy() should obviously be invented.)
2828 daddr = (uintptr_t)dest->dtb_tomax + offs;
2829 dlimit = daddr + src->dtb_offset;
2830 saddr = (uintptr_t)src->dtb_tomax;
2833 * First, the aligned portion.
2835 while (dlimit - daddr >= sizeof (uint64_t)) {
2836 *((uint64_t *)daddr) = *((uint64_t *)saddr);
2838 daddr += sizeof (uint64_t);
2839 saddr += sizeof (uint64_t);
2843 * Now any left-over bit...
2845 while (dlimit - daddr)
2846 *((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2849 * Finally, commit the reserved space in the destination buffer.
2851 dest->dtb_offset = offs + src->dtb_offset;
2853 out:
2855 * If we're lucky enough to be the only active CPU on this speculation
2856 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2858 if (current == DTRACESPEC_ACTIVE ||
2859 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2860 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2861 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2863 ASSERT(rval == DTRACESPEC_COMMITTING);
2866 src->dtb_offset = 0;
2867 src->dtb_xamot_drops += src->dtb_drops;
2868 src->dtb_drops = 0;
2872 * This routine discards an active speculation. If the specified speculation
2873 * is not in a valid state to perform a discard(), this routine will silently
2874 * do nothing. The state of the specified speculation is transitioned
2875 * according to the state transition diagram outlined in <sys/dtrace_impl.h>
2877 static void
2878 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
2879 dtrace_specid_t which)
2881 dtrace_speculation_t *spec;
2882 dtrace_speculation_state_t current, new;
2883 dtrace_buffer_t *buf;
2885 if (which == 0)
2886 return;
2888 if (which > state->dts_nspeculations) {
2889 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2890 return;
2893 spec = &state->dts_speculations[which - 1];
2894 buf = &spec->dtsp_buffer[cpu];
2896 do {
2897 current = spec->dtsp_state;
2899 switch (current) {
2900 case DTRACESPEC_INACTIVE:
2901 case DTRACESPEC_COMMITTINGMANY:
2902 case DTRACESPEC_COMMITTING:
2903 case DTRACESPEC_DISCARDING:
2904 return;
2906 case DTRACESPEC_ACTIVE:
2907 case DTRACESPEC_ACTIVEMANY:
2908 new = DTRACESPEC_DISCARDING;
2909 break;
2911 case DTRACESPEC_ACTIVEONE:
2912 if (buf->dtb_offset != 0) {
2913 new = DTRACESPEC_INACTIVE;
2914 } else {
2915 new = DTRACESPEC_DISCARDING;
2917 break;
2919 default:
2920 ASSERT(0);
2922 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2923 current, new) != current);
2925 buf->dtb_offset = 0;
2926 buf->dtb_drops = 0;
2930 * Note: not called from probe context. This function is called
2931 * asynchronously from cross call context to clean any speculations that are
2932 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be
2933 * transitioned back to the INACTIVE state until all CPUs have cleaned the
2934 * speculation.
2936 static void
2937 dtrace_speculation_clean_here(dtrace_state_t *state)
2939 dtrace_icookie_t cookie;
2940 processorid_t cpu = CPU->cpu_id;
2941 dtrace_buffer_t *dest = &state->dts_buffer[cpu];
2942 dtrace_specid_t i;
2944 cookie = dtrace_interrupt_disable();
2946 if (dest->dtb_tomax == NULL) {
2947 dtrace_interrupt_enable(cookie);
2948 return;
2951 for (i = 0; i < state->dts_nspeculations; i++) {
2952 dtrace_speculation_t *spec = &state->dts_speculations[i];
2953 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
2955 if (src->dtb_tomax == NULL)
2956 continue;
2958 if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
2959 src->dtb_offset = 0;
2960 continue;
2963 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2964 continue;
2966 if (src->dtb_offset == 0)
2967 continue;
2969 dtrace_speculation_commit(state, cpu, i + 1);
2972 dtrace_interrupt_enable(cookie);
2976 * Note: not called from probe context. This function is called
2977 * asynchronously (and at a regular interval) to clean any speculations that
2978 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there
2979 * is work to be done, it cross calls all CPUs to perform that work;
2980 * COMMITMANY and DISCARDING speculations may not be transitioned back to the
2981 * INACTIVE state until they have been cleaned by all CPUs.
2983 static void
2984 dtrace_speculation_clean(dtrace_state_t *state)
2986 int work = 0, rv;
2987 dtrace_specid_t i;
2989 for (i = 0; i < state->dts_nspeculations; i++) {
2990 dtrace_speculation_t *spec = &state->dts_speculations[i];
2992 ASSERT(!spec->dtsp_cleaning);
2994 if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
2995 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2996 continue;
2998 work++;
2999 spec->dtsp_cleaning = 1;
3002 if (!work)
3003 return;
3005 dtrace_xcall(DTRACE_CPUALL,
3006 (dtrace_xcall_t)dtrace_speculation_clean_here, state);
3009 * We now know that all CPUs have committed or discarded their
3010 * speculation buffers, as appropriate. We can now set the state
3011 * to inactive.
3013 for (i = 0; i < state->dts_nspeculations; i++) {
3014 dtrace_speculation_t *spec = &state->dts_speculations[i];
3015 dtrace_speculation_state_t current, new;
3017 if (!spec->dtsp_cleaning)
3018 continue;
3020 current = spec->dtsp_state;
3021 ASSERT(current == DTRACESPEC_DISCARDING ||
3022 current == DTRACESPEC_COMMITTINGMANY);
3024 new = DTRACESPEC_INACTIVE;
3026 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
3027 ASSERT(rv == current);
3028 spec->dtsp_cleaning = 0;
3033 * Called as part of a speculate() to get the speculative buffer associated
3034 * with a given speculation. Returns NULL if the specified speculation is not
3035 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and
3036 * the active CPU is not the specified CPU -- the speculation will be
3037 * atomically transitioned into the ACTIVEMANY state.
3039 static dtrace_buffer_t *
3040 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
3041 dtrace_specid_t which)
3043 dtrace_speculation_t *spec;
3044 dtrace_speculation_state_t current, new;
3045 dtrace_buffer_t *buf;
3047 if (which == 0)
3048 return (NULL);
3050 if (which > state->dts_nspeculations) {
3051 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
3052 return (NULL);
3055 spec = &state->dts_speculations[which - 1];
3056 buf = &spec->dtsp_buffer[cpuid];
3058 do {
3059 current = spec->dtsp_state;
3061 switch (current) {
3062 case DTRACESPEC_INACTIVE:
3063 case DTRACESPEC_COMMITTINGMANY:
3064 case DTRACESPEC_DISCARDING:
3065 return (NULL);
3067 case DTRACESPEC_COMMITTING:
3068 ASSERT(buf->dtb_offset == 0);
3069 return (NULL);
3071 case DTRACESPEC_ACTIVEONE:
3073 * This speculation is currently active on one CPU.
3074 * Check the offset in the buffer; if it's non-zero,
3075 * that CPU must be us (and we leave the state alone).
3076 * If it's zero, assume that we're starting on a new
3077 * CPU -- and change the state to indicate that the
3078 * speculation is active on more than one CPU.
3080 if (buf->dtb_offset != 0)
3081 return (buf);
3083 new = DTRACESPEC_ACTIVEMANY;
3084 break;
3086 case DTRACESPEC_ACTIVEMANY:
3087 return (buf);
3089 case DTRACESPEC_ACTIVE:
3090 new = DTRACESPEC_ACTIVEONE;
3091 break;
3093 default:
3094 ASSERT(0);
3096 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
3097 current, new) != current);
3099 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
3100 return (buf);
3104 * Return a string. In the event that the user lacks the privilege to access
3105 * arbitrary kernel memory, we copy the string out to scratch memory so that we
3106 * don't fail access checking.
3108 * dtrace_dif_variable() uses this routine as a helper for various
3109 * builtin values such as 'execname' and 'probefunc.'
3111 uintptr_t
3112 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
3113 dtrace_mstate_t *mstate)
3115 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3116 uintptr_t ret;
3117 size_t strsz;
3120 * The easy case: this probe is allowed to read all of memory, so
3121 * we can just return this as a vanilla pointer.
3123 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
3124 return (addr);
3127 * This is the tougher case: we copy the string in question from
3128 * kernel memory into scratch memory and return it that way: this
3129 * ensures that we won't trip up when access checking tests the
3130 * BYREF return value.
3132 strsz = dtrace_strlen((char *)addr, size) + 1;
3134 if (mstate->dtms_scratch_ptr + strsz >
3135 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3136 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3137 return ((uintptr_t)NULL);
3140 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
3141 strsz);
3142 ret = mstate->dtms_scratch_ptr;
3143 mstate->dtms_scratch_ptr += strsz;
3144 return (ret);
3148 * This function implements the DIF emulator's variable lookups. The emulator
3149 * passes a reserved variable identifier and optional built-in array index.
3151 static uint64_t
3152 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
3153 uint64_t ndx)
3156 * If we're accessing one of the uncached arguments, we'll turn this
3157 * into a reference in the args array.
3159 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
3160 ndx = v - DIF_VAR_ARG0;
3161 v = DIF_VAR_ARGS;
3164 switch (v) {
3165 case DIF_VAR_ARGS:
3166 if (!(mstate->dtms_access & DTRACE_ACCESS_ARGS)) {
3167 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |=
3168 CPU_DTRACE_KPRIV;
3169 return (0);
3172 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
3173 if (ndx >= sizeof (mstate->dtms_arg) /
3174 sizeof (mstate->dtms_arg[0])) {
3175 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3176 dtrace_provider_t *pv;
3177 uint64_t val;
3179 pv = mstate->dtms_probe->dtpr_provider;
3180 if (pv->dtpv_pops.dtps_getargval != NULL)
3181 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
3182 mstate->dtms_probe->dtpr_id,
3183 mstate->dtms_probe->dtpr_arg, ndx, aframes);
3184 else
3185 val = dtrace_getarg(ndx, aframes);
3188 * This is regrettably required to keep the compiler
3189 * from tail-optimizing the call to dtrace_getarg().
3190 * The condition always evaluates to true, but the
3191 * compiler has no way of figuring that out a priori.
3192 * (None of this would be necessary if the compiler
3193 * could be relied upon to _always_ tail-optimize
3194 * the call to dtrace_getarg() -- but it can't.)
3196 if (mstate->dtms_probe != NULL)
3197 return (val);
3199 ASSERT(0);
3202 return (mstate->dtms_arg[ndx]);
3204 case DIF_VAR_UREGS: {
3205 klwp_t *lwp;
3207 if (!dtrace_priv_proc(state, mstate))
3208 return (0);
3210 if ((lwp = curthread->t_lwp) == NULL) {
3211 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
3212 cpu_core[CPU->cpu_id].cpuc_dtrace_illval =
3213 (uintptr_t)NULL;
3214 return (0);
3217 return (dtrace_getreg(lwp->lwp_regs, ndx));
3220 case DIF_VAR_VMREGS: {
3221 uint64_t rval;
3223 if (!dtrace_priv_kernel(state))
3224 return (0);
3226 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3228 rval = dtrace_getvmreg(ndx,
3229 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags);
3231 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3233 return (rval);
3236 case DIF_VAR_CURTHREAD:
3237 if (!dtrace_priv_proc(state, mstate))
3238 return (0);
3239 return ((uint64_t)(uintptr_t)curthread);
3241 case DIF_VAR_TIMESTAMP:
3242 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
3243 mstate->dtms_timestamp = dtrace_gethrtime();
3244 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
3246 return (mstate->dtms_timestamp);
3248 case DIF_VAR_VTIMESTAMP:
3249 ASSERT(dtrace_vtime_references != 0);
3250 return (curthread->t_dtrace_vtime);
3252 case DIF_VAR_WALLTIMESTAMP:
3253 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
3254 mstate->dtms_walltimestamp = dtrace_gethrestime();
3255 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
3257 return (mstate->dtms_walltimestamp);
3259 case DIF_VAR_IPL:
3260 if (!dtrace_priv_kernel(state))
3261 return (0);
3262 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
3263 mstate->dtms_ipl = dtrace_getipl();
3264 mstate->dtms_present |= DTRACE_MSTATE_IPL;
3266 return (mstate->dtms_ipl);
3268 case DIF_VAR_EPID:
3269 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
3270 return (mstate->dtms_epid);
3272 case DIF_VAR_ID:
3273 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3274 return (mstate->dtms_probe->dtpr_id);
3276 case DIF_VAR_STACKDEPTH:
3277 if (!dtrace_priv_kernel(state))
3278 return (0);
3279 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
3280 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3282 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
3283 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
3285 return (mstate->dtms_stackdepth);
3287 case DIF_VAR_USTACKDEPTH:
3288 if (!dtrace_priv_proc(state, mstate))
3289 return (0);
3290 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
3292 * See comment in DIF_VAR_PID.
3294 if (DTRACE_ANCHORED(mstate->dtms_probe) &&
3295 CPU_ON_INTR(CPU)) {
3296 mstate->dtms_ustackdepth = 0;
3297 } else {
3298 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3299 mstate->dtms_ustackdepth =
3300 dtrace_getustackdepth();
3301 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3303 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
3305 return (mstate->dtms_ustackdepth);
3307 case DIF_VAR_CALLER:
3308 if (!dtrace_priv_kernel(state))
3309 return (0);
3310 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
3311 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3313 if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
3315 * If this is an unanchored probe, we are
3316 * required to go through the slow path:
3317 * dtrace_caller() only guarantees correct
3318 * results for anchored probes.
3320 pc_t caller[2];
3322 dtrace_getpcstack(caller, 2, aframes,
3323 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
3324 mstate->dtms_caller = caller[1];
3325 } else if ((mstate->dtms_caller =
3326 dtrace_caller(aframes)) == -1) {
3328 * We have failed to do this the quick way;
3329 * we must resort to the slower approach of
3330 * calling dtrace_getpcstack().
3332 pc_t caller;
3334 dtrace_getpcstack(&caller, 1, aframes, NULL);
3335 mstate->dtms_caller = caller;
3338 mstate->dtms_present |= DTRACE_MSTATE_CALLER;
3340 return (mstate->dtms_caller);
3342 case DIF_VAR_UCALLER:
3343 if (!dtrace_priv_proc(state, mstate))
3344 return (0);
3346 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
3347 uint64_t ustack[3];
3350 * dtrace_getupcstack() fills in the first uint64_t
3351 * with the current PID. The second uint64_t will
3352 * be the program counter at user-level. The third
3353 * uint64_t will contain the caller, which is what
3354 * we're after.
3356 ustack[2] = (uintptr_t)NULL;
3357 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3358 dtrace_getupcstack(ustack, 3);
3359 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3360 mstate->dtms_ucaller = ustack[2];
3361 mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
3364 return (mstate->dtms_ucaller);
3366 case DIF_VAR_PROBEPROV:
3367 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3368 return (dtrace_dif_varstr(
3369 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name,
3370 state, mstate));
3372 case DIF_VAR_PROBEMOD:
3373 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3374 return (dtrace_dif_varstr(
3375 (uintptr_t)mstate->dtms_probe->dtpr_mod,
3376 state, mstate));
3378 case DIF_VAR_PROBEFUNC:
3379 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3380 return (dtrace_dif_varstr(
3381 (uintptr_t)mstate->dtms_probe->dtpr_func,
3382 state, mstate));
3384 case DIF_VAR_PROBENAME:
3385 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3386 return (dtrace_dif_varstr(
3387 (uintptr_t)mstate->dtms_probe->dtpr_name,
3388 state, mstate));
3390 case DIF_VAR_PID:
3391 if (!dtrace_priv_proc(state, mstate))
3392 return (0);
3395 * Note that we are assuming that an unanchored probe is
3396 * always due to a high-level interrupt. (And we're assuming
3397 * that there is only a single high level interrupt.)
3399 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3400 return (pid0.pid_id);
3403 * It is always safe to dereference one's own t_procp pointer:
3404 * it always points to a valid, allocated proc structure.
3405 * Further, it is always safe to dereference the p_pidp member
3406 * of one's own proc structure. (These are truisms becuase
3407 * threads and processes don't clean up their own state --
3408 * they leave that task to whomever reaps them.)
3410 return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
3412 case DIF_VAR_PPID:
3413 if (!dtrace_priv_proc(state, mstate))
3414 return (0);
3417 * See comment in DIF_VAR_PID.
3419 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3420 return (pid0.pid_id);
3423 * It is always safe to dereference one's own t_procp pointer:
3424 * it always points to a valid, allocated proc structure.
3425 * (This is true because threads don't clean up their own
3426 * state -- they leave that task to whomever reaps them.)
3428 return ((uint64_t)curthread->t_procp->p_ppid);
3430 case DIF_VAR_TID:
3432 * See comment in DIF_VAR_PID.
3434 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3435 return (0);
3437 return ((uint64_t)curthread->t_tid);
3439 case DIF_VAR_EXECNAME:
3440 if (!dtrace_priv_proc(state, mstate))
3441 return (0);
3444 * See comment in DIF_VAR_PID.
3446 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3447 return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
3450 * It is always safe to dereference one's own t_procp pointer:
3451 * it always points to a valid, allocated proc structure.
3452 * (This is true because threads don't clean up their own
3453 * state -- they leave that task to whomever reaps them.)
3455 return (dtrace_dif_varstr(
3456 (uintptr_t)curthread->t_procp->p_user.u_comm,
3457 state, mstate));
3459 case DIF_VAR_ZONENAME:
3460 if (!dtrace_priv_proc(state, mstate))
3461 return (0);
3464 * See comment in DIF_VAR_PID.
3466 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3467 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
3470 * It is always safe to dereference one's own t_procp pointer:
3471 * it always points to a valid, allocated proc structure.
3472 * (This is true because threads don't clean up their own
3473 * state -- they leave that task to whomever reaps them.)
3475 return (dtrace_dif_varstr(
3476 (uintptr_t)curthread->t_procp->p_zone->zone_name,
3477 state, mstate));
3479 case DIF_VAR_UID:
3480 if (!dtrace_priv_proc(state, mstate))
3481 return (0);
3484 * See comment in DIF_VAR_PID.
3486 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3487 return ((uint64_t)p0.p_cred->cr_uid);
3490 * It is always safe to dereference one's own t_procp pointer:
3491 * it always points to a valid, allocated proc structure.
3492 * (This is true because threads don't clean up their own
3493 * state -- they leave that task to whomever reaps them.)
3495 * Additionally, it is safe to dereference one's own process
3496 * credential, since this is never NULL after process birth.
3498 return ((uint64_t)curthread->t_procp->p_cred->cr_uid);
3500 case DIF_VAR_GID:
3501 if (!dtrace_priv_proc(state, mstate))
3502 return (0);
3505 * See comment in DIF_VAR_PID.
3507 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3508 return ((uint64_t)p0.p_cred->cr_gid);
3511 * It is always safe to dereference one's own t_procp pointer:
3512 * it always points to a valid, allocated proc structure.
3513 * (This is true because threads don't clean up their own
3514 * state -- they leave that task to whomever reaps them.)
3516 * Additionally, it is safe to dereference one's own process
3517 * credential, since this is never NULL after process birth.
3519 return ((uint64_t)curthread->t_procp->p_cred->cr_gid);
3521 case DIF_VAR_ERRNO: {
3522 klwp_t *lwp;
3523 if (!dtrace_priv_proc(state, mstate))
3524 return (0);
3527 * See comment in DIF_VAR_PID.
3529 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3530 return (0);
3533 * It is always safe to dereference one's own t_lwp pointer in
3534 * the event that this pointer is non-NULL. (This is true
3535 * because threads and lwps don't clean up their own state --
3536 * they leave that task to whomever reaps them.)
3538 if ((lwp = curthread->t_lwp) == NULL)
3539 return (0);
3541 return ((uint64_t)lwp->lwp_errno);
3543 default:
3544 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3545 return (0);
3549 static void
3550 dtrace_dif_variable_write(dtrace_mstate_t *mstate, dtrace_state_t *state,
3551 uint64_t v, uint64_t ndx, uint64_t data)
3553 switch (v) {
3554 case DIF_VAR_UREGS: {
3555 klwp_t *lwp;
3557 if (dtrace_destructive_disallow ||
3558 !dtrace_priv_proc_control(state, mstate)) {
3559 return;
3562 if ((lwp = curthread->t_lwp) == NULL) {
3563 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
3564 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = 0;
3565 return;
3568 dtrace_setreg(lwp->lwp_regs, ndx, data);
3569 return;
3572 default:
3573 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3574 return;
3578 typedef enum dtrace_json_state {
3579 DTRACE_JSON_REST = 1,
3580 DTRACE_JSON_OBJECT,
3581 DTRACE_JSON_STRING,
3582 DTRACE_JSON_STRING_ESCAPE,
3583 DTRACE_JSON_STRING_ESCAPE_UNICODE,
3584 DTRACE_JSON_COLON,
3585 DTRACE_JSON_COMMA,
3586 DTRACE_JSON_VALUE,
3587 DTRACE_JSON_IDENTIFIER,
3588 DTRACE_JSON_NUMBER,
3589 DTRACE_JSON_NUMBER_FRAC,
3590 DTRACE_JSON_NUMBER_EXP,
3591 DTRACE_JSON_COLLECT_OBJECT
3592 } dtrace_json_state_t;
3595 * This function possesses just enough knowledge about JSON to extract a single
3596 * value from a JSON string and store it in the scratch buffer. It is able
3597 * to extract nested object values, and members of arrays by index.
3599 * elemlist is a list of JSON keys, stored as packed NUL-terminated strings, to
3600 * be looked up as we descend into the object tree. e.g.
3602 * foo[0].bar.baz[32] --> "foo" NUL "0" NUL "bar" NUL "baz" NUL "32" NUL
3603 * with nelems = 5.
3605 * The run time of this function must be bounded above by strsize to limit the
3606 * amount of work done in probe context. As such, it is implemented as a
3607 * simple state machine, reading one character at a time using safe loads
3608 * until we find the requested element, hit a parsing error or run off the
3609 * end of the object or string.
3611 * As there is no way for a subroutine to return an error without interrupting
3612 * clause execution, we simply return NULL in the event of a missing key or any
3613 * other error condition. Each NULL return in this function is commented with
3614 * the error condition it represents -- parsing or otherwise.
3616 * The set of states for the state machine closely matches the JSON
3617 * specification (http://json.org/). Briefly:
3619 * DTRACE_JSON_REST:
3620 * Skip whitespace until we find either a top-level Object, moving
3621 * to DTRACE_JSON_OBJECT; or an Array, moving to DTRACE_JSON_VALUE.
3623 * DTRACE_JSON_OBJECT:
3624 * Locate the next key String in an Object. Sets a flag to denote
3625 * the next String as a key string and moves to DTRACE_JSON_STRING.
3627 * DTRACE_JSON_COLON:
3628 * Skip whitespace until we find the colon that separates key Strings
3629 * from their values. Once found, move to DTRACE_JSON_VALUE.
3631 * DTRACE_JSON_VALUE:
3632 * Detects the type of the next value (String, Number, Identifier, Object
3633 * or Array) and routes to the states that process that type. Here we also
3634 * deal with the element selector list if we are requested to traverse down
3635 * into the object tree.
3637 * DTRACE_JSON_COMMA:
3638 * Skip whitespace until we find the comma that separates key-value pairs
3639 * in Objects (returning to DTRACE_JSON_OBJECT) or values in Arrays
3640 * (similarly DTRACE_JSON_VALUE). All following literal value processing
3641 * states return to this state at the end of their value, unless otherwise
3642 * noted.
3644 * DTRACE_JSON_NUMBER, DTRACE_JSON_NUMBER_FRAC, DTRACE_JSON_NUMBER_EXP:
3645 * Processes a Number literal from the JSON, including any exponent
3646 * component that may be present. Numbers are returned as strings, which
3647 * may be passed to strtoll() if an integer is required.
3649 * DTRACE_JSON_IDENTIFIER:
3650 * Processes a "true", "false" or "null" literal in the JSON.
3652 * DTRACE_JSON_STRING, DTRACE_JSON_STRING_ESCAPE,
3653 * DTRACE_JSON_STRING_ESCAPE_UNICODE:
3654 * Processes a String literal from the JSON, whether the String denotes
3655 * a key, a value or part of a larger Object. Handles all escape sequences
3656 * present in the specification, including four-digit unicode characters,
3657 * but merely includes the escape sequence without converting it to the
3658 * actual escaped character. If the String is flagged as a key, we
3659 * move to DTRACE_JSON_COLON rather than DTRACE_JSON_COMMA.
3661 * DTRACE_JSON_COLLECT_OBJECT:
3662 * This state collects an entire Object (or Array), correctly handling
3663 * embedded strings. If the full element selector list matches this nested
3664 * object, we return the Object in full as a string. If not, we use this
3665 * state to skip to the next value at this level and continue processing.
3667 * NOTE: This function uses various macros from strtolctype.h to manipulate
3668 * digit values, etc -- these have all been checked to ensure they make
3669 * no additional function calls.
3671 static char *
3672 dtrace_json(uint64_t size, uintptr_t json, char *elemlist, int nelems,
3673 char *dest)
3675 dtrace_json_state_t state = DTRACE_JSON_REST;
3676 int64_t array_elem = INT64_MIN;
3677 int64_t array_pos = 0;
3678 uint8_t escape_unicount = 0;
3679 boolean_t string_is_key = B_FALSE;
3680 boolean_t collect_object = B_FALSE;
3681 boolean_t found_key = B_FALSE;
3682 boolean_t in_array = B_FALSE;
3683 uint32_t braces = 0, brackets = 0;
3684 char *elem = elemlist;
3685 char *dd = dest;
3686 uintptr_t cur;
3688 for (cur = json; cur < json + size; cur++) {
3689 char cc = dtrace_load8(cur);
3690 if (cc == '\0')
3691 return (NULL);
3693 switch (state) {
3694 case DTRACE_JSON_REST:
3695 if (isspace(cc))
3696 break;
3698 if (cc == '{') {
3699 state = DTRACE_JSON_OBJECT;
3700 break;
3703 if (cc == '[') {
3704 in_array = B_TRUE;
3705 array_pos = 0;
3706 array_elem = dtrace_strtoll(elem, 10, size);
3707 found_key = array_elem == 0 ? B_TRUE : B_FALSE;
3708 state = DTRACE_JSON_VALUE;
3709 break;
3713 * ERROR: expected to find a top-level object or array.
3715 return (NULL);
3716 case DTRACE_JSON_OBJECT:
3717 if (isspace(cc))
3718 break;
3720 if (cc == '"') {
3721 state = DTRACE_JSON_STRING;
3722 string_is_key = B_TRUE;
3723 break;
3727 * ERROR: either the object did not start with a key
3728 * string, or we've run off the end of the object
3729 * without finding the requested key.
3731 return (NULL);
3732 case DTRACE_JSON_STRING:
3733 if (cc == '\\') {
3734 *dd++ = '\\';
3735 state = DTRACE_JSON_STRING_ESCAPE;
3736 break;
3739 if (cc == '"') {
3740 if (collect_object) {
3742 * We don't reset the dest here, as
3743 * the string is part of a larger
3744 * object being collected.
3746 *dd++ = cc;
3747 collect_object = B_FALSE;
3748 state = DTRACE_JSON_COLLECT_OBJECT;
3749 break;
3751 *dd = '\0';
3752 dd = dest; /* reset string buffer */
3753 if (string_is_key) {
3754 if (dtrace_strncmp(dest, elem,
3755 size) == 0)
3756 found_key = B_TRUE;
3757 } else if (found_key) {
3758 if (nelems > 1) {
3760 * We expected an object, not
3761 * this string.
3763 return (NULL);
3765 return (dest);
3767 state = string_is_key ? DTRACE_JSON_COLON :
3768 DTRACE_JSON_COMMA;
3769 string_is_key = B_FALSE;
3770 break;
3773 *dd++ = cc;
3774 break;
3775 case DTRACE_JSON_STRING_ESCAPE:
3776 *dd++ = cc;
3777 if (cc == 'u') {
3778 escape_unicount = 0;
3779 state = DTRACE_JSON_STRING_ESCAPE_UNICODE;
3780 } else {
3781 state = DTRACE_JSON_STRING;
3783 break;
3784 case DTRACE_JSON_STRING_ESCAPE_UNICODE:
3785 if (!isxdigit(cc)) {
3787 * ERROR: invalid unicode escape, expected
3788 * four valid hexidecimal digits.
3790 return (NULL);
3793 *dd++ = cc;
3794 if (++escape_unicount == 4)
3795 state = DTRACE_JSON_STRING;
3796 break;
3797 case DTRACE_JSON_COLON:
3798 if (isspace(cc))
3799 break;
3801 if (cc == ':') {
3802 state = DTRACE_JSON_VALUE;
3803 break;
3807 * ERROR: expected a colon.
3809 return (NULL);
3810 case DTRACE_JSON_COMMA:
3811 if (isspace(cc))
3812 break;
3814 if (cc == ',') {
3815 if (in_array) {
3816 state = DTRACE_JSON_VALUE;
3817 if (++array_pos == array_elem)
3818 found_key = B_TRUE;
3819 } else {
3820 state = DTRACE_JSON_OBJECT;
3822 break;
3826 * ERROR: either we hit an unexpected character, or
3827 * we reached the end of the object or array without
3828 * finding the requested key.
3830 return (NULL);
3831 case DTRACE_JSON_IDENTIFIER:
3832 if (islower(cc)) {
3833 *dd++ = cc;
3834 break;
3837 *dd = '\0';
3838 dd = dest; /* reset string buffer */
3840 if (dtrace_strncmp(dest, "true", 5) == 0 ||
3841 dtrace_strncmp(dest, "false", 6) == 0 ||
3842 dtrace_strncmp(dest, "null", 5) == 0) {
3843 if (found_key) {
3844 if (nelems > 1) {
3846 * ERROR: We expected an object,
3847 * not this identifier.
3849 return (NULL);
3851 return (dest);
3852 } else {
3853 cur--;
3854 state = DTRACE_JSON_COMMA;
3855 break;
3860 * ERROR: we did not recognise the identifier as one
3861 * of those in the JSON specification.
3863 return (NULL);
3864 case DTRACE_JSON_NUMBER:
3865 if (cc == '.') {
3866 *dd++ = cc;
3867 state = DTRACE_JSON_NUMBER_FRAC;
3868 break;
3871 if (cc == 'x' || cc == 'X') {
3873 * ERROR: specification explicitly excludes
3874 * hexidecimal or octal numbers.
3876 return (NULL);
3879 /* FALLTHRU */
3880 case DTRACE_JSON_NUMBER_FRAC:
3881 if (cc == 'e' || cc == 'E') {
3882 *dd++ = cc;
3883 state = DTRACE_JSON_NUMBER_EXP;
3884 break;
3887 if (cc == '+' || cc == '-') {
3889 * ERROR: expect sign as part of exponent only.
3891 return (NULL);
3893 /* FALLTHRU */
3894 case DTRACE_JSON_NUMBER_EXP:
3895 if (isdigit(cc) || cc == '+' || cc == '-') {
3896 *dd++ = cc;
3897 break;
3900 *dd = '\0';
3901 dd = dest; /* reset string buffer */
3902 if (found_key) {
3903 if (nelems > 1) {
3905 * ERROR: We expected an object, not
3906 * this number.
3908 return (NULL);
3910 return (dest);
3913 cur--;
3914 state = DTRACE_JSON_COMMA;
3915 break;
3916 case DTRACE_JSON_VALUE:
3917 if (isspace(cc))
3918 break;
3920 if (cc == '{' || cc == '[') {
3921 if (nelems > 1 && found_key) {
3922 in_array = cc == '[' ? B_TRUE : B_FALSE;
3924 * If our element selector directs us
3925 * to descend into this nested object,
3926 * then move to the next selector
3927 * element in the list and restart the
3928 * state machine.
3930 while (*elem != '\0')
3931 elem++;
3932 elem++; /* skip the inter-element NUL */
3933 nelems--;
3934 dd = dest;
3935 if (in_array) {
3936 state = DTRACE_JSON_VALUE;
3937 array_pos = 0;
3938 array_elem = dtrace_strtoll(
3939 elem, 10, size);
3940 found_key = array_elem == 0 ?
3941 B_TRUE : B_FALSE;
3942 } else {
3943 found_key = B_FALSE;
3944 state = DTRACE_JSON_OBJECT;
3946 break;
3950 * Otherwise, we wish to either skip this
3951 * nested object or return it in full.
3953 if (cc == '[')
3954 brackets = 1;
3955 else
3956 braces = 1;
3957 *dd++ = cc;
3958 state = DTRACE_JSON_COLLECT_OBJECT;
3959 break;
3962 if (cc == '"') {
3963 state = DTRACE_JSON_STRING;
3964 break;
3967 if (islower(cc)) {
3969 * Here we deal with true, false and null.
3971 *dd++ = cc;
3972 state = DTRACE_JSON_IDENTIFIER;
3973 break;
3976 if (cc == '-' || isdigit(cc)) {
3977 *dd++ = cc;
3978 state = DTRACE_JSON_NUMBER;
3979 break;
3983 * ERROR: unexpected character at start of value.
3985 return (NULL);
3986 case DTRACE_JSON_COLLECT_OBJECT:
3987 if (cc == '\0')
3989 * ERROR: unexpected end of input.
3991 return (NULL);
3993 *dd++ = cc;
3994 if (cc == '"') {
3995 collect_object = B_TRUE;
3996 state = DTRACE_JSON_STRING;
3997 break;
4000 if (cc == ']') {
4001 if (brackets-- == 0) {
4003 * ERROR: unbalanced brackets.
4005 return (NULL);
4007 } else if (cc == '}') {
4008 if (braces-- == 0) {
4010 * ERROR: unbalanced braces.
4012 return (NULL);
4014 } else if (cc == '{') {
4015 braces++;
4016 } else if (cc == '[') {
4017 brackets++;
4020 if (brackets == 0 && braces == 0) {
4021 if (found_key) {
4022 *dd = '\0';
4023 return (dest);
4025 dd = dest; /* reset string buffer */
4026 state = DTRACE_JSON_COMMA;
4028 break;
4031 return (NULL);
4035 * Emulate the execution of DTrace ID subroutines invoked by the call opcode.
4036 * Notice that we don't bother validating the proper number of arguments or
4037 * their types in the tuple stack. This isn't needed because all argument
4038 * interpretation is safe because of our load safety -- the worst that can
4039 * happen is that a bogus program can obtain bogus results.
4041 static void
4042 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
4043 dtrace_key_t *tupregs, int nargs,
4044 dtrace_mstate_t *mstate, dtrace_state_t *state)
4046 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
4047 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
4048 dtrace_vstate_t *vstate = &state->dts_vstate;
4050 union {
4051 mutex_impl_t mi;
4052 uint64_t mx;
4053 } m;
4055 union {
4056 krwlock_t ri;
4057 uintptr_t rw;
4058 } r;
4060 switch (subr) {
4061 case DIF_SUBR_RAND:
4062 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875;
4063 break;
4065 case DIF_SUBR_MUTEX_OWNED:
4066 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4067 mstate, vstate)) {
4068 regs[rd] = (uintptr_t)NULL;
4069 break;
4072 m.mx = dtrace_load64(tupregs[0].dttk_value);
4073 if (MUTEX_TYPE_ADAPTIVE(&m.mi))
4074 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
4075 else
4076 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
4077 break;
4079 case DIF_SUBR_MUTEX_OWNER:
4080 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4081 mstate, vstate)) {
4082 regs[rd] = (uintptr_t)NULL;
4083 break;
4086 m.mx = dtrace_load64(tupregs[0].dttk_value);
4087 if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
4088 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
4089 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
4090 else
4091 regs[rd] = (uintptr_t)NULL;
4092 break;
4094 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
4095 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4096 mstate, vstate)) {
4097 regs[rd] = (uintptr_t)NULL;
4098 break;
4101 m.mx = dtrace_load64(tupregs[0].dttk_value);
4102 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
4103 break;
4105 case DIF_SUBR_MUTEX_TYPE_SPIN:
4106 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4107 mstate, vstate)) {
4108 regs[rd] = (uintptr_t)NULL;
4109 break;
4112 m.mx = dtrace_load64(tupregs[0].dttk_value);
4113 regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
4114 break;
4116 case DIF_SUBR_RW_READ_HELD: {
4117 uintptr_t tmp;
4119 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
4120 mstate, vstate)) {
4121 regs[rd] = (uintptr_t)NULL;
4122 break;
4125 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4126 regs[rd] = _RW_READ_HELD(&r.ri, tmp);
4127 break;
4130 case DIF_SUBR_RW_WRITE_HELD:
4131 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
4132 mstate, vstate)) {
4133 regs[rd] = (uintptr_t)NULL;
4134 break;
4137 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4138 regs[rd] = _RW_WRITE_HELD(&r.ri);
4139 break;
4141 case DIF_SUBR_RW_ISWRITER:
4142 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
4143 mstate, vstate)) {
4144 regs[rd] = (uintptr_t)NULL;
4145 break;
4148 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4149 regs[rd] = _RW_ISWRITER(&r.ri);
4150 break;
4152 case DIF_SUBR_BCOPY: {
4154 * We need to be sure that the destination is in the scratch
4155 * region -- no other region is allowed.
4157 uintptr_t src = tupregs[0].dttk_value;
4158 uintptr_t dest = tupregs[1].dttk_value;
4159 size_t size = tupregs[2].dttk_value;
4161 if (!dtrace_inscratch(dest, size, mstate)) {
4162 *flags |= CPU_DTRACE_BADADDR;
4163 *illval = regs[rd];
4164 break;
4167 if (!dtrace_canload(src, size, mstate, vstate)) {
4168 regs[rd] = (uintptr_t)NULL;
4169 break;
4172 dtrace_bcopy((void *)src, (void *)dest, size);
4173 break;
4176 case DIF_SUBR_ALLOCA:
4177 case DIF_SUBR_COPYIN: {
4178 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
4179 uint64_t size =
4180 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
4181 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
4184 * This action doesn't require any credential checks since
4185 * probes will not activate in user contexts to which the
4186 * enabling user does not have permissions.
4190 * Rounding up the user allocation size could have overflowed
4191 * a large, bogus allocation (like -1ULL) to 0.
4193 if (scratch_size < size ||
4194 !DTRACE_INSCRATCH(mstate, scratch_size)) {
4195 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4196 regs[rd] = (uintptr_t)NULL;
4197 break;
4200 if (subr == DIF_SUBR_COPYIN) {
4201 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4202 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
4203 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4206 mstate->dtms_scratch_ptr += scratch_size;
4207 regs[rd] = dest;
4208 break;
4211 case DIF_SUBR_COPYINTO: {
4212 uint64_t size = tupregs[1].dttk_value;
4213 uintptr_t dest = tupregs[2].dttk_value;
4216 * This action doesn't require any credential checks since
4217 * probes will not activate in user contexts to which the
4218 * enabling user does not have permissions.
4220 if (!dtrace_inscratch(dest, size, mstate)) {
4221 *flags |= CPU_DTRACE_BADADDR;
4222 *illval = regs[rd];
4223 break;
4226 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4227 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
4228 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4229 break;
4232 case DIF_SUBR_COPYINSTR: {
4233 uintptr_t dest = mstate->dtms_scratch_ptr;
4234 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4236 if (nargs > 1 && tupregs[1].dttk_value < size)
4237 size = tupregs[1].dttk_value + 1;
4240 * This action doesn't require any credential checks since
4241 * probes will not activate in user contexts to which the
4242 * enabling user does not have permissions.
4244 if (!DTRACE_INSCRATCH(mstate, size)) {
4245 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4246 regs[rd] = (uintptr_t)NULL;
4247 break;
4250 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4251 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
4252 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4254 ((char *)dest)[size - 1] = '\0';
4255 mstate->dtms_scratch_ptr += size;
4256 regs[rd] = dest;
4257 break;
4260 case DIF_SUBR_MSGSIZE:
4261 case DIF_SUBR_MSGDSIZE: {
4262 uintptr_t baddr = tupregs[0].dttk_value, daddr;
4263 uintptr_t wptr, rptr;
4264 size_t count = 0;
4265 int cont = 0;
4267 while (baddr != (uintptr_t)NULL &&
4268 !(*flags & CPU_DTRACE_FAULT)) {
4269 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
4270 vstate)) {
4271 regs[rd] = (uintptr_t)NULL;
4272 break;
4275 wptr = dtrace_loadptr(baddr +
4276 offsetof(mblk_t, b_wptr));
4278 rptr = dtrace_loadptr(baddr +
4279 offsetof(mblk_t, b_rptr));
4281 if (wptr < rptr) {
4282 *flags |= CPU_DTRACE_BADADDR;
4283 *illval = tupregs[0].dttk_value;
4284 break;
4287 daddr = dtrace_loadptr(baddr +
4288 offsetof(mblk_t, b_datap));
4290 baddr = dtrace_loadptr(baddr +
4291 offsetof(mblk_t, b_cont));
4294 * We want to prevent against denial-of-service here,
4295 * so we're only going to search the list for
4296 * dtrace_msgdsize_max mblks.
4298 if (cont++ > dtrace_msgdsize_max) {
4299 *flags |= CPU_DTRACE_ILLOP;
4300 break;
4303 if (subr == DIF_SUBR_MSGDSIZE) {
4304 if (dtrace_load8(daddr +
4305 offsetof(dblk_t, db_type)) != M_DATA)
4306 continue;
4309 count += wptr - rptr;
4312 if (!(*flags & CPU_DTRACE_FAULT))
4313 regs[rd] = count;
4315 break;
4318 case DIF_SUBR_PROGENYOF: {
4319 pid_t pid = tupregs[0].dttk_value;
4320 proc_t *p;
4321 int rval = 0;
4323 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4325 for (p = curthread->t_procp; p != NULL; p = p->p_parent) {
4326 if (p->p_pidp->pid_id == pid) {
4327 rval = 1;
4328 break;
4332 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4334 regs[rd] = rval;
4335 break;
4338 case DIF_SUBR_SPECULATION:
4339 regs[rd] = dtrace_speculation(state);
4340 break;
4342 case DIF_SUBR_COPYOUT: {
4343 uintptr_t kaddr = tupregs[0].dttk_value;
4344 uintptr_t uaddr = tupregs[1].dttk_value;
4345 uint64_t size = tupregs[2].dttk_value;
4347 if (!dtrace_destructive_disallow &&
4348 dtrace_priv_proc_control(state, mstate) &&
4349 !dtrace_istoxic(kaddr, size) &&
4350 dtrace_canload(kaddr, size, mstate, vstate)) {
4351 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4352 dtrace_copyout(kaddr, uaddr, size, flags);
4353 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4355 break;
4358 case DIF_SUBR_COPYOUTSTR: {
4359 uintptr_t kaddr = tupregs[0].dttk_value;
4360 uintptr_t uaddr = tupregs[1].dttk_value;
4361 uint64_t size = tupregs[2].dttk_value;
4362 size_t lim;
4364 if (!dtrace_destructive_disallow &&
4365 dtrace_priv_proc_control(state, mstate) &&
4366 !dtrace_istoxic(kaddr, size) &&
4367 dtrace_strcanload(kaddr, size, &lim, mstate, vstate)) {
4368 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4369 dtrace_copyoutstr(kaddr, uaddr, lim, flags);
4370 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4372 break;
4375 case DIF_SUBR_STRLEN: {
4376 size_t size = state->dts_options[DTRACEOPT_STRSIZE];
4377 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
4378 size_t lim;
4380 if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) {
4381 regs[rd] = (uintptr_t)NULL;
4382 break;
4384 regs[rd] = dtrace_strlen((char *)addr, lim);
4386 break;
4389 case DIF_SUBR_STRCHR:
4390 case DIF_SUBR_STRRCHR: {
4392 * We're going to iterate over the string looking for the
4393 * specified character. We will iterate until we have reached
4394 * the string length or we have found the character. If this
4395 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
4396 * of the specified character instead of the first.
4398 uintptr_t addr = tupregs[0].dttk_value;
4399 uintptr_t addr_limit;
4400 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4401 size_t lim;
4402 char c, target = (char)tupregs[1].dttk_value;
4404 if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) {
4405 regs[rd] = (uintptr_t)NULL;
4406 break;
4408 addr_limit = addr + lim;
4410 for (regs[rd] = (uintptr_t)NULL; addr < addr_limit; addr++) {
4411 if ((c = dtrace_load8(addr)) == target) {
4412 regs[rd] = addr;
4414 if (subr == DIF_SUBR_STRCHR)
4415 break;
4417 if (c == '\0')
4418 break;
4421 break;
4424 case DIF_SUBR_STRSTR:
4425 case DIF_SUBR_INDEX:
4426 case DIF_SUBR_RINDEX: {
4428 * We're going to iterate over the string looking for the
4429 * specified string. We will iterate until we have reached
4430 * the string length or we have found the string. (Yes, this
4431 * is done in the most naive way possible -- but considering
4432 * that the string we're searching for is likely to be
4433 * relatively short, the complexity of Rabin-Karp or similar
4434 * hardly seems merited.)
4436 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
4437 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
4438 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4439 size_t len = dtrace_strlen(addr, size);
4440 size_t sublen = dtrace_strlen(substr, size);
4441 char *limit = addr + len, *orig = addr;
4442 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
4443 int inc = 1;
4445 regs[rd] = notfound;
4447 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) {
4448 regs[rd] = (uintptr_t)NULL;
4449 break;
4452 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate,
4453 vstate)) {
4454 regs[rd] = (uintptr_t)NULL;
4455 break;
4459 * strstr() and index()/rindex() have similar semantics if
4460 * both strings are the empty string: strstr() returns a
4461 * pointer to the (empty) string, and index() and rindex()
4462 * both return index 0 (regardless of any position argument).
4464 if (sublen == 0 && len == 0) {
4465 if (subr == DIF_SUBR_STRSTR)
4466 regs[rd] = (uintptr_t)addr;
4467 else
4468 regs[rd] = (uintptr_t)NULL;
4469 break;
4472 if (subr != DIF_SUBR_STRSTR) {
4473 if (subr == DIF_SUBR_RINDEX) {
4474 limit = orig - 1;
4475 addr += len;
4476 inc = -1;
4480 * Both index() and rindex() take an optional position
4481 * argument that denotes the starting position.
4483 if (nargs == 3) {
4484 int64_t pos = (int64_t)tupregs[2].dttk_value;
4487 * If the position argument to index() is
4488 * negative, Perl implicitly clamps it at
4489 * zero. This semantic is a little surprising
4490 * given the special meaning of negative
4491 * positions to similar Perl functions like
4492 * substr(), but it appears to reflect a
4493 * notion that index() can start from a
4494 * negative index and increment its way up to
4495 * the string. Given this notion, Perl's
4496 * rindex() is at least self-consistent in
4497 * that it implicitly clamps positions greater
4498 * than the string length to be the string
4499 * length. Where Perl completely loses
4500 * coherence, however, is when the specified
4501 * substring is the empty string (""). In
4502 * this case, even if the position is
4503 * negative, rindex() returns 0 -- and even if
4504 * the position is greater than the length,
4505 * index() returns the string length. These
4506 * semantics violate the notion that index()
4507 * should never return a value less than the
4508 * specified position and that rindex() should
4509 * never return a value greater than the
4510 * specified position. (One assumes that
4511 * these semantics are artifacts of Perl's
4512 * implementation and not the results of
4513 * deliberate design -- it beggars belief that
4514 * even Larry Wall could desire such oddness.)
4515 * While in the abstract one would wish for
4516 * consistent position semantics across
4517 * substr(), index() and rindex() -- or at the
4518 * very least self-consistent position
4519 * semantics for index() and rindex() -- we
4520 * instead opt to keep with the extant Perl
4521 * semantics, in all their broken glory. (Do
4522 * we have more desire to maintain Perl's
4523 * semantics than Perl does? Probably.)
4525 if (subr == DIF_SUBR_RINDEX) {
4526 if (pos < 0) {
4527 if (sublen == 0)
4528 regs[rd] =
4529 (uintptr_t)NULL;
4530 break;
4533 if (pos > len)
4534 pos = len;
4535 } else {
4536 if (pos < 0)
4537 pos = 0;
4539 if (pos >= len) {
4540 if (sublen == 0)
4541 regs[rd] = len;
4542 break;
4546 addr = orig + pos;
4550 for (regs[rd] = notfound; addr != limit; addr += inc) {
4551 if (dtrace_strncmp(addr, substr, sublen) == 0) {
4552 if (subr != DIF_SUBR_STRSTR) {
4554 * As D index() and rindex() are
4555 * modeled on Perl (and not on awk),
4556 * we return a zero-based (and not a
4557 * one-based) index. (For you Perl
4558 * weenies: no, we're not going to add
4559 * $[ -- and shouldn't you be at a con
4560 * or something?)
4562 regs[rd] = (uintptr_t)(addr - orig);
4563 break;
4566 ASSERT(subr == DIF_SUBR_STRSTR);
4567 regs[rd] = (uintptr_t)addr;
4568 break;
4572 break;
4575 case DIF_SUBR_STRTOK: {
4576 uintptr_t addr = tupregs[0].dttk_value;
4577 uintptr_t tokaddr = tupregs[1].dttk_value;
4578 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4579 uintptr_t limit, toklimit;
4580 size_t clim;
4581 uint8_t c, tokmap[32]; /* 256 / 8 */
4582 char *dest = (char *)mstate->dtms_scratch_ptr;
4583 int i;
4586 * Check both the token buffer and (later) the input buffer,
4587 * since both could be non-scratch addresses.
4589 if (!dtrace_strcanload(tokaddr, size, &clim, mstate, vstate)) {
4590 regs[rd] = (uintptr_t)NULL;
4591 break;
4593 toklimit = tokaddr + clim;
4595 if (!DTRACE_INSCRATCH(mstate, size)) {
4596 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4597 regs[rd] = (uintptr_t)NULL;
4598 break;
4601 if (addr == (uintptr_t)NULL) {
4603 * If the address specified is NULL, we use our saved
4604 * strtok pointer from the mstate. Note that this
4605 * means that the saved strtok pointer is _only_
4606 * valid within multiple enablings of the same probe --
4607 * it behaves like an implicit clause-local variable.
4609 addr = mstate->dtms_strtok;
4610 limit = mstate->dtms_strtok_limit;
4611 } else {
4613 * If the user-specified address is non-NULL we must
4614 * access check it. This is the only time we have
4615 * a chance to do so, since this address may reside
4616 * in the string table of this clause-- future calls
4617 * (when we fetch addr from mstate->dtms_strtok)
4618 * would fail this access check.
4620 if (!dtrace_strcanload(addr, size, &clim, mstate,
4621 vstate)) {
4622 regs[rd] = (uintptr_t)NULL;
4623 break;
4625 limit = addr + clim;
4629 * First, zero the token map, and then process the token
4630 * string -- setting a bit in the map for every character
4631 * found in the token string.
4633 for (i = 0; i < sizeof (tokmap); i++)
4634 tokmap[i] = 0;
4636 for (; tokaddr < toklimit; tokaddr++) {
4637 if ((c = dtrace_load8(tokaddr)) == '\0')
4638 break;
4640 ASSERT((c >> 3) < sizeof (tokmap));
4641 tokmap[c >> 3] |= (1 << (c & 0x7));
4644 for (; addr < limit; addr++) {
4646 * We're looking for a character that is _not_
4647 * contained in the token string.
4649 if ((c = dtrace_load8(addr)) == '\0')
4650 break;
4652 if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
4653 break;
4656 if (c == '\0') {
4658 * We reached the end of the string without finding
4659 * any character that was not in the token string.
4660 * We return NULL in this case, and we set the saved
4661 * address to NULL as well.
4663 regs[rd] = (uintptr_t)NULL;
4664 mstate->dtms_strtok = (uintptr_t)NULL;
4665 mstate->dtms_strtok_limit = (uintptr_t)NULL;
4666 break;
4670 * From here on, we're copying into the destination string.
4672 for (i = 0; addr < limit && i < size - 1; addr++) {
4673 if ((c = dtrace_load8(addr)) == '\0')
4674 break;
4676 if (tokmap[c >> 3] & (1 << (c & 0x7)))
4677 break;
4679 ASSERT(i < size);
4680 dest[i++] = c;
4683 ASSERT(i < size);
4684 dest[i] = '\0';
4685 regs[rd] = (uintptr_t)dest;
4686 mstate->dtms_scratch_ptr += size;
4687 mstate->dtms_strtok = addr;
4688 mstate->dtms_strtok_limit = limit;
4689 break;
4692 case DIF_SUBR_SUBSTR: {
4693 uintptr_t s = tupregs[0].dttk_value;
4694 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4695 char *d = (char *)mstate->dtms_scratch_ptr;
4696 int64_t index = (int64_t)tupregs[1].dttk_value;
4697 int64_t remaining = (int64_t)tupregs[2].dttk_value;
4698 size_t len = dtrace_strlen((char *)s, size);
4699 int64_t i;
4701 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
4702 regs[rd] = (uintptr_t)NULL;
4703 break;
4706 if (!DTRACE_INSCRATCH(mstate, size)) {
4707 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4708 regs[rd] = (uintptr_t)NULL;
4709 break;
4712 if (nargs <= 2)
4713 remaining = (int64_t)size;
4715 if (index < 0) {
4716 index += len;
4718 if (index < 0 && index + remaining > 0) {
4719 remaining += index;
4720 index = 0;
4724 if (index >= len || index < 0) {
4725 remaining = 0;
4726 } else if (remaining < 0) {
4727 remaining += len - index;
4728 } else if (index + remaining > size) {
4729 remaining = size - index;
4732 for (i = 0; i < remaining; i++) {
4733 if ((d[i] = dtrace_load8(s + index + i)) == '\0')
4734 break;
4737 d[i] = '\0';
4739 mstate->dtms_scratch_ptr += size;
4740 regs[rd] = (uintptr_t)d;
4741 break;
4744 case DIF_SUBR_JSON: {
4745 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4746 uintptr_t json = tupregs[0].dttk_value;
4747 size_t jsonlen = dtrace_strlen((char *)json, size);
4748 uintptr_t elem = tupregs[1].dttk_value;
4749 size_t elemlen = dtrace_strlen((char *)elem, size);
4751 char *dest = (char *)mstate->dtms_scratch_ptr;
4752 char *elemlist = (char *)mstate->dtms_scratch_ptr + jsonlen + 1;
4753 char *ee = elemlist;
4754 int nelems = 1;
4755 uintptr_t cur;
4757 if (!dtrace_canload(json, jsonlen + 1, mstate, vstate) ||
4758 !dtrace_canload(elem, elemlen + 1, mstate, vstate)) {
4759 regs[rd] = (uintptr_t)NULL;
4760 break;
4763 if (!DTRACE_INSCRATCH(mstate, jsonlen + 1 + elemlen + 1)) {
4764 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4765 regs[rd] = (uintptr_t)NULL;
4766 break;
4770 * Read the element selector and split it up into a packed list
4771 * of strings.
4773 for (cur = elem; cur < elem + elemlen; cur++) {
4774 char cc = dtrace_load8(cur);
4776 if (cur == elem && cc == '[') {
4778 * If the first element selector key is
4779 * actually an array index then ignore the
4780 * bracket.
4782 continue;
4785 if (cc == ']')
4786 continue;
4788 if (cc == '.' || cc == '[') {
4789 nelems++;
4790 cc = '\0';
4793 *ee++ = cc;
4795 *ee++ = '\0';
4797 if ((regs[rd] = (uintptr_t)dtrace_json(size, json, elemlist,
4798 nelems, dest)) != (uintptr_t)NULL)
4799 mstate->dtms_scratch_ptr += jsonlen + 1;
4800 break;
4803 case DIF_SUBR_TOUPPER:
4804 case DIF_SUBR_TOLOWER: {
4805 uintptr_t s = tupregs[0].dttk_value;
4806 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4807 char *dest = (char *)mstate->dtms_scratch_ptr, c;
4808 size_t len = dtrace_strlen((char *)s, size);
4809 char lower, upper, convert;
4810 int64_t i;
4812 if (subr == DIF_SUBR_TOUPPER) {
4813 lower = 'a';
4814 upper = 'z';
4815 convert = 'A';
4816 } else {
4817 lower = 'A';
4818 upper = 'Z';
4819 convert = 'a';
4822 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
4823 regs[rd] = (uintptr_t)NULL;
4824 break;
4827 if (!DTRACE_INSCRATCH(mstate, size)) {
4828 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4829 regs[rd] = (uintptr_t)NULL;
4830 break;
4833 for (i = 0; i < size - 1; i++) {
4834 if ((c = dtrace_load8(s + i)) == '\0')
4835 break;
4837 if (c >= lower && c <= upper)
4838 c = convert + (c - lower);
4840 dest[i] = c;
4843 ASSERT(i < size);
4844 dest[i] = '\0';
4845 regs[rd] = (uintptr_t)dest;
4846 mstate->dtms_scratch_ptr += size;
4847 break;
4850 case DIF_SUBR_GETMAJOR:
4851 #ifdef _LP64
4852 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64;
4853 #else
4854 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ;
4855 #endif
4856 break;
4858 case DIF_SUBR_GETMINOR:
4859 #ifdef _LP64
4860 regs[rd] = tupregs[0].dttk_value & MAXMIN64;
4861 #else
4862 regs[rd] = tupregs[0].dttk_value & MAXMIN;
4863 #endif
4864 break;
4866 case DIF_SUBR_DDI_PATHNAME: {
4868 * This one is a galactic mess. We are going to roughly
4869 * emulate ddi_pathname(), but it's made more complicated
4870 * by the fact that we (a) want to include the minor name and
4871 * (b) must proceed iteratively instead of recursively.
4873 uintptr_t dest = mstate->dtms_scratch_ptr;
4874 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4875 char *start = (char *)dest, *end = start + size - 1;
4876 uintptr_t daddr = tupregs[0].dttk_value;
4877 int64_t minor = (int64_t)tupregs[1].dttk_value;
4878 char *s;
4879 int i, len, depth = 0;
4882 * Due to all the pointer jumping we do and context we must
4883 * rely upon, we just mandate that the user must have kernel
4884 * read privileges to use this routine.
4886 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) {
4887 *flags |= CPU_DTRACE_KPRIV;
4888 *illval = daddr;
4889 regs[rd] = (uintptr_t)NULL;
4892 if (!DTRACE_INSCRATCH(mstate, size)) {
4893 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4894 regs[rd] = (uintptr_t)NULL;
4895 break;
4898 *end = '\0';
4901 * We want to have a name for the minor. In order to do this,
4902 * we need to walk the minor list from the devinfo. We want
4903 * to be sure that we don't infinitely walk a circular list,
4904 * so we check for circularity by sending a scout pointer
4905 * ahead two elements for every element that we iterate over;
4906 * if the list is circular, these will ultimately point to the
4907 * same element. You may recognize this little trick as the
4908 * answer to a stupid interview question -- one that always
4909 * seems to be asked by those who had to have it laboriously
4910 * explained to them, and who can't even concisely describe
4911 * the conditions under which one would be forced to resort to
4912 * this technique. Needless to say, those conditions are
4913 * found here -- and probably only here. Is this the only use
4914 * of this infamous trick in shipping, production code? If it
4915 * isn't, it probably should be...
4917 if (minor != -1) {
4918 uintptr_t maddr = dtrace_loadptr(daddr +
4919 offsetof(struct dev_info, devi_minor));
4921 uintptr_t next = offsetof(struct ddi_minor_data, next);
4922 uintptr_t name = offsetof(struct ddi_minor_data,
4923 d_minor) + offsetof(struct ddi_minor, name);
4924 uintptr_t dev = offsetof(struct ddi_minor_data,
4925 d_minor) + offsetof(struct ddi_minor, dev);
4926 uintptr_t scout;
4928 if (maddr != (uintptr_t)NULL)
4929 scout = dtrace_loadptr(maddr + next);
4931 while (maddr != (uintptr_t)NULL &&
4932 !(*flags & CPU_DTRACE_FAULT)) {
4933 uint64_t m;
4934 #ifdef _LP64
4935 m = dtrace_load64(maddr + dev) & MAXMIN64;
4936 #else
4937 m = dtrace_load32(maddr + dev) & MAXMIN;
4938 #endif
4939 if (m != minor) {
4940 maddr = dtrace_loadptr(maddr + next);
4942 if (scout == (uintptr_t)NULL)
4943 continue;
4945 scout = dtrace_loadptr(scout + next);
4947 if (scout == (uintptr_t)NULL)
4948 continue;
4950 scout = dtrace_loadptr(scout + next);
4952 if (scout == (uintptr_t)NULL)
4953 continue;
4955 if (scout == maddr) {
4956 *flags |= CPU_DTRACE_ILLOP;
4957 break;
4960 continue;
4964 * We have the minor data. Now we need to
4965 * copy the minor's name into the end of the
4966 * pathname.
4968 s = (char *)dtrace_loadptr(maddr + name);
4969 len = dtrace_strlen(s, size);
4971 if (*flags & CPU_DTRACE_FAULT)
4972 break;
4974 if (len != 0) {
4975 if ((end -= (len + 1)) < start)
4976 break;
4978 *end = ':';
4981 for (i = 1; i <= len; i++)
4982 end[i] = dtrace_load8((uintptr_t)s++);
4983 break;
4987 while (daddr != (uintptr_t)NULL &&
4988 !(*flags & CPU_DTRACE_FAULT)) {
4989 ddi_node_state_t devi_state;
4991 devi_state = dtrace_load32(daddr +
4992 offsetof(struct dev_info, devi_node_state));
4994 if (*flags & CPU_DTRACE_FAULT)
4995 break;
4997 if (devi_state >= DS_INITIALIZED) {
4998 s = (char *)dtrace_loadptr(daddr +
4999 offsetof(struct dev_info, devi_addr));
5000 len = dtrace_strlen(s, size);
5002 if (*flags & CPU_DTRACE_FAULT)
5003 break;
5005 if (len != 0) {
5006 if ((end -= (len + 1)) < start)
5007 break;
5009 *end = '@';
5012 for (i = 1; i <= len; i++)
5013 end[i] = dtrace_load8((uintptr_t)s++);
5017 * Now for the node name...
5019 s = (char *)dtrace_loadptr(daddr +
5020 offsetof(struct dev_info, devi_node_name));
5022 daddr = dtrace_loadptr(daddr +
5023 offsetof(struct dev_info, devi_parent));
5026 * If our parent is NULL (that is, if we're the root
5027 * node), we're going to use the special path
5028 * "devices".
5030 if (daddr == (uintptr_t)NULL)
5031 s = "devices";
5033 len = dtrace_strlen(s, size);
5034 if (*flags & CPU_DTRACE_FAULT)
5035 break;
5037 if ((end -= (len + 1)) < start)
5038 break;
5040 for (i = 1; i <= len; i++)
5041 end[i] = dtrace_load8((uintptr_t)s++);
5042 *end = '/';
5044 if (depth++ > dtrace_devdepth_max) {
5045 *flags |= CPU_DTRACE_ILLOP;
5046 break;
5050 if (end < start)
5051 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5053 if (daddr == (uintptr_t)NULL) {
5054 regs[rd] = (uintptr_t)end;
5055 mstate->dtms_scratch_ptr += size;
5058 break;
5061 case DIF_SUBR_STRJOIN: {
5062 char *d = (char *)mstate->dtms_scratch_ptr;
5063 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5064 uintptr_t s1 = tupregs[0].dttk_value;
5065 uintptr_t s2 = tupregs[1].dttk_value;
5066 int i = 0, j = 0;
5067 size_t lim1, lim2;
5068 char c;
5070 if (!dtrace_strcanload(s1, size, &lim1, mstate, vstate) ||
5071 !dtrace_strcanload(s2, size, &lim2, mstate, vstate)) {
5072 regs[rd] = (uintptr_t)NULL;
5073 break;
5076 if (!DTRACE_INSCRATCH(mstate, size)) {
5077 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5078 regs[rd] = (uintptr_t)NULL;
5079 break;
5082 for (;;) {
5083 if (i >= size) {
5084 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5085 regs[rd] = (uintptr_t)NULL;
5086 break;
5088 c = (i >= lim1) ? '\0' : dtrace_load8(s1++);
5089 if ((d[i++] = c) == '\0') {
5090 i--;
5091 break;
5095 for (;;) {
5096 if (i >= size) {
5097 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5098 regs[rd] = (uintptr_t)NULL;
5099 break;
5102 c = (j++ >= lim2) ? '\0' : dtrace_load8(s2++);
5103 if ((d[i++] = c) == '\0')
5104 break;
5107 if (i < size) {
5108 mstate->dtms_scratch_ptr += i;
5109 regs[rd] = (uintptr_t)d;
5112 break;
5115 case DIF_SUBR_STRTOLL: {
5116 uintptr_t s = tupregs[0].dttk_value;
5117 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5118 size_t lim;
5119 int base = 10;
5121 if (nargs > 1) {
5122 if ((base = tupregs[1].dttk_value) <= 1 ||
5123 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
5124 *flags |= CPU_DTRACE_ILLOP;
5125 break;
5129 if (!dtrace_strcanload(s, size, &lim, mstate, vstate)) {
5130 regs[rd] = INT64_MIN;
5131 break;
5134 regs[rd] = dtrace_strtoll((char *)s, base, lim);
5135 break;
5138 case DIF_SUBR_LLTOSTR: {
5139 int64_t i = (int64_t)tupregs[0].dttk_value;
5140 uint64_t val, digit;
5141 uint64_t size = 65; /* enough room for 2^64 in binary */
5142 char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
5143 int base = 10;
5145 if (nargs > 1) {
5146 if ((base = tupregs[1].dttk_value) <= 1 ||
5147 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
5148 *flags |= CPU_DTRACE_ILLOP;
5149 break;
5153 val = (base == 10 && i < 0) ? i * -1 : i;
5155 if (!DTRACE_INSCRATCH(mstate, size)) {
5156 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5157 regs[rd] = (uintptr_t)NULL;
5158 break;
5161 for (*end-- = '\0'; val; val /= base) {
5162 if ((digit = val % base) <= '9' - '0') {
5163 *end-- = '0' + digit;
5164 } else {
5165 *end-- = 'a' + (digit - ('9' - '0') - 1);
5169 if (i == 0 && base == 16)
5170 *end-- = '0';
5172 if (base == 16)
5173 *end-- = 'x';
5175 if (i == 0 || base == 8 || base == 16)
5176 *end-- = '0';
5178 if (i < 0 && base == 10)
5179 *end-- = '-';
5181 regs[rd] = (uintptr_t)end + 1;
5182 mstate->dtms_scratch_ptr += size;
5183 break;
5186 case DIF_SUBR_HTONS:
5187 case DIF_SUBR_NTOHS:
5188 #ifdef _BIG_ENDIAN
5189 regs[rd] = (uint16_t)tupregs[0].dttk_value;
5190 #else
5191 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
5192 #endif
5193 break;
5196 case DIF_SUBR_HTONL:
5197 case DIF_SUBR_NTOHL:
5198 #ifdef _BIG_ENDIAN
5199 regs[rd] = (uint32_t)tupregs[0].dttk_value;
5200 #else
5201 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
5202 #endif
5203 break;
5206 case DIF_SUBR_HTONLL:
5207 case DIF_SUBR_NTOHLL:
5208 #ifdef _BIG_ENDIAN
5209 regs[rd] = (uint64_t)tupregs[0].dttk_value;
5210 #else
5211 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
5212 #endif
5213 break;
5216 case DIF_SUBR_DIRNAME:
5217 case DIF_SUBR_BASENAME: {
5218 char *dest = (char *)mstate->dtms_scratch_ptr;
5219 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5220 uintptr_t src = tupregs[0].dttk_value;
5221 int i, j, len = dtrace_strlen((char *)src, size);
5222 int lastbase = -1, firstbase = -1, lastdir = -1;
5223 int start, end;
5225 if (!dtrace_canload(src, len + 1, mstate, vstate)) {
5226 regs[rd] = (uintptr_t)NULL;
5227 break;
5230 if (!DTRACE_INSCRATCH(mstate, size)) {
5231 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5232 regs[rd] = (uintptr_t)NULL;
5233 break;
5237 * The basename and dirname for a zero-length string is
5238 * defined to be "."
5240 if (len == 0) {
5241 len = 1;
5242 src = (uintptr_t)".";
5246 * Start from the back of the string, moving back toward the
5247 * front until we see a character that isn't a slash. That
5248 * character is the last character in the basename.
5250 for (i = len - 1; i >= 0; i--) {
5251 if (dtrace_load8(src + i) != '/')
5252 break;
5255 if (i >= 0)
5256 lastbase = i;
5259 * Starting from the last character in the basename, move
5260 * towards the front until we find a slash. The character
5261 * that we processed immediately before that is the first
5262 * character in the basename.
5264 for (; i >= 0; i--) {
5265 if (dtrace_load8(src + i) == '/')
5266 break;
5269 if (i >= 0)
5270 firstbase = i + 1;
5273 * Now keep going until we find a non-slash character. That
5274 * character is the last character in the dirname.
5276 for (; i >= 0; i--) {
5277 if (dtrace_load8(src + i) != '/')
5278 break;
5281 if (i >= 0)
5282 lastdir = i;
5284 ASSERT(!(lastbase == -1 && firstbase != -1));
5285 ASSERT(!(firstbase == -1 && lastdir != -1));
5287 if (lastbase == -1) {
5289 * We didn't find a non-slash character. We know that
5290 * the length is non-zero, so the whole string must be
5291 * slashes. In either the dirname or the basename
5292 * case, we return '/'.
5294 ASSERT(firstbase == -1);
5295 firstbase = lastbase = lastdir = 0;
5298 if (firstbase == -1) {
5300 * The entire string consists only of a basename
5301 * component. If we're looking for dirname, we need
5302 * to change our string to be just "."; if we're
5303 * looking for a basename, we'll just set the first
5304 * character of the basename to be 0.
5306 if (subr == DIF_SUBR_DIRNAME) {
5307 ASSERT(lastdir == -1);
5308 src = (uintptr_t)".";
5309 lastdir = 0;
5310 } else {
5311 firstbase = 0;
5315 if (subr == DIF_SUBR_DIRNAME) {
5316 if (lastdir == -1) {
5318 * We know that we have a slash in the name --
5319 * or lastdir would be set to 0, above. And
5320 * because lastdir is -1, we know that this
5321 * slash must be the first character. (That
5322 * is, the full string must be of the form
5323 * "/basename".) In this case, the last
5324 * character of the directory name is 0.
5326 lastdir = 0;
5329 start = 0;
5330 end = lastdir;
5331 } else {
5332 ASSERT(subr == DIF_SUBR_BASENAME);
5333 ASSERT(firstbase != -1 && lastbase != -1);
5334 start = firstbase;
5335 end = lastbase;
5338 for (i = start, j = 0; i <= end && j < size - 1; i++, j++)
5339 dest[j] = dtrace_load8(src + i);
5341 dest[j] = '\0';
5342 regs[rd] = (uintptr_t)dest;
5343 mstate->dtms_scratch_ptr += size;
5344 break;
5347 case DIF_SUBR_GETF: {
5348 uintptr_t fd = tupregs[0].dttk_value;
5349 uf_info_t *finfo = &curthread->t_procp->p_user.u_finfo;
5350 file_t *fp;
5352 if (!dtrace_priv_proc(state, mstate)) {
5353 regs[rd] = (uintptr_t)NULL;
5354 break;
5358 * This is safe because fi_nfiles only increases, and the
5359 * fi_list array is not freed when the array size doubles.
5360 * (See the comment in flist_grow() for details on the
5361 * management of the u_finfo structure.)
5363 fp = fd < finfo->fi_nfiles ? finfo->fi_list[fd].uf_file : NULL;
5365 mstate->dtms_getf = fp;
5366 regs[rd] = (uintptr_t)fp;
5367 break;
5370 case DIF_SUBR_CLEANPATH: {
5371 char *dest = (char *)mstate->dtms_scratch_ptr, c;
5372 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5373 uintptr_t src = tupregs[0].dttk_value;
5374 size_t lim;
5375 int i = 0, j = 0;
5376 zone_t *z;
5378 if (!dtrace_strcanload(src, size, &lim, mstate, vstate)) {
5379 regs[rd] = (uintptr_t)NULL;
5380 break;
5383 if (!DTRACE_INSCRATCH(mstate, size)) {
5384 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5385 regs[rd] = (uintptr_t)NULL;
5386 break;
5390 * Move forward, loading each character.
5392 do {
5393 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5394 next:
5395 if (j + 5 >= size) /* 5 = strlen("/..c\0") */
5396 break;
5398 if (c != '/') {
5399 dest[j++] = c;
5400 continue;
5403 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5405 if (c == '/') {
5407 * We have two slashes -- we can just advance
5408 * to the next character.
5410 goto next;
5413 if (c != '.') {
5415 * This is not "." and it's not ".." -- we can
5416 * just store the "/" and this character and
5417 * drive on.
5419 dest[j++] = '/';
5420 dest[j++] = c;
5421 continue;
5424 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5426 if (c == '/') {
5428 * This is a "/./" component. We're not going
5429 * to store anything in the destination buffer;
5430 * we're just going to go to the next component.
5432 goto next;
5435 if (c != '.') {
5437 * This is not ".." -- we can just store the
5438 * "/." and this character and continue
5439 * processing.
5441 dest[j++] = '/';
5442 dest[j++] = '.';
5443 dest[j++] = c;
5444 continue;
5447 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5449 if (c != '/' && c != '\0') {
5451 * This is not ".." -- it's "..[mumble]".
5452 * We'll store the "/.." and this character
5453 * and continue processing.
5455 dest[j++] = '/';
5456 dest[j++] = '.';
5457 dest[j++] = '.';
5458 dest[j++] = c;
5459 continue;
5463 * This is "/../" or "/..\0". We need to back up
5464 * our destination pointer until we find a "/".
5466 i--;
5467 while (j != 0 && dest[--j] != '/')
5468 continue;
5470 if (c == '\0')
5471 dest[++j] = '/';
5472 } while (c != '\0');
5474 dest[j] = '\0';
5476 if (mstate->dtms_getf != NULL &&
5477 !(mstate->dtms_access & DTRACE_ACCESS_KERNEL) &&
5478 (z = state->dts_cred.dcr_cred->cr_zone) != kcred->cr_zone) {
5480 * If we've done a getf() as a part of this ECB and we
5481 * don't have kernel access (and we're not in the global
5482 * zone), check if the path we cleaned up begins with
5483 * the zone's root path, and trim it off if so. Note
5484 * that this is an output cleanliness issue, not a
5485 * security issue: knowing one's zone root path does
5486 * not enable privilege escalation.
5488 if (strstr(dest, z->zone_rootpath) == dest)
5489 dest += strlen(z->zone_rootpath) - 1;
5492 regs[rd] = (uintptr_t)dest;
5493 mstate->dtms_scratch_ptr += size;
5494 break;
5497 case DIF_SUBR_INET_NTOA:
5498 case DIF_SUBR_INET_NTOA6:
5499 case DIF_SUBR_INET_NTOP: {
5500 size_t size;
5501 int af, argi, i;
5502 char *base, *end;
5504 if (subr == DIF_SUBR_INET_NTOP) {
5505 af = (int)tupregs[0].dttk_value;
5506 argi = 1;
5507 } else {
5508 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6;
5509 argi = 0;
5512 if (af == AF_INET) {
5513 ipaddr_t ip4;
5514 uint8_t *ptr8, val;
5516 if (!dtrace_canload(tupregs[argi].dttk_value,
5517 sizeof (ipaddr_t), mstate, vstate)) {
5518 regs[rd] = 0;
5519 break;
5523 * Safely load the IPv4 address.
5525 ip4 = dtrace_load32(tupregs[argi].dttk_value);
5528 * Check an IPv4 string will fit in scratch.
5530 size = INET_ADDRSTRLEN;
5531 if (!DTRACE_INSCRATCH(mstate, size)) {
5532 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5533 regs[rd] = (uintptr_t)NULL;
5534 break;
5536 base = (char *)mstate->dtms_scratch_ptr;
5537 end = (char *)mstate->dtms_scratch_ptr + size - 1;
5540 * Stringify as a dotted decimal quad.
5542 *end-- = '\0';
5543 ptr8 = (uint8_t *)&ip4;
5544 for (i = 3; i >= 0; i--) {
5545 val = ptr8[i];
5547 if (val == 0) {
5548 *end-- = '0';
5549 } else {
5550 for (; val; val /= 10) {
5551 *end-- = '0' + (val % 10);
5555 if (i > 0)
5556 *end-- = '.';
5558 ASSERT(end + 1 >= base);
5560 } else if (af == AF_INET6) {
5561 struct in6_addr ip6;
5562 int firstzero, tryzero, numzero, v6end;
5563 uint16_t val;
5564 const char digits[] = "0123456789abcdef";
5567 * Stringify using RFC 1884 convention 2 - 16 bit
5568 * hexadecimal values with a zero-run compression.
5569 * Lower case hexadecimal digits are used.
5570 * eg, fe80::214:4fff:fe0b:76c8.
5571 * The IPv4 embedded form is returned for inet_ntop,
5572 * just the IPv4 string is returned for inet_ntoa6.
5575 if (!dtrace_canload(tupregs[argi].dttk_value,
5576 sizeof (struct in6_addr), mstate, vstate)) {
5577 regs[rd] = 0;
5578 break;
5582 * Safely load the IPv6 address.
5584 dtrace_bcopy(
5585 (void *)(uintptr_t)tupregs[argi].dttk_value,
5586 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr));
5589 * Check an IPv6 string will fit in scratch.
5591 size = INET6_ADDRSTRLEN;
5592 if (!DTRACE_INSCRATCH(mstate, size)) {
5593 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5594 regs[rd] = (uintptr_t)NULL;
5595 break;
5597 base = (char *)mstate->dtms_scratch_ptr;
5598 end = (char *)mstate->dtms_scratch_ptr + size - 1;
5599 *end-- = '\0';
5602 * Find the longest run of 16 bit zero values
5603 * for the single allowed zero compression - "::".
5605 firstzero = -1;
5606 tryzero = -1;
5607 numzero = 1;
5608 for (i = 0; i < sizeof (struct in6_addr); i++) {
5609 if (ip6._S6_un._S6_u8[i] == 0 &&
5610 tryzero == -1 && i % 2 == 0) {
5611 tryzero = i;
5612 continue;
5615 if (tryzero != -1 &&
5616 (ip6._S6_un._S6_u8[i] != 0 ||
5617 i == sizeof (struct in6_addr) - 1)) {
5619 if (i - tryzero <= numzero) {
5620 tryzero = -1;
5621 continue;
5624 firstzero = tryzero;
5625 numzero = i - i % 2 - tryzero;
5626 tryzero = -1;
5628 if (ip6._S6_un._S6_u8[i] == 0 &&
5629 i == sizeof (struct in6_addr) - 1)
5630 numzero += 2;
5633 ASSERT(firstzero + numzero <= sizeof (struct in6_addr));
5636 * Check for an IPv4 embedded address.
5638 v6end = sizeof (struct in6_addr) - 2;
5639 if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
5640 IN6_IS_ADDR_V4COMPAT(&ip6)) {
5641 for (i = sizeof (struct in6_addr) - 1;
5642 i >= DTRACE_V4MAPPED_OFFSET; i--) {
5643 ASSERT(end >= base);
5645 val = ip6._S6_un._S6_u8[i];
5647 if (val == 0) {
5648 *end-- = '0';
5649 } else {
5650 for (; val; val /= 10) {
5651 *end-- = '0' + val % 10;
5655 if (i > DTRACE_V4MAPPED_OFFSET)
5656 *end-- = '.';
5659 if (subr == DIF_SUBR_INET_NTOA6)
5660 goto inetout;
5663 * Set v6end to skip the IPv4 address that
5664 * we have already stringified.
5666 v6end = 10;
5670 * Build the IPv6 string by working through the
5671 * address in reverse.
5673 for (i = v6end; i >= 0; i -= 2) {
5674 ASSERT(end >= base);
5676 if (i == firstzero + numzero - 2) {
5677 *end-- = ':';
5678 *end-- = ':';
5679 i -= numzero - 2;
5680 continue;
5683 if (i < 14 && i != firstzero - 2)
5684 *end-- = ':';
5686 val = (ip6._S6_un._S6_u8[i] << 8) +
5687 ip6._S6_un._S6_u8[i + 1];
5689 if (val == 0) {
5690 *end-- = '0';
5691 } else {
5692 for (; val; val /= 16) {
5693 *end-- = digits[val % 16];
5697 ASSERT(end + 1 >= base);
5699 } else {
5701 * The user didn't use AH_INET or AH_INET6.
5703 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5704 regs[rd] = (uintptr_t)NULL;
5705 break;
5708 inetout: regs[rd] = (uintptr_t)end + 1;
5709 mstate->dtms_scratch_ptr += size;
5710 break;
5717 * Emulate the execution of DTrace IR instructions specified by the given
5718 * DIF object. This function is deliberately void of assertions as all of
5719 * the necessary checks are handled by a call to dtrace_difo_validate().
5721 static uint64_t
5722 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
5723 dtrace_vstate_t *vstate, dtrace_state_t *state)
5725 const dif_instr_t *text = difo->dtdo_buf;
5726 const uint_t textlen = difo->dtdo_len;
5727 const char *strtab = difo->dtdo_strtab;
5728 const uint64_t *inttab = difo->dtdo_inttab;
5730 uint64_t rval = 0;
5731 dtrace_statvar_t *svar;
5732 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
5733 dtrace_difv_t *v;
5734 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
5735 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
5737 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
5738 uint64_t regs[DIF_DIR_NREGS];
5739 uint64_t *tmp;
5741 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
5742 int64_t cc_r;
5743 uint_t pc = 0, id, opc;
5744 uint8_t ttop = 0;
5745 dif_instr_t instr;
5746 uint_t r1, r2, rd;
5749 * We stash the current DIF object into the machine state: we need it
5750 * for subsequent access checking.
5752 mstate->dtms_difo = difo;
5754 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */
5756 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
5757 opc = pc;
5759 instr = text[pc++];
5760 r1 = DIF_INSTR_R1(instr);
5761 r2 = DIF_INSTR_R2(instr);
5762 rd = DIF_INSTR_RD(instr);
5764 switch (DIF_INSTR_OP(instr)) {
5765 case DIF_OP_OR:
5766 regs[rd] = regs[r1] | regs[r2];
5767 break;
5768 case DIF_OP_XOR:
5769 regs[rd] = regs[r1] ^ regs[r2];
5770 break;
5771 case DIF_OP_AND:
5772 regs[rd] = regs[r1] & regs[r2];
5773 break;
5774 case DIF_OP_SLL:
5775 regs[rd] = regs[r1] << regs[r2];
5776 break;
5777 case DIF_OP_SRL:
5778 regs[rd] = regs[r1] >> regs[r2];
5779 break;
5780 case DIF_OP_SUB:
5781 regs[rd] = regs[r1] - regs[r2];
5782 break;
5783 case DIF_OP_ADD:
5784 regs[rd] = regs[r1] + regs[r2];
5785 break;
5786 case DIF_OP_MUL:
5787 regs[rd] = regs[r1] * regs[r2];
5788 break;
5789 case DIF_OP_SDIV:
5790 if (regs[r2] == 0) {
5791 regs[rd] = (uintptr_t)NULL;
5792 *flags |= CPU_DTRACE_DIVZERO;
5793 } else {
5794 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5795 regs[rd] = (int64_t)regs[r1] /
5796 (int64_t)regs[r2];
5797 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5799 break;
5801 case DIF_OP_UDIV:
5802 if (regs[r2] == 0) {
5803 regs[rd] = (uintptr_t)NULL;
5804 *flags |= CPU_DTRACE_DIVZERO;
5805 } else {
5806 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5807 regs[rd] = regs[r1] / regs[r2];
5808 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5810 break;
5812 case DIF_OP_SREM:
5813 if (regs[r2] == 0) {
5814 regs[rd] = (uintptr_t)NULL;
5815 *flags |= CPU_DTRACE_DIVZERO;
5816 } else {
5817 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5818 regs[rd] = (int64_t)regs[r1] %
5819 (int64_t)regs[r2];
5820 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5822 break;
5824 case DIF_OP_UREM:
5825 if (regs[r2] == 0) {
5826 regs[rd] = (uintptr_t)NULL;
5827 *flags |= CPU_DTRACE_DIVZERO;
5828 } else {
5829 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5830 regs[rd] = regs[r1] % regs[r2];
5831 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5833 break;
5835 case DIF_OP_NOT:
5836 regs[rd] = ~regs[r1];
5837 break;
5838 case DIF_OP_MOV:
5839 regs[rd] = regs[r1];
5840 break;
5841 case DIF_OP_CMP:
5842 cc_r = regs[r1] - regs[r2];
5843 cc_n = cc_r < 0;
5844 cc_z = cc_r == 0;
5845 cc_v = 0;
5846 cc_c = regs[r1] < regs[r2];
5847 break;
5848 case DIF_OP_TST:
5849 cc_n = cc_v = cc_c = 0;
5850 cc_z = regs[r1] == 0;
5851 break;
5852 case DIF_OP_BA:
5853 pc = DIF_INSTR_LABEL(instr);
5854 break;
5855 case DIF_OP_BE:
5856 if (cc_z)
5857 pc = DIF_INSTR_LABEL(instr);
5858 break;
5859 case DIF_OP_BNE:
5860 if (cc_z == 0)
5861 pc = DIF_INSTR_LABEL(instr);
5862 break;
5863 case DIF_OP_BG:
5864 if ((cc_z | (cc_n ^ cc_v)) == 0)
5865 pc = DIF_INSTR_LABEL(instr);
5866 break;
5867 case DIF_OP_BGU:
5868 if ((cc_c | cc_z) == 0)
5869 pc = DIF_INSTR_LABEL(instr);
5870 break;
5871 case DIF_OP_BGE:
5872 if ((cc_n ^ cc_v) == 0)
5873 pc = DIF_INSTR_LABEL(instr);
5874 break;
5875 case DIF_OP_BGEU:
5876 if (cc_c == 0)
5877 pc = DIF_INSTR_LABEL(instr);
5878 break;
5879 case DIF_OP_BL:
5880 if (cc_n ^ cc_v)
5881 pc = DIF_INSTR_LABEL(instr);
5882 break;
5883 case DIF_OP_BLU:
5884 if (cc_c)
5885 pc = DIF_INSTR_LABEL(instr);
5886 break;
5887 case DIF_OP_BLE:
5888 if (cc_z | (cc_n ^ cc_v))
5889 pc = DIF_INSTR_LABEL(instr);
5890 break;
5891 case DIF_OP_BLEU:
5892 if (cc_c | cc_z)
5893 pc = DIF_INSTR_LABEL(instr);
5894 break;
5895 case DIF_OP_RLDSB:
5896 if (!dtrace_canload(regs[r1], 1, mstate, vstate))
5897 break;
5898 /*FALLTHROUGH*/
5899 case DIF_OP_LDSB:
5900 regs[rd] = (int8_t)dtrace_load8(regs[r1]);
5901 break;
5902 case DIF_OP_RLDSH:
5903 if (!dtrace_canload(regs[r1], 2, mstate, vstate))
5904 break;
5905 /*FALLTHROUGH*/
5906 case DIF_OP_LDSH:
5907 regs[rd] = (int16_t)dtrace_load16(regs[r1]);
5908 break;
5909 case DIF_OP_RLDSW:
5910 if (!dtrace_canload(regs[r1], 4, mstate, vstate))
5911 break;
5912 /*FALLTHROUGH*/
5913 case DIF_OP_LDSW:
5914 regs[rd] = (int32_t)dtrace_load32(regs[r1]);
5915 break;
5916 case DIF_OP_RLDUB:
5917 if (!dtrace_canload(regs[r1], 1, mstate, vstate))
5918 break;
5919 /*FALLTHROUGH*/
5920 case DIF_OP_LDUB:
5921 regs[rd] = dtrace_load8(regs[r1]);
5922 break;
5923 case DIF_OP_RLDUH:
5924 if (!dtrace_canload(regs[r1], 2, mstate, vstate))
5925 break;
5926 /*FALLTHROUGH*/
5927 case DIF_OP_LDUH:
5928 regs[rd] = dtrace_load16(regs[r1]);
5929 break;
5930 case DIF_OP_RLDUW:
5931 if (!dtrace_canload(regs[r1], 4, mstate, vstate))
5932 break;
5933 /*FALLTHROUGH*/
5934 case DIF_OP_LDUW:
5935 regs[rd] = dtrace_load32(regs[r1]);
5936 break;
5937 case DIF_OP_RLDX:
5938 if (!dtrace_canload(regs[r1], 8, mstate, vstate))
5939 break;
5940 /*FALLTHROUGH*/
5941 case DIF_OP_LDX:
5942 regs[rd] = dtrace_load64(regs[r1]);
5943 break;
5944 case DIF_OP_ULDSB:
5945 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5946 regs[rd] = (int8_t)
5947 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5948 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5949 break;
5950 case DIF_OP_ULDSH:
5951 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5952 regs[rd] = (int16_t)
5953 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5954 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5955 break;
5956 case DIF_OP_ULDSW:
5957 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5958 regs[rd] = (int32_t)
5959 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5960 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5961 break;
5962 case DIF_OP_ULDUB:
5963 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5964 regs[rd] =
5965 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5966 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5967 break;
5968 case DIF_OP_ULDUH:
5969 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5970 regs[rd] =
5971 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5972 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5973 break;
5974 case DIF_OP_ULDUW:
5975 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5976 regs[rd] =
5977 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5978 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5979 break;
5980 case DIF_OP_ULDX:
5981 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5982 regs[rd] =
5983 dtrace_fuword64((void *)(uintptr_t)regs[r1]);
5984 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5985 break;
5986 case DIF_OP_RET:
5987 rval = regs[rd];
5988 pc = textlen;
5989 break;
5990 case DIF_OP_NOP:
5991 break;
5992 case DIF_OP_SETX:
5993 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
5994 break;
5995 case DIF_OP_SETS:
5996 regs[rd] = (uint64_t)(uintptr_t)
5997 (strtab + DIF_INSTR_STRING(instr));
5998 break;
5999 case DIF_OP_SCMP: {
6000 size_t sz = state->dts_options[DTRACEOPT_STRSIZE];
6001 uintptr_t s1 = regs[r1];
6002 uintptr_t s2 = regs[r2];
6003 size_t lim1, lim2;
6005 if (s1 != (uintptr_t)NULL &&
6006 !dtrace_strcanload(s1, sz, &lim1, mstate, vstate))
6007 break;
6008 if (s2 != (uintptr_t)NULL &&
6009 !dtrace_strcanload(s2, sz, &lim2, mstate, vstate))
6010 break;
6012 cc_r = dtrace_strncmp((char *)s1, (char *)s2,
6013 MIN(lim1, lim2));
6015 cc_n = cc_r < 0;
6016 cc_z = cc_r == 0;
6017 cc_v = cc_c = 0;
6018 break;
6020 case DIF_OP_LDGA:
6021 regs[rd] = dtrace_dif_variable(mstate, state,
6022 r1, regs[r2]);
6023 break;
6024 case DIF_OP_LDGS:
6025 id = DIF_INSTR_VAR(instr);
6027 if (id >= DIF_VAR_OTHER_UBASE) {
6028 uintptr_t a;
6030 id -= DIF_VAR_OTHER_UBASE;
6031 svar = vstate->dtvs_globals[id];
6032 ASSERT(svar != NULL);
6033 v = &svar->dtsv_var;
6035 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
6036 regs[rd] = svar->dtsv_data;
6037 break;
6040 a = (uintptr_t)svar->dtsv_data;
6042 if (*(uint8_t *)a == UINT8_MAX) {
6044 * If the 0th byte is set to UINT8_MAX
6045 * then this is to be treated as a
6046 * reference to a NULL variable.
6048 regs[rd] = (uintptr_t)NULL;
6049 } else {
6050 regs[rd] = a + sizeof (uint64_t);
6053 break;
6056 regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
6057 break;
6059 case DIF_OP_STGA:
6060 dtrace_dif_variable_write(mstate, state, r1, regs[r2],
6061 regs[rd]);
6062 break;
6064 case DIF_OP_STGS:
6065 id = DIF_INSTR_VAR(instr);
6067 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6068 id -= DIF_VAR_OTHER_UBASE;
6070 VERIFY(id < vstate->dtvs_nglobals);
6071 svar = vstate->dtvs_globals[id];
6072 ASSERT(svar != NULL);
6073 v = &svar->dtsv_var;
6075 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6076 uintptr_t a = (uintptr_t)svar->dtsv_data;
6077 size_t lim;
6079 ASSERT(a != (uintptr_t)NULL);
6080 ASSERT(svar->dtsv_size != 0);
6082 if (regs[rd] == (uintptr_t)NULL) {
6083 *(uint8_t *)a = UINT8_MAX;
6084 break;
6085 } else {
6086 *(uint8_t *)a = 0;
6087 a += sizeof (uint64_t);
6089 if (!dtrace_vcanload(
6090 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6091 &lim, mstate, vstate))
6092 break;
6094 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6095 (void *)a, &v->dtdv_type, lim);
6096 break;
6099 svar->dtsv_data = regs[rd];
6100 break;
6102 case DIF_OP_LDTA:
6104 * There are no DTrace built-in thread-local arrays at
6105 * present. This opcode is saved for future work.
6107 *flags |= CPU_DTRACE_ILLOP;
6108 regs[rd] = (uintptr_t)NULL;
6109 break;
6111 case DIF_OP_LDLS:
6112 id = DIF_INSTR_VAR(instr);
6114 if (id < DIF_VAR_OTHER_UBASE) {
6116 * For now, this has no meaning.
6118 regs[rd] = (uintptr_t)NULL;
6119 break;
6122 id -= DIF_VAR_OTHER_UBASE;
6124 ASSERT(id < vstate->dtvs_nlocals);
6125 ASSERT(vstate->dtvs_locals != NULL);
6127 svar = vstate->dtvs_locals[id];
6128 ASSERT(svar != NULL);
6129 v = &svar->dtsv_var;
6131 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6132 uintptr_t a = (uintptr_t)svar->dtsv_data;
6133 size_t sz = v->dtdv_type.dtdt_size;
6135 sz += sizeof (uint64_t);
6136 ASSERT(svar->dtsv_size == NCPU * sz);
6137 a += CPU->cpu_id * sz;
6139 if (*(uint8_t *)a == UINT8_MAX) {
6141 * If the 0th byte is set to UINT8_MAX
6142 * then this is to be treated as a
6143 * reference to a NULL variable.
6145 regs[rd] = (uintptr_t)NULL;
6146 } else {
6147 regs[rd] = a + sizeof (uint64_t);
6150 break;
6153 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
6154 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
6155 regs[rd] = tmp[CPU->cpu_id];
6156 break;
6158 case DIF_OP_STLS:
6159 id = DIF_INSTR_VAR(instr);
6161 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6162 id -= DIF_VAR_OTHER_UBASE;
6163 VERIFY(id < vstate->dtvs_nlocals);
6165 ASSERT(vstate->dtvs_locals != NULL);
6166 svar = vstate->dtvs_locals[id];
6167 ASSERT(svar != NULL);
6168 v = &svar->dtsv_var;
6170 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6171 uintptr_t a = (uintptr_t)svar->dtsv_data;
6172 size_t sz = v->dtdv_type.dtdt_size;
6173 size_t lim;
6175 sz += sizeof (uint64_t);
6176 ASSERT(svar->dtsv_size == NCPU * sz);
6177 a += CPU->cpu_id * sz;
6179 if (regs[rd] == (uintptr_t)NULL) {
6180 *(uint8_t *)a = UINT8_MAX;
6181 break;
6182 } else {
6183 *(uint8_t *)a = 0;
6184 a += sizeof (uint64_t);
6187 if (!dtrace_vcanload(
6188 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6189 &lim, mstate, vstate))
6190 break;
6192 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6193 (void *)a, &v->dtdv_type, lim);
6194 break;
6197 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
6198 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
6199 tmp[CPU->cpu_id] = regs[rd];
6200 break;
6202 case DIF_OP_LDTS: {
6203 dtrace_dynvar_t *dvar;
6204 dtrace_key_t *key;
6206 id = DIF_INSTR_VAR(instr);
6207 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6208 id -= DIF_VAR_OTHER_UBASE;
6209 v = &vstate->dtvs_tlocals[id];
6211 key = &tupregs[DIF_DTR_NREGS];
6212 key[0].dttk_value = (uint64_t)id;
6213 key[0].dttk_size = 0;
6214 DTRACE_TLS_THRKEY(key[1].dttk_value);
6215 key[1].dttk_size = 0;
6217 dvar = dtrace_dynvar(dstate, 2, key,
6218 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC,
6219 mstate, vstate);
6221 if (dvar == NULL) {
6222 regs[rd] = (uintptr_t)NULL;
6223 break;
6226 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6227 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
6228 } else {
6229 regs[rd] = *((uint64_t *)dvar->dtdv_data);
6232 break;
6235 case DIF_OP_STTS: {
6236 dtrace_dynvar_t *dvar;
6237 dtrace_key_t *key;
6239 id = DIF_INSTR_VAR(instr);
6240 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6241 id -= DIF_VAR_OTHER_UBASE;
6242 VERIFY(id < vstate->dtvs_ntlocals);
6244 key = &tupregs[DIF_DTR_NREGS];
6245 key[0].dttk_value = (uint64_t)id;
6246 key[0].dttk_size = 0;
6247 DTRACE_TLS_THRKEY(key[1].dttk_value);
6248 key[1].dttk_size = 0;
6249 v = &vstate->dtvs_tlocals[id];
6251 dvar = dtrace_dynvar(dstate, 2, key,
6252 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6253 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6254 regs[rd] ? DTRACE_DYNVAR_ALLOC :
6255 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
6258 * Given that we're storing to thread-local data,
6259 * we need to flush our predicate cache.
6261 curthread->t_predcache = 0;
6263 if (dvar == NULL)
6264 break;
6266 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6267 size_t lim;
6269 if (!dtrace_vcanload(
6270 (void *)(uintptr_t)regs[rd],
6271 &v->dtdv_type, &lim, mstate, vstate))
6272 break;
6274 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6275 dvar->dtdv_data, &v->dtdv_type, lim);
6276 } else {
6277 *((uint64_t *)dvar->dtdv_data) = regs[rd];
6280 break;
6283 case DIF_OP_SRA:
6284 regs[rd] = (int64_t)regs[r1] >> regs[r2];
6285 break;
6287 case DIF_OP_CALL:
6288 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
6289 regs, tupregs, ttop, mstate, state);
6290 break;
6292 case DIF_OP_PUSHTR:
6293 if (ttop == DIF_DTR_NREGS) {
6294 *flags |= CPU_DTRACE_TUPOFLOW;
6295 break;
6298 if (r1 == DIF_TYPE_STRING) {
6300 * If this is a string type and the size is 0,
6301 * we'll use the system-wide default string
6302 * size. Note that we are _not_ looking at
6303 * the value of the DTRACEOPT_STRSIZE option;
6304 * had this been set, we would expect to have
6305 * a non-zero size value in the "pushtr".
6307 tupregs[ttop].dttk_size =
6308 dtrace_strlen((char *)(uintptr_t)regs[rd],
6309 regs[r2] ? regs[r2] :
6310 dtrace_strsize_default) + 1;
6311 } else {
6312 if (regs[r2] > LONG_MAX) {
6313 *flags |= CPU_DTRACE_ILLOP;
6314 break;
6317 tupregs[ttop].dttk_size = regs[r2];
6320 tupregs[ttop++].dttk_value = regs[rd];
6321 break;
6323 case DIF_OP_PUSHTV:
6324 if (ttop == DIF_DTR_NREGS) {
6325 *flags |= CPU_DTRACE_TUPOFLOW;
6326 break;
6329 tupregs[ttop].dttk_value = regs[rd];
6330 tupregs[ttop++].dttk_size = 0;
6331 break;
6333 case DIF_OP_POPTS:
6334 if (ttop != 0)
6335 ttop--;
6336 break;
6338 case DIF_OP_FLUSHTS:
6339 ttop = 0;
6340 break;
6342 case DIF_OP_LDGAA:
6343 case DIF_OP_LDTAA: {
6344 dtrace_dynvar_t *dvar;
6345 dtrace_key_t *key = tupregs;
6346 uint_t nkeys = ttop;
6348 id = DIF_INSTR_VAR(instr);
6349 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6350 id -= DIF_VAR_OTHER_UBASE;
6352 key[nkeys].dttk_value = (uint64_t)id;
6353 key[nkeys++].dttk_size = 0;
6355 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
6356 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6357 key[nkeys++].dttk_size = 0;
6358 VERIFY(id < vstate->dtvs_ntlocals);
6359 v = &vstate->dtvs_tlocals[id];
6360 } else {
6361 VERIFY(id < vstate->dtvs_nglobals);
6362 v = &vstate->dtvs_globals[id]->dtsv_var;
6365 dvar = dtrace_dynvar(dstate, nkeys, key,
6366 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6367 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6368 DTRACE_DYNVAR_NOALLOC, mstate, vstate);
6370 if (dvar == NULL) {
6371 regs[rd] = (uintptr_t)NULL;
6372 break;
6375 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6376 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
6377 } else {
6378 regs[rd] = *((uint64_t *)dvar->dtdv_data);
6381 break;
6384 case DIF_OP_STGAA:
6385 case DIF_OP_STTAA: {
6386 dtrace_dynvar_t *dvar;
6387 dtrace_key_t *key = tupregs;
6388 uint_t nkeys = ttop;
6390 id = DIF_INSTR_VAR(instr);
6391 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6392 id -= DIF_VAR_OTHER_UBASE;
6394 key[nkeys].dttk_value = (uint64_t)id;
6395 key[nkeys++].dttk_size = 0;
6397 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
6398 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6399 key[nkeys++].dttk_size = 0;
6400 VERIFY(id < vstate->dtvs_ntlocals);
6401 v = &vstate->dtvs_tlocals[id];
6402 } else {
6403 VERIFY(id < vstate->dtvs_nglobals);
6404 v = &vstate->dtvs_globals[id]->dtsv_var;
6407 dvar = dtrace_dynvar(dstate, nkeys, key,
6408 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6409 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6410 regs[rd] ? DTRACE_DYNVAR_ALLOC :
6411 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
6413 if (dvar == NULL)
6414 break;
6416 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6417 size_t lim;
6419 if (!dtrace_vcanload(
6420 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6421 &lim, mstate, vstate))
6422 break;
6424 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6425 dvar->dtdv_data, &v->dtdv_type, lim);
6426 } else {
6427 *((uint64_t *)dvar->dtdv_data) = regs[rd];
6430 break;
6433 case DIF_OP_ALLOCS: {
6434 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
6435 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
6438 * Rounding up the user allocation size could have
6439 * overflowed large, bogus allocations (like -1ULL) to
6440 * 0.
6442 if (size < regs[r1] ||
6443 !DTRACE_INSCRATCH(mstate, size)) {
6444 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6445 regs[rd] = (uintptr_t)NULL;
6446 break;
6449 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size);
6450 mstate->dtms_scratch_ptr += size;
6451 regs[rd] = ptr;
6452 break;
6455 case DIF_OP_COPYS:
6456 if (!dtrace_canstore(regs[rd], regs[r2],
6457 mstate, vstate)) {
6458 *flags |= CPU_DTRACE_BADADDR;
6459 *illval = regs[rd];
6460 break;
6463 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate))
6464 break;
6466 dtrace_bcopy((void *)(uintptr_t)regs[r1],
6467 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
6468 break;
6470 case DIF_OP_STB:
6471 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
6472 *flags |= CPU_DTRACE_BADADDR;
6473 *illval = regs[rd];
6474 break;
6476 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
6477 break;
6479 case DIF_OP_STH:
6480 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
6481 *flags |= CPU_DTRACE_BADADDR;
6482 *illval = regs[rd];
6483 break;
6485 if (regs[rd] & 1) {
6486 *flags |= CPU_DTRACE_BADALIGN;
6487 *illval = regs[rd];
6488 break;
6490 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
6491 break;
6493 case DIF_OP_STW:
6494 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
6495 *flags |= CPU_DTRACE_BADADDR;
6496 *illval = regs[rd];
6497 break;
6499 if (regs[rd] & 3) {
6500 *flags |= CPU_DTRACE_BADALIGN;
6501 *illval = regs[rd];
6502 break;
6504 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
6505 break;
6507 case DIF_OP_STX:
6508 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
6509 *flags |= CPU_DTRACE_BADADDR;
6510 *illval = regs[rd];
6511 break;
6513 if (regs[rd] & 7) {
6514 *flags |= CPU_DTRACE_BADALIGN;
6515 *illval = regs[rd];
6516 break;
6518 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
6519 break;
6523 if (!(*flags & CPU_DTRACE_FAULT))
6524 return (rval);
6526 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
6527 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
6529 return (0);
6532 static void
6533 dtrace_action_breakpoint(dtrace_ecb_t *ecb)
6535 dtrace_probe_t *probe = ecb->dte_probe;
6536 dtrace_provider_t *prov = probe->dtpr_provider;
6537 char c[DTRACE_FULLNAMELEN + 80], *str;
6538 char *msg = "dtrace: breakpoint action at probe ";
6539 char *ecbmsg = " (ecb ";
6540 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4));
6541 uintptr_t val = (uintptr_t)ecb;
6542 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
6544 if (dtrace_destructive_disallow)
6545 return;
6548 * It's impossible to be taking action on the NULL probe.
6550 ASSERT(probe != NULL);
6553 * This is a poor man's (destitute man's?) sprintf(): we want to
6554 * print the provider name, module name, function name and name of
6555 * the probe, along with the hex address of the ECB with the breakpoint
6556 * action -- all of which we must place in the character buffer by
6557 * hand.
6559 while (*msg != '\0')
6560 c[i++] = *msg++;
6562 for (str = prov->dtpv_name; *str != '\0'; str++)
6563 c[i++] = *str;
6564 c[i++] = ':';
6566 for (str = probe->dtpr_mod; *str != '\0'; str++)
6567 c[i++] = *str;
6568 c[i++] = ':';
6570 for (str = probe->dtpr_func; *str != '\0'; str++)
6571 c[i++] = *str;
6572 c[i++] = ':';
6574 for (str = probe->dtpr_name; *str != '\0'; str++)
6575 c[i++] = *str;
6577 while (*ecbmsg != '\0')
6578 c[i++] = *ecbmsg++;
6580 while (shift >= 0) {
6581 mask = (uintptr_t)0xf << shift;
6583 if (val >= ((uintptr_t)1 << shift))
6584 c[i++] = "0123456789abcdef"[(val & mask) >> shift];
6585 shift -= 4;
6588 c[i++] = ')';
6589 c[i] = '\0';
6591 debug_enter(c);
6594 static void
6595 dtrace_action_panic(dtrace_ecb_t *ecb)
6597 dtrace_probe_t *probe = ecb->dte_probe;
6600 * It's impossible to be taking action on the NULL probe.
6602 ASSERT(probe != NULL);
6604 if (dtrace_destructive_disallow)
6605 return;
6607 if (dtrace_panicked != NULL)
6608 return;
6610 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL)
6611 return;
6614 * We won the right to panic. (We want to be sure that only one
6615 * thread calls panic() from dtrace_probe(), and that panic() is
6616 * called exactly once.)
6618 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
6619 probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
6620 probe->dtpr_func, probe->dtpr_name, (void *)ecb);
6623 static void
6624 dtrace_action_raise(uint64_t sig)
6626 if (dtrace_destructive_disallow)
6627 return;
6629 if (sig >= NSIG) {
6630 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
6631 return;
6635 * raise() has a queue depth of 1 -- we ignore all subsequent
6636 * invocations of the raise() action.
6638 if (curthread->t_dtrace_sig == 0)
6639 curthread->t_dtrace_sig = (uint8_t)sig;
6641 curthread->t_sig_check = 1;
6642 aston(curthread);
6645 static void
6646 dtrace_action_stop(void)
6648 if (dtrace_destructive_disallow)
6649 return;
6651 if (!curthread->t_dtrace_stop) {
6652 curthread->t_dtrace_stop = 1;
6653 curthread->t_sig_check = 1;
6654 aston(curthread);
6658 static void
6659 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
6661 hrtime_t now;
6662 volatile uint16_t *flags;
6663 cpu_t *cpu = CPU;
6665 if (dtrace_destructive_disallow)
6666 return;
6668 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags;
6670 now = dtrace_gethrtime();
6672 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
6674 * We need to advance the mark to the current time.
6676 cpu->cpu_dtrace_chillmark = now;
6677 cpu->cpu_dtrace_chilled = 0;
6681 * Now check to see if the requested chill time would take us over
6682 * the maximum amount of time allowed in the chill interval. (Or
6683 * worse, if the calculation itself induces overflow.)
6685 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
6686 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
6687 *flags |= CPU_DTRACE_ILLOP;
6688 return;
6691 while (dtrace_gethrtime() - now < val)
6692 continue;
6695 * Normally, we assure that the value of the variable "timestamp" does
6696 * not change within an ECB. The presence of chill() represents an
6697 * exception to this rule, however.
6699 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
6700 cpu->cpu_dtrace_chilled += val;
6703 static void
6704 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
6705 uint64_t *buf, uint64_t arg)
6707 int nframes = DTRACE_USTACK_NFRAMES(arg);
6708 int strsize = DTRACE_USTACK_STRSIZE(arg);
6709 uint64_t *pcs = &buf[1], *fps;
6710 char *str = (char *)&pcs[nframes];
6711 int size, offs = 0, i, j;
6712 size_t rem;
6713 uintptr_t old = mstate->dtms_scratch_ptr, saved;
6714 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
6715 char *sym;
6718 * Should be taking a faster path if string space has not been
6719 * allocated.
6721 ASSERT(strsize != 0);
6724 * We will first allocate some temporary space for the frame pointers.
6726 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
6727 size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
6728 (nframes * sizeof (uint64_t));
6730 if (!DTRACE_INSCRATCH(mstate, size)) {
6732 * Not enough room for our frame pointers -- need to indicate
6733 * that we ran out of scratch space.
6735 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6736 return;
6739 mstate->dtms_scratch_ptr += size;
6740 saved = mstate->dtms_scratch_ptr;
6743 * Now get a stack with both program counters and frame pointers.
6745 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6746 dtrace_getufpstack(buf, fps, nframes + 1);
6747 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6750 * If that faulted, we're cooked.
6752 if (*flags & CPU_DTRACE_FAULT)
6753 goto out;
6756 * Now we want to walk up the stack, calling the USTACK helper. For
6757 * each iteration, we restore the scratch pointer.
6759 for (i = 0; i < nframes; i++) {
6760 mstate->dtms_scratch_ptr = saved;
6762 if (offs >= strsize)
6763 break;
6765 sym = (char *)(uintptr_t)dtrace_helper(
6766 DTRACE_HELPER_ACTION_USTACK,
6767 mstate, state, pcs[i], fps[i]);
6770 * If we faulted while running the helper, we're going to
6771 * clear the fault and null out the corresponding string.
6773 if (*flags & CPU_DTRACE_FAULT) {
6774 *flags &= ~CPU_DTRACE_FAULT;
6775 str[offs++] = '\0';
6776 continue;
6779 if (sym == NULL) {
6780 str[offs++] = '\0';
6781 continue;
6784 if (!dtrace_strcanload((uintptr_t)sym, strsize, &rem, mstate,
6785 &(state->dts_vstate))) {
6786 str[offs++] = '\0';
6787 continue;
6790 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6793 * Now copy in the string that the helper returned to us.
6795 for (j = 0; offs + j < strsize && j < rem; j++) {
6796 if ((str[offs + j] = sym[j]) == '\0')
6797 break;
6800 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6802 offs += j + 1;
6805 if (offs >= strsize) {
6807 * If we didn't have room for all of the strings, we don't
6808 * abort processing -- this needn't be a fatal error -- but we
6809 * still want to increment a counter (dts_stkstroverflows) to
6810 * allow this condition to be warned about. (If this is from
6811 * a jstack() action, it is easily tuned via jstackstrsize.)
6813 dtrace_error(&state->dts_stkstroverflows);
6816 while (offs < strsize)
6817 str[offs++] = '\0';
6819 out:
6820 mstate->dtms_scratch_ptr = old;
6823 static void
6824 dtrace_store_by_ref(dtrace_difo_t *dp, caddr_t tomax, size_t size,
6825 size_t *valoffsp, uint64_t *valp, uint64_t end, int intuple, int dtkind)
6827 volatile uint16_t *flags;
6828 uint64_t val = *valp;
6829 size_t valoffs = *valoffsp;
6831 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
6832 ASSERT(dtkind == DIF_TF_BYREF || dtkind == DIF_TF_BYUREF);
6835 * If this is a string, we're going to only load until we find the zero
6836 * byte -- after which we'll store zero bytes.
6838 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
6839 char c = '\0' + 1;
6840 size_t s;
6842 for (s = 0; s < size; s++) {
6843 if (c != '\0' && dtkind == DIF_TF_BYREF) {
6844 c = dtrace_load8(val++);
6845 } else if (c != '\0' && dtkind == DIF_TF_BYUREF) {
6846 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6847 c = dtrace_fuword8((void *)(uintptr_t)val++);
6848 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6849 if (*flags & CPU_DTRACE_FAULT)
6850 break;
6853 DTRACE_STORE(uint8_t, tomax, valoffs++, c);
6855 if (c == '\0' && intuple)
6856 break;
6858 } else {
6859 uint8_t c;
6860 while (valoffs < end) {
6861 if (dtkind == DIF_TF_BYREF) {
6862 c = dtrace_load8(val++);
6863 } else if (dtkind == DIF_TF_BYUREF) {
6864 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6865 c = dtrace_fuword8((void *)(uintptr_t)val++);
6866 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6867 if (*flags & CPU_DTRACE_FAULT)
6868 break;
6871 DTRACE_STORE(uint8_t, tomax,
6872 valoffs++, c);
6876 *valp = val;
6877 *valoffsp = valoffs;
6881 * If you're looking for the epicenter of DTrace, you just found it. This
6882 * is the function called by the provider to fire a probe -- from which all
6883 * subsequent probe-context DTrace activity emanates.
6885 void
6886 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
6887 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4)
6889 processorid_t cpuid;
6890 dtrace_icookie_t cookie;
6891 dtrace_probe_t *probe;
6892 dtrace_mstate_t mstate;
6893 dtrace_ecb_t *ecb;
6894 dtrace_action_t *act;
6895 intptr_t offs;
6896 size_t size;
6897 int vtime, onintr;
6898 volatile uint16_t *flags;
6899 hrtime_t now, end;
6902 * Kick out immediately if this CPU is still being born (in which case
6903 * curthread will be set to -1) or the current thread can't allow
6904 * probes in its current context.
6906 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE))
6907 return;
6909 cookie = dtrace_interrupt_disable();
6910 probe = dtrace_probes[id - 1];
6911 cpuid = CPU->cpu_id;
6912 onintr = CPU_ON_INTR(CPU);
6914 CPU->cpu_dtrace_probes++;
6916 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
6917 probe->dtpr_predcache == curthread->t_predcache) {
6919 * We have hit in the predicate cache; we know that
6920 * this predicate would evaluate to be false.
6922 dtrace_interrupt_enable(cookie);
6923 return;
6926 if (panic_quiesce) {
6928 * We don't trace anything if we're panicking.
6930 dtrace_interrupt_enable(cookie);
6931 return;
6934 now = mstate.dtms_timestamp = dtrace_gethrtime();
6935 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP;
6936 vtime = dtrace_vtime_references != 0;
6938 if (vtime && curthread->t_dtrace_start)
6939 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
6941 mstate.dtms_difo = NULL;
6942 mstate.dtms_probe = probe;
6943 mstate.dtms_strtok = (uintptr_t)NULL;
6944 mstate.dtms_arg[0] = arg0;
6945 mstate.dtms_arg[1] = arg1;
6946 mstate.dtms_arg[2] = arg2;
6947 mstate.dtms_arg[3] = arg3;
6948 mstate.dtms_arg[4] = arg4;
6950 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
6952 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
6953 dtrace_predicate_t *pred = ecb->dte_predicate;
6954 dtrace_state_t *state = ecb->dte_state;
6955 dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
6956 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
6957 dtrace_vstate_t *vstate = &state->dts_vstate;
6958 dtrace_provider_t *prov = probe->dtpr_provider;
6959 uint64_t tracememsize = 0;
6960 int committed = 0;
6961 caddr_t tomax;
6964 * A little subtlety with the following (seemingly innocuous)
6965 * declaration of the automatic 'val': by looking at the
6966 * code, you might think that it could be declared in the
6967 * action processing loop, below. (That is, it's only used in
6968 * the action processing loop.) However, it must be declared
6969 * out of that scope because in the case of DIF expression
6970 * arguments to aggregating actions, one iteration of the
6971 * action loop will use the last iteration's value.
6973 uint64_t val;
6975 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
6976 mstate.dtms_access = DTRACE_ACCESS_ARGS | DTRACE_ACCESS_PROC;
6977 mstate.dtms_getf = NULL;
6979 *flags &= ~CPU_DTRACE_ERROR;
6981 if (prov == dtrace_provider) {
6983 * If dtrace itself is the provider of this probe,
6984 * we're only going to continue processing the ECB if
6985 * arg0 (the dtrace_state_t) is equal to the ECB's
6986 * creating state. (This prevents disjoint consumers
6987 * from seeing one another's metaprobes.)
6989 if (arg0 != (uint64_t)(uintptr_t)state)
6990 continue;
6993 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
6995 * We're not currently active. If our provider isn't
6996 * the dtrace pseudo provider, we're not interested.
6998 if (prov != dtrace_provider)
6999 continue;
7002 * Now we must further check if we are in the BEGIN
7003 * probe. If we are, we will only continue processing
7004 * if we're still in WARMUP -- if one BEGIN enabling
7005 * has invoked the exit() action, we don't want to
7006 * evaluate subsequent BEGIN enablings.
7008 if (probe->dtpr_id == dtrace_probeid_begin &&
7009 state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
7010 ASSERT(state->dts_activity ==
7011 DTRACE_ACTIVITY_DRAINING);
7012 continue;
7016 if (ecb->dte_cond && !dtrace_priv_probe(state, &mstate, ecb))
7017 continue;
7019 if (now - state->dts_alive > dtrace_deadman_timeout) {
7021 * We seem to be dead. Unless we (a) have kernel
7022 * destructive permissions (b) have explicitly enabled
7023 * destructive actions and (c) destructive actions have
7024 * not been disabled, we're going to transition into
7025 * the KILLED state, from which no further processing
7026 * on this state will be performed.
7028 if (!dtrace_priv_kernel_destructive(state) ||
7029 !state->dts_cred.dcr_destructive ||
7030 dtrace_destructive_disallow) {
7031 void *activity = &state->dts_activity;
7032 dtrace_activity_t current;
7034 do {
7035 current = state->dts_activity;
7036 } while (dtrace_cas32(activity, current,
7037 DTRACE_ACTIVITY_KILLED) != current);
7039 continue;
7043 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
7044 ecb->dte_alignment, state, &mstate)) < 0)
7045 continue;
7047 tomax = buf->dtb_tomax;
7048 ASSERT(tomax != NULL);
7050 if (ecb->dte_size != 0) {
7051 dtrace_rechdr_t dtrh;
7052 if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
7053 mstate.dtms_timestamp = dtrace_gethrtime();
7054 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP;
7056 ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t));
7057 dtrh.dtrh_epid = ecb->dte_epid;
7058 DTRACE_RECORD_STORE_TIMESTAMP(&dtrh,
7059 mstate.dtms_timestamp);
7060 *((dtrace_rechdr_t *)(tomax + offs)) = dtrh;
7063 mstate.dtms_epid = ecb->dte_epid;
7064 mstate.dtms_present |= DTRACE_MSTATE_EPID;
7066 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
7067 mstate.dtms_access |= DTRACE_ACCESS_KERNEL;
7069 if (pred != NULL) {
7070 dtrace_difo_t *dp = pred->dtp_difo;
7071 int rval;
7073 rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
7075 if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
7076 dtrace_cacheid_t cid = probe->dtpr_predcache;
7078 if (cid != DTRACE_CACHEIDNONE && !onintr) {
7080 * Update the predicate cache...
7082 ASSERT(cid == pred->dtp_cacheid);
7083 curthread->t_predcache = cid;
7086 continue;
7090 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
7091 act != NULL; act = act->dta_next) {
7092 size_t valoffs;
7093 dtrace_difo_t *dp;
7094 dtrace_recdesc_t *rec = &act->dta_rec;
7096 size = rec->dtrd_size;
7097 valoffs = offs + rec->dtrd_offset;
7099 if (DTRACEACT_ISAGG(act->dta_kind)) {
7100 uint64_t v = 0xbad;
7101 dtrace_aggregation_t *agg;
7103 agg = (dtrace_aggregation_t *)act;
7105 if ((dp = act->dta_difo) != NULL)
7106 v = dtrace_dif_emulate(dp,
7107 &mstate, vstate, state);
7109 if (*flags & CPU_DTRACE_ERROR)
7110 continue;
7113 * Note that we always pass the expression
7114 * value from the previous iteration of the
7115 * action loop. This value will only be used
7116 * if there is an expression argument to the
7117 * aggregating action, denoted by the
7118 * dtag_hasarg field.
7120 dtrace_aggregate(agg, buf,
7121 offs, aggbuf, v, val);
7122 continue;
7125 switch (act->dta_kind) {
7126 case DTRACEACT_STOP:
7127 if (dtrace_priv_proc_destructive(state,
7128 &mstate))
7129 dtrace_action_stop();
7130 continue;
7132 case DTRACEACT_BREAKPOINT:
7133 if (dtrace_priv_kernel_destructive(state))
7134 dtrace_action_breakpoint(ecb);
7135 continue;
7137 case DTRACEACT_PANIC:
7138 if (dtrace_priv_kernel_destructive(state))
7139 dtrace_action_panic(ecb);
7140 continue;
7142 case DTRACEACT_STACK:
7143 if (!dtrace_priv_kernel(state))
7144 continue;
7146 dtrace_getpcstack((pc_t *)(tomax + valoffs),
7147 size / sizeof (pc_t), probe->dtpr_aframes,
7148 DTRACE_ANCHORED(probe) ? NULL :
7149 (uint32_t *)arg0);
7151 continue;
7153 case DTRACEACT_JSTACK:
7154 case DTRACEACT_USTACK:
7155 if (!dtrace_priv_proc(state, &mstate))
7156 continue;
7159 * See comment in DIF_VAR_PID.
7161 if (DTRACE_ANCHORED(mstate.dtms_probe) &&
7162 CPU_ON_INTR(CPU)) {
7163 int depth = DTRACE_USTACK_NFRAMES(
7164 rec->dtrd_arg) + 1;
7166 dtrace_bzero((void *)(tomax + valoffs),
7167 DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
7168 + depth * sizeof (uint64_t));
7170 continue;
7173 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
7174 curproc->p_dtrace_helpers != NULL) {
7176 * This is the slow path -- we have
7177 * allocated string space, and we're
7178 * getting the stack of a process that
7179 * has helpers. Call into a separate
7180 * routine to perform this processing.
7182 dtrace_action_ustack(&mstate, state,
7183 (uint64_t *)(tomax + valoffs),
7184 rec->dtrd_arg);
7185 continue;
7189 * Clear the string space, since there's no
7190 * helper to do it for us.
7192 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0) {
7193 int depth = DTRACE_USTACK_NFRAMES(
7194 rec->dtrd_arg);
7195 size_t strsize = DTRACE_USTACK_STRSIZE(
7196 rec->dtrd_arg);
7197 uint64_t *buf = (uint64_t *)(tomax +
7198 valoffs);
7199 void *strspace = &buf[depth + 1];
7201 dtrace_bzero(strspace,
7202 MIN(depth, strsize));
7205 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
7206 dtrace_getupcstack((uint64_t *)
7207 (tomax + valoffs),
7208 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
7209 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
7210 continue;
7212 default:
7213 break;
7216 dp = act->dta_difo;
7217 ASSERT(dp != NULL);
7219 val = dtrace_dif_emulate(dp, &mstate, vstate, state);
7221 if (*flags & CPU_DTRACE_ERROR)
7222 continue;
7224 switch (act->dta_kind) {
7225 case DTRACEACT_SPECULATE: {
7226 dtrace_rechdr_t *dtrh;
7228 ASSERT(buf == &state->dts_buffer[cpuid]);
7229 buf = dtrace_speculation_buffer(state,
7230 cpuid, val);
7232 if (buf == NULL) {
7233 *flags |= CPU_DTRACE_DROP;
7234 continue;
7237 offs = dtrace_buffer_reserve(buf,
7238 ecb->dte_needed, ecb->dte_alignment,
7239 state, NULL);
7241 if (offs < 0) {
7242 *flags |= CPU_DTRACE_DROP;
7243 continue;
7246 tomax = buf->dtb_tomax;
7247 ASSERT(tomax != NULL);
7249 if (ecb->dte_size == 0)
7250 continue;
7252 ASSERT3U(ecb->dte_size, >=,
7253 sizeof (dtrace_rechdr_t));
7254 dtrh = ((void *)(tomax + offs));
7255 dtrh->dtrh_epid = ecb->dte_epid;
7257 * When the speculation is committed, all of
7258 * the records in the speculative buffer will
7259 * have their timestamps set to the commit
7260 * time. Until then, it is set to a sentinel
7261 * value, for debugability.
7263 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX);
7264 continue;
7267 case DTRACEACT_CHILL:
7268 if (dtrace_priv_kernel_destructive(state))
7269 dtrace_action_chill(&mstate, val);
7270 continue;
7272 case DTRACEACT_RAISE:
7273 if (dtrace_priv_proc_destructive(state,
7274 &mstate))
7275 dtrace_action_raise(val);
7276 continue;
7278 case DTRACEACT_COMMIT:
7279 ASSERT(!committed);
7282 * We need to commit our buffer state.
7284 if (ecb->dte_size)
7285 buf->dtb_offset = offs + ecb->dte_size;
7286 buf = &state->dts_buffer[cpuid];
7287 dtrace_speculation_commit(state, cpuid, val);
7288 committed = 1;
7289 continue;
7291 case DTRACEACT_DISCARD:
7292 dtrace_speculation_discard(state, cpuid, val);
7293 continue;
7295 case DTRACEACT_DIFEXPR:
7296 case DTRACEACT_LIBACT:
7297 case DTRACEACT_PRINTF:
7298 case DTRACEACT_PRINTA:
7299 case DTRACEACT_SYSTEM:
7300 case DTRACEACT_FREOPEN:
7301 case DTRACEACT_TRACEMEM:
7302 break;
7304 case DTRACEACT_TRACEMEM_DYNSIZE:
7305 tracememsize = val;
7306 break;
7308 case DTRACEACT_SYM:
7309 case DTRACEACT_MOD:
7310 if (!dtrace_priv_kernel(state))
7311 continue;
7312 break;
7314 case DTRACEACT_USYM:
7315 case DTRACEACT_UMOD:
7316 case DTRACEACT_UADDR: {
7317 struct pid *pid = curthread->t_procp->p_pidp;
7319 if (!dtrace_priv_proc(state, &mstate))
7320 continue;
7322 DTRACE_STORE(uint64_t, tomax,
7323 valoffs, (uint64_t)pid->pid_id);
7324 DTRACE_STORE(uint64_t, tomax,
7325 valoffs + sizeof (uint64_t), val);
7327 continue;
7330 case DTRACEACT_EXIT: {
7332 * For the exit action, we are going to attempt
7333 * to atomically set our activity to be
7334 * draining. If this fails (either because
7335 * another CPU has beat us to the exit action,
7336 * or because our current activity is something
7337 * other than ACTIVE or WARMUP), we will
7338 * continue. This assures that the exit action
7339 * can be successfully recorded at most once
7340 * when we're in the ACTIVE state. If we're
7341 * encountering the exit() action while in
7342 * COOLDOWN, however, we want to honor the new
7343 * status code. (We know that we're the only
7344 * thread in COOLDOWN, so there is no race.)
7346 void *activity = &state->dts_activity;
7347 dtrace_activity_t current = state->dts_activity;
7349 if (current == DTRACE_ACTIVITY_COOLDOWN)
7350 break;
7352 if (current != DTRACE_ACTIVITY_WARMUP)
7353 current = DTRACE_ACTIVITY_ACTIVE;
7355 if (dtrace_cas32(activity, current,
7356 DTRACE_ACTIVITY_DRAINING) != current) {
7357 *flags |= CPU_DTRACE_DROP;
7358 continue;
7361 break;
7364 default:
7365 ASSERT(0);
7368 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ||
7369 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYUREF) {
7370 uintptr_t end = valoffs + size;
7372 if (tracememsize != 0 &&
7373 valoffs + tracememsize < end) {
7374 end = valoffs + tracememsize;
7375 tracememsize = 0;
7378 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF &&
7379 !dtrace_vcanload((void *)(uintptr_t)val,
7380 &dp->dtdo_rtype, NULL, &mstate, vstate))
7381 continue;
7383 dtrace_store_by_ref(dp, tomax, size, &valoffs,
7384 &val, end, act->dta_intuple,
7385 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ?
7386 DIF_TF_BYREF: DIF_TF_BYUREF);
7387 continue;
7390 switch (size) {
7391 case 0:
7392 break;
7394 case sizeof (uint8_t):
7395 DTRACE_STORE(uint8_t, tomax, valoffs, val);
7396 break;
7397 case sizeof (uint16_t):
7398 DTRACE_STORE(uint16_t, tomax, valoffs, val);
7399 break;
7400 case sizeof (uint32_t):
7401 DTRACE_STORE(uint32_t, tomax, valoffs, val);
7402 break;
7403 case sizeof (uint64_t):
7404 DTRACE_STORE(uint64_t, tomax, valoffs, val);
7405 break;
7406 default:
7408 * Any other size should have been returned by
7409 * reference, not by value.
7411 ASSERT(0);
7412 break;
7416 if (*flags & CPU_DTRACE_DROP)
7417 continue;
7419 if (*flags & CPU_DTRACE_FAULT) {
7420 int ndx;
7421 dtrace_action_t *err;
7423 buf->dtb_errors++;
7425 if (probe->dtpr_id == dtrace_probeid_error) {
7427 * There's nothing we can do -- we had an
7428 * error on the error probe. We bump an
7429 * error counter to at least indicate that
7430 * this condition happened.
7432 dtrace_error(&state->dts_dblerrors);
7433 continue;
7436 if (vtime) {
7438 * Before recursing on dtrace_probe(), we
7439 * need to explicitly clear out our start
7440 * time to prevent it from being accumulated
7441 * into t_dtrace_vtime.
7443 curthread->t_dtrace_start = 0;
7447 * Iterate over the actions to figure out which action
7448 * we were processing when we experienced the error.
7449 * Note that act points _past_ the faulting action; if
7450 * act is ecb->dte_action, the fault was in the
7451 * predicate, if it's ecb->dte_action->dta_next it's
7452 * in action #1, and so on.
7454 for (err = ecb->dte_action, ndx = 0;
7455 err != act; err = err->dta_next, ndx++)
7456 continue;
7458 dtrace_probe_error(state, ecb->dte_epid, ndx,
7459 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
7460 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
7461 cpu_core[cpuid].cpuc_dtrace_illval);
7463 continue;
7466 if (!committed)
7467 buf->dtb_offset = offs + ecb->dte_size;
7470 end = dtrace_gethrtime();
7471 if (vtime)
7472 curthread->t_dtrace_start = end;
7474 CPU->cpu_dtrace_nsec += end - now;
7476 dtrace_interrupt_enable(cookie);
7480 * DTrace Probe Hashing Functions
7482 * The functions in this section (and indeed, the functions in remaining
7483 * sections) are not _called_ from probe context. (Any exceptions to this are
7484 * marked with a "Note:".) Rather, they are called from elsewhere in the
7485 * DTrace framework to look-up probes in, add probes to and remove probes from
7486 * the DTrace probe hashes. (Each probe is hashed by each element of the
7487 * probe tuple -- allowing for fast lookups, regardless of what was
7488 * specified.)
7490 static uint_t
7491 dtrace_hash_str(char *p)
7493 unsigned int g;
7494 uint_t hval = 0;
7496 while (*p) {
7497 hval = (hval << 4) + *p++;
7498 if ((g = (hval & 0xf0000000)) != 0)
7499 hval ^= g >> 24;
7500 hval &= ~g;
7502 return (hval);
7505 static dtrace_hash_t *
7506 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
7508 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
7510 hash->dth_stroffs = stroffs;
7511 hash->dth_nextoffs = nextoffs;
7512 hash->dth_prevoffs = prevoffs;
7514 hash->dth_size = 1;
7515 hash->dth_mask = hash->dth_size - 1;
7517 hash->dth_tab = kmem_zalloc(hash->dth_size *
7518 sizeof (dtrace_hashbucket_t *), KM_SLEEP);
7520 return (hash);
7523 static void
7524 dtrace_hash_destroy(dtrace_hash_t *hash)
7526 #ifdef DEBUG
7527 int i;
7529 for (i = 0; i < hash->dth_size; i++)
7530 ASSERT(hash->dth_tab[i] == NULL);
7531 #endif
7533 kmem_free(hash->dth_tab,
7534 hash->dth_size * sizeof (dtrace_hashbucket_t *));
7535 kmem_free(hash, sizeof (dtrace_hash_t));
7538 static void
7539 dtrace_hash_resize(dtrace_hash_t *hash)
7541 int size = hash->dth_size, i, ndx;
7542 int new_size = hash->dth_size << 1;
7543 int new_mask = new_size - 1;
7544 dtrace_hashbucket_t **new_tab, *bucket, *next;
7546 ASSERT((new_size & new_mask) == 0);
7548 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
7550 for (i = 0; i < size; i++) {
7551 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
7552 dtrace_probe_t *probe = bucket->dthb_chain;
7554 ASSERT(probe != NULL);
7555 ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
7557 next = bucket->dthb_next;
7558 bucket->dthb_next = new_tab[ndx];
7559 new_tab[ndx] = bucket;
7563 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
7564 hash->dth_tab = new_tab;
7565 hash->dth_size = new_size;
7566 hash->dth_mask = new_mask;
7569 static void
7570 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
7572 int hashval = DTRACE_HASHSTR(hash, new);
7573 int ndx = hashval & hash->dth_mask;
7574 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7575 dtrace_probe_t **nextp, **prevp;
7577 for (; bucket != NULL; bucket = bucket->dthb_next) {
7578 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
7579 goto add;
7582 if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
7583 dtrace_hash_resize(hash);
7584 dtrace_hash_add(hash, new);
7585 return;
7588 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
7589 bucket->dthb_next = hash->dth_tab[ndx];
7590 hash->dth_tab[ndx] = bucket;
7591 hash->dth_nbuckets++;
7593 add:
7594 nextp = DTRACE_HASHNEXT(hash, new);
7595 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
7596 *nextp = bucket->dthb_chain;
7598 if (bucket->dthb_chain != NULL) {
7599 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
7600 ASSERT(*prevp == NULL);
7601 *prevp = new;
7604 bucket->dthb_chain = new;
7605 bucket->dthb_len++;
7608 static dtrace_probe_t *
7609 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template)
7611 int hashval = DTRACE_HASHSTR(hash, template);
7612 int ndx = hashval & hash->dth_mask;
7613 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7615 for (; bucket != NULL; bucket = bucket->dthb_next) {
7616 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
7617 return (bucket->dthb_chain);
7620 return (NULL);
7623 static int
7624 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
7626 int hashval = DTRACE_HASHSTR(hash, template);
7627 int ndx = hashval & hash->dth_mask;
7628 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7630 for (; bucket != NULL; bucket = bucket->dthb_next) {
7631 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
7632 return (bucket->dthb_len);
7635 return (0);
7638 static void
7639 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
7641 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
7642 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7644 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
7645 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
7648 * Find the bucket that we're removing this probe from.
7650 for (; bucket != NULL; bucket = bucket->dthb_next) {
7651 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
7652 break;
7655 ASSERT(bucket != NULL);
7657 if (*prevp == NULL) {
7658 if (*nextp == NULL) {
7660 * The removed probe was the only probe on this
7661 * bucket; we need to remove the bucket.
7663 dtrace_hashbucket_t *b = hash->dth_tab[ndx];
7665 ASSERT(bucket->dthb_chain == probe);
7666 ASSERT(b != NULL);
7668 if (b == bucket) {
7669 hash->dth_tab[ndx] = bucket->dthb_next;
7670 } else {
7671 while (b->dthb_next != bucket)
7672 b = b->dthb_next;
7673 b->dthb_next = bucket->dthb_next;
7676 ASSERT(hash->dth_nbuckets > 0);
7677 hash->dth_nbuckets--;
7678 kmem_free(bucket, sizeof (dtrace_hashbucket_t));
7679 return;
7682 bucket->dthb_chain = *nextp;
7683 } else {
7684 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
7687 if (*nextp != NULL)
7688 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
7692 * DTrace Utility Functions
7694 * These are random utility functions that are _not_ called from probe context.
7696 static int
7697 dtrace_badattr(const dtrace_attribute_t *a)
7699 return (a->dtat_name > DTRACE_STABILITY_MAX ||
7700 a->dtat_data > DTRACE_STABILITY_MAX ||
7701 a->dtat_class > DTRACE_CLASS_MAX);
7705 * Return a duplicate copy of a string. If the specified string is NULL,
7706 * this function returns a zero-length string.
7708 static char *
7709 dtrace_strdup(const char *str)
7711 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP);
7713 if (str != NULL)
7714 (void) strcpy(new, str);
7716 return (new);
7719 #define DTRACE_ISALPHA(c) \
7720 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
7722 static int
7723 dtrace_badname(const char *s)
7725 char c;
7727 if (s == NULL || (c = *s++) == '\0')
7728 return (0);
7730 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
7731 return (1);
7733 while ((c = *s++) != '\0') {
7734 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
7735 c != '-' && c != '_' && c != '.' && c != '`')
7736 return (1);
7739 return (0);
7742 static void
7743 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
7745 uint32_t priv;
7747 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
7749 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter.
7751 priv = DTRACE_PRIV_ALL;
7752 } else {
7753 *uidp = crgetuid(cr);
7754 *zoneidp = crgetzoneid(cr);
7756 priv = 0;
7757 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
7758 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
7759 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
7760 priv |= DTRACE_PRIV_USER;
7761 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
7762 priv |= DTRACE_PRIV_PROC;
7763 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
7764 priv |= DTRACE_PRIV_OWNER;
7765 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
7766 priv |= DTRACE_PRIV_ZONEOWNER;
7769 *privp = priv;
7772 #ifdef DTRACE_ERRDEBUG
7773 static void
7774 dtrace_errdebug(const char *str)
7776 int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ;
7777 int occupied = 0;
7779 mutex_enter(&dtrace_errlock);
7780 dtrace_errlast = str;
7781 dtrace_errthread = curthread;
7783 while (occupied++ < DTRACE_ERRHASHSZ) {
7784 if (dtrace_errhash[hval].dter_msg == str) {
7785 dtrace_errhash[hval].dter_count++;
7786 goto out;
7789 if (dtrace_errhash[hval].dter_msg != NULL) {
7790 hval = (hval + 1) % DTRACE_ERRHASHSZ;
7791 continue;
7794 dtrace_errhash[hval].dter_msg = str;
7795 dtrace_errhash[hval].dter_count = 1;
7796 goto out;
7799 panic("dtrace: undersized error hash");
7800 out:
7801 mutex_exit(&dtrace_errlock);
7803 #endif
7806 * DTrace Matching Functions
7808 * These functions are used to match groups of probes, given some elements of
7809 * a probe tuple, or some globbed expressions for elements of a probe tuple.
7811 static int
7812 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
7813 zoneid_t zoneid)
7815 if (priv != DTRACE_PRIV_ALL) {
7816 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
7817 uint32_t match = priv & ppriv;
7820 * No PRIV_DTRACE_* privileges...
7822 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
7823 DTRACE_PRIV_KERNEL)) == 0)
7824 return (0);
7827 * No matching bits, but there were bits to match...
7829 if (match == 0 && ppriv != 0)
7830 return (0);
7833 * Need to have permissions to the process, but don't...
7835 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
7836 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
7837 return (0);
7841 * Need to be in the same zone unless we possess the
7842 * privilege to examine all zones.
7844 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
7845 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
7846 return (0);
7850 return (1);
7854 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
7855 * consists of input pattern strings and an ops-vector to evaluate them.
7856 * This function returns >0 for match, 0 for no match, and <0 for error.
7858 static int
7859 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
7860 uint32_t priv, uid_t uid, zoneid_t zoneid)
7862 dtrace_provider_t *pvp = prp->dtpr_provider;
7863 int rv;
7865 if (pvp->dtpv_defunct)
7866 return (0);
7868 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
7869 return (rv);
7871 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
7872 return (rv);
7874 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
7875 return (rv);
7877 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
7878 return (rv);
7880 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
7881 return (0);
7883 return (rv);
7887 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
7888 * interface for matching a glob pattern 'p' to an input string 's'. Unlike
7889 * libc's version, the kernel version only applies to 8-bit ASCII strings.
7890 * In addition, all of the recursion cases except for '*' matching have been
7891 * unwound. For '*', we still implement recursive evaluation, but a depth
7892 * counter is maintained and matching is aborted if we recurse too deep.
7893 * The function returns 0 if no match, >0 if match, and <0 if recursion error.
7895 static int
7896 dtrace_match_glob(const char *s, const char *p, int depth)
7898 const char *olds;
7899 char s1, c;
7900 int gs;
7902 if (depth > DTRACE_PROBEKEY_MAXDEPTH)
7903 return (-1);
7905 if (s == NULL)
7906 s = ""; /* treat NULL as empty string */
7908 top:
7909 olds = s;
7910 s1 = *s++;
7912 if (p == NULL)
7913 return (0);
7915 if ((c = *p++) == '\0')
7916 return (s1 == '\0');
7918 switch (c) {
7919 case '[': {
7920 int ok = 0, notflag = 0;
7921 char lc = '\0';
7923 if (s1 == '\0')
7924 return (0);
7926 if (*p == '!') {
7927 notflag = 1;
7928 p++;
7931 if ((c = *p++) == '\0')
7932 return (0);
7934 do {
7935 if (c == '-' && lc != '\0' && *p != ']') {
7936 if ((c = *p++) == '\0')
7937 return (0);
7938 if (c == '\\' && (c = *p++) == '\0')
7939 return (0);
7941 if (notflag) {
7942 if (s1 < lc || s1 > c)
7943 ok++;
7944 else
7945 return (0);
7946 } else if (lc <= s1 && s1 <= c)
7947 ok++;
7949 } else if (c == '\\' && (c = *p++) == '\0')
7950 return (0);
7952 lc = c; /* save left-hand 'c' for next iteration */
7954 if (notflag) {
7955 if (s1 != c)
7956 ok++;
7957 else
7958 return (0);
7959 } else if (s1 == c)
7960 ok++;
7962 if ((c = *p++) == '\0')
7963 return (0);
7965 } while (c != ']');
7967 if (ok)
7968 goto top;
7970 return (0);
7973 case '\\':
7974 if ((c = *p++) == '\0')
7975 return (0);
7976 /*FALLTHRU*/
7978 default:
7979 if (c != s1)
7980 return (0);
7981 /*FALLTHRU*/
7983 case '?':
7984 if (s1 != '\0')
7985 goto top;
7986 return (0);
7988 case '*':
7989 while (*p == '*')
7990 p++; /* consecutive *'s are identical to a single one */
7992 if (*p == '\0')
7993 return (1);
7995 for (s = olds; *s != '\0'; s++) {
7996 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
7997 return (gs);
8000 return (0);
8004 /*ARGSUSED*/
8005 static int
8006 dtrace_match_string(const char *s, const char *p, int depth)
8008 return (s != NULL && strcmp(s, p) == 0);
8011 /*ARGSUSED*/
8012 static int
8013 dtrace_match_nul(const char *s, const char *p, int depth)
8015 return (1); /* always match the empty pattern */
8018 /*ARGSUSED*/
8019 static int
8020 dtrace_match_nonzero(const char *s, const char *p, int depth)
8022 return (s != NULL && s[0] != '\0');
8025 static int
8026 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
8027 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg)
8029 dtrace_probe_t template, *probe;
8030 dtrace_hash_t *hash = NULL;
8031 int len, rc, best = INT_MAX, nmatched = 0;
8032 dtrace_id_t i;
8034 ASSERT(MUTEX_HELD(&dtrace_lock));
8037 * If the probe ID is specified in the key, just lookup by ID and
8038 * invoke the match callback once if a matching probe is found.
8040 if (pkp->dtpk_id != DTRACE_IDNONE) {
8041 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
8042 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
8043 if ((*matched)(probe, arg) == DTRACE_MATCH_FAIL)
8044 return (DTRACE_MATCH_FAIL);
8045 nmatched++;
8047 return (nmatched);
8050 template.dtpr_mod = (char *)pkp->dtpk_mod;
8051 template.dtpr_func = (char *)pkp->dtpk_func;
8052 template.dtpr_name = (char *)pkp->dtpk_name;
8055 * We want to find the most distinct of the module name, function
8056 * name, and name. So for each one that is not a glob pattern or
8057 * empty string, we perform a lookup in the corresponding hash and
8058 * use the hash table with the fewest collisions to do our search.
8060 if (pkp->dtpk_mmatch == &dtrace_match_string &&
8061 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
8062 best = len;
8063 hash = dtrace_bymod;
8066 if (pkp->dtpk_fmatch == &dtrace_match_string &&
8067 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
8068 best = len;
8069 hash = dtrace_byfunc;
8072 if (pkp->dtpk_nmatch == &dtrace_match_string &&
8073 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
8074 best = len;
8075 hash = dtrace_byname;
8079 * If we did not select a hash table, iterate over every probe and
8080 * invoke our callback for each one that matches our input probe key.
8082 if (hash == NULL) {
8083 for (i = 0; i < dtrace_nprobes; i++) {
8084 if ((probe = dtrace_probes[i]) == NULL ||
8085 dtrace_match_probe(probe, pkp, priv, uid,
8086 zoneid) <= 0)
8087 continue;
8089 nmatched++;
8091 if ((rc = (*matched)(probe, arg)) !=
8092 DTRACE_MATCH_NEXT) {
8093 if (rc == DTRACE_MATCH_FAIL)
8094 return (DTRACE_MATCH_FAIL);
8095 break;
8099 return (nmatched);
8103 * If we selected a hash table, iterate over each probe of the same key
8104 * name and invoke the callback for every probe that matches the other
8105 * attributes of our input probe key.
8107 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
8108 probe = *(DTRACE_HASHNEXT(hash, probe))) {
8110 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
8111 continue;
8113 nmatched++;
8115 if ((rc = (*matched)(probe, arg)) != DTRACE_MATCH_NEXT) {
8116 if (rc == DTRACE_MATCH_FAIL)
8117 return (DTRACE_MATCH_FAIL);
8118 break;
8122 return (nmatched);
8126 * Return the function pointer dtrace_probecmp() should use to compare the
8127 * specified pattern with a string. For NULL or empty patterns, we select
8128 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob().
8129 * For non-empty non-glob strings, we use dtrace_match_string().
8131 static dtrace_probekey_f *
8132 dtrace_probekey_func(const char *p)
8134 char c;
8136 if (p == NULL || *p == '\0')
8137 return (&dtrace_match_nul);
8139 while ((c = *p++) != '\0') {
8140 if (c == '[' || c == '?' || c == '*' || c == '\\')
8141 return (&dtrace_match_glob);
8144 return (&dtrace_match_string);
8148 * Build a probe comparison key for use with dtrace_match_probe() from the
8149 * given probe description. By convention, a null key only matches anchored
8150 * probes: if each field is the empty string, reset dtpk_fmatch to
8151 * dtrace_match_nonzero().
8153 static void
8154 dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
8156 pkp->dtpk_prov = pdp->dtpd_provider;
8157 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
8159 pkp->dtpk_mod = pdp->dtpd_mod;
8160 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
8162 pkp->dtpk_func = pdp->dtpd_func;
8163 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
8165 pkp->dtpk_name = pdp->dtpd_name;
8166 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
8168 pkp->dtpk_id = pdp->dtpd_id;
8170 if (pkp->dtpk_id == DTRACE_IDNONE &&
8171 pkp->dtpk_pmatch == &dtrace_match_nul &&
8172 pkp->dtpk_mmatch == &dtrace_match_nul &&
8173 pkp->dtpk_fmatch == &dtrace_match_nul &&
8174 pkp->dtpk_nmatch == &dtrace_match_nul)
8175 pkp->dtpk_fmatch = &dtrace_match_nonzero;
8179 * DTrace Provider-to-Framework API Functions
8181 * These functions implement much of the Provider-to-Framework API, as
8182 * described in <sys/dtrace.h>. The parts of the API not in this section are
8183 * the functions in the API for probe management (found below), and
8184 * dtrace_probe() itself (found above).
8188 * Register the calling provider with the DTrace framework. This should
8189 * generally be called by DTrace providers in their attach(9E) entry point.
8192 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
8193 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
8195 dtrace_provider_t *provider;
8197 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
8198 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8199 "arguments", name ? name : "<NULL>");
8200 return (EINVAL);
8203 if (name[0] == '\0' || dtrace_badname(name)) {
8204 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8205 "provider name", name);
8206 return (EINVAL);
8209 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
8210 pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
8211 pops->dtps_destroy == NULL ||
8212 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
8213 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8214 "provider ops", name);
8215 return (EINVAL);
8218 if (dtrace_badattr(&pap->dtpa_provider) ||
8219 dtrace_badattr(&pap->dtpa_mod) ||
8220 dtrace_badattr(&pap->dtpa_func) ||
8221 dtrace_badattr(&pap->dtpa_name) ||
8222 dtrace_badattr(&pap->dtpa_args)) {
8223 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8224 "provider attributes", name);
8225 return (EINVAL);
8228 if (priv & ~DTRACE_PRIV_ALL) {
8229 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8230 "privilege attributes", name);
8231 return (EINVAL);
8234 if ((priv & DTRACE_PRIV_KERNEL) &&
8235 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
8236 pops->dtps_mode == NULL) {
8237 cmn_err(CE_WARN, "failed to register provider '%s': need "
8238 "dtps_mode() op for given privilege attributes", name);
8239 return (EINVAL);
8242 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
8243 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
8244 (void) strcpy(provider->dtpv_name, name);
8246 provider->dtpv_attr = *pap;
8247 provider->dtpv_priv.dtpp_flags = priv;
8248 if (cr != NULL) {
8249 provider->dtpv_priv.dtpp_uid = crgetuid(cr);
8250 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
8252 provider->dtpv_pops = *pops;
8254 if (pops->dtps_provide == NULL) {
8255 ASSERT(pops->dtps_provide_module != NULL);
8256 provider->dtpv_pops.dtps_provide =
8257 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop;
8260 if (pops->dtps_provide_module == NULL) {
8261 ASSERT(pops->dtps_provide != NULL);
8262 provider->dtpv_pops.dtps_provide_module =
8263 (void (*)(void *, struct modctl *))dtrace_nullop;
8266 if (pops->dtps_suspend == NULL) {
8267 ASSERT(pops->dtps_resume == NULL);
8268 provider->dtpv_pops.dtps_suspend =
8269 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
8270 provider->dtpv_pops.dtps_resume =
8271 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
8274 provider->dtpv_arg = arg;
8275 *idp = (dtrace_provider_id_t)provider;
8277 if (pops == &dtrace_provider_ops) {
8278 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8279 ASSERT(MUTEX_HELD(&dtrace_lock));
8280 ASSERT(dtrace_anon.dta_enabling == NULL);
8283 * We make sure that the DTrace provider is at the head of
8284 * the provider chain.
8286 provider->dtpv_next = dtrace_provider;
8287 dtrace_provider = provider;
8288 return (0);
8291 mutex_enter(&dtrace_provider_lock);
8292 mutex_enter(&dtrace_lock);
8295 * If there is at least one provider registered, we'll add this
8296 * provider after the first provider.
8298 if (dtrace_provider != NULL) {
8299 provider->dtpv_next = dtrace_provider->dtpv_next;
8300 dtrace_provider->dtpv_next = provider;
8301 } else {
8302 dtrace_provider = provider;
8305 if (dtrace_retained != NULL) {
8306 dtrace_enabling_provide(provider);
8309 * Now we need to call dtrace_enabling_matchall() -- which
8310 * will acquire cpu_lock and dtrace_lock. We therefore need
8311 * to drop all of our locks before calling into it...
8313 mutex_exit(&dtrace_lock);
8314 mutex_exit(&dtrace_provider_lock);
8315 dtrace_enabling_matchall();
8317 return (0);
8320 mutex_exit(&dtrace_lock);
8321 mutex_exit(&dtrace_provider_lock);
8323 return (0);
8327 * Unregister the specified provider from the DTrace framework. This should
8328 * generally be called by DTrace providers in their detach(9E) entry point.
8331 dtrace_unregister(dtrace_provider_id_t id)
8333 dtrace_provider_t *old = (dtrace_provider_t *)id;
8334 dtrace_provider_t *prev = NULL;
8335 int i, self = 0, noreap = 0;
8336 dtrace_probe_t *probe, *first = NULL;
8338 if (old->dtpv_pops.dtps_enable ==
8339 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop) {
8341 * If DTrace itself is the provider, we're called with locks
8342 * already held.
8344 ASSERT(old == dtrace_provider);
8345 ASSERT(dtrace_devi != NULL);
8346 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8347 ASSERT(MUTEX_HELD(&dtrace_lock));
8348 self = 1;
8350 if (dtrace_provider->dtpv_next != NULL) {
8352 * There's another provider here; return failure.
8354 return (EBUSY);
8356 } else {
8357 mutex_enter(&dtrace_provider_lock);
8358 mutex_enter(&mod_lock);
8359 mutex_enter(&dtrace_lock);
8363 * If anyone has /dev/dtrace open, or if there are anonymous enabled
8364 * probes, we refuse to let providers slither away, unless this
8365 * provider has already been explicitly invalidated.
8367 if (!old->dtpv_defunct &&
8368 (dtrace_opens || (dtrace_anon.dta_state != NULL &&
8369 dtrace_anon.dta_state->dts_necbs > 0))) {
8370 if (!self) {
8371 mutex_exit(&dtrace_lock);
8372 mutex_exit(&mod_lock);
8373 mutex_exit(&dtrace_provider_lock);
8375 return (EBUSY);
8379 * Attempt to destroy the probes associated with this provider.
8381 for (i = 0; i < dtrace_nprobes; i++) {
8382 if ((probe = dtrace_probes[i]) == NULL)
8383 continue;
8385 if (probe->dtpr_provider != old)
8386 continue;
8388 if (probe->dtpr_ecb == NULL)
8389 continue;
8392 * If we are trying to unregister a defunct provider, and the
8393 * provider was made defunct within the interval dictated by
8394 * dtrace_unregister_defunct_reap, we'll (asynchronously)
8395 * attempt to reap our enablings. To denote that the provider
8396 * should reattempt to unregister itself at some point in the
8397 * future, we will return a differentiable error code (EAGAIN
8398 * instead of EBUSY) in this case.
8400 if (dtrace_gethrtime() - old->dtpv_defunct >
8401 dtrace_unregister_defunct_reap)
8402 noreap = 1;
8404 if (!self) {
8405 mutex_exit(&dtrace_lock);
8406 mutex_exit(&mod_lock);
8407 mutex_exit(&dtrace_provider_lock);
8410 if (noreap)
8411 return (EBUSY);
8413 (void) taskq_dispatch(dtrace_taskq,
8414 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP);
8416 return (EAGAIN);
8420 * All of the probes for this provider are disabled; we can safely
8421 * remove all of them from their hash chains and from the probe array.
8423 for (i = 0; i < dtrace_nprobes; i++) {
8424 if ((probe = dtrace_probes[i]) == NULL)
8425 continue;
8427 if (probe->dtpr_provider != old)
8428 continue;
8430 dtrace_probes[i] = NULL;
8432 dtrace_hash_remove(dtrace_bymod, probe);
8433 dtrace_hash_remove(dtrace_byfunc, probe);
8434 dtrace_hash_remove(dtrace_byname, probe);
8436 if (first == NULL) {
8437 first = probe;
8438 probe->dtpr_nextmod = NULL;
8439 } else {
8440 probe->dtpr_nextmod = first;
8441 first = probe;
8446 * The provider's probes have been removed from the hash chains and
8447 * from the probe array. Now issue a dtrace_sync() to be sure that
8448 * everyone has cleared out from any probe array processing.
8450 dtrace_sync();
8452 for (probe = first; probe != NULL; probe = first) {
8453 first = probe->dtpr_nextmod;
8455 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
8456 probe->dtpr_arg);
8457 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
8458 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
8459 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
8460 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
8461 kmem_free(probe, sizeof (dtrace_probe_t));
8464 if ((prev = dtrace_provider) == old) {
8465 ASSERT(self || dtrace_devi == NULL);
8466 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL);
8467 dtrace_provider = old->dtpv_next;
8468 } else {
8469 while (prev != NULL && prev->dtpv_next != old)
8470 prev = prev->dtpv_next;
8472 if (prev == NULL) {
8473 panic("attempt to unregister non-existent "
8474 "dtrace provider %p\n", (void *)id);
8477 prev->dtpv_next = old->dtpv_next;
8480 if (!self) {
8481 mutex_exit(&dtrace_lock);
8482 mutex_exit(&mod_lock);
8483 mutex_exit(&dtrace_provider_lock);
8486 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1);
8487 kmem_free(old, sizeof (dtrace_provider_t));
8489 return (0);
8493 * Invalidate the specified provider. All subsequent probe lookups for the
8494 * specified provider will fail, but its probes will not be removed.
8496 void
8497 dtrace_invalidate(dtrace_provider_id_t id)
8499 dtrace_provider_t *pvp = (dtrace_provider_t *)id;
8501 ASSERT(pvp->dtpv_pops.dtps_enable !=
8502 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
8504 mutex_enter(&dtrace_provider_lock);
8505 mutex_enter(&dtrace_lock);
8507 pvp->dtpv_defunct = dtrace_gethrtime();
8509 mutex_exit(&dtrace_lock);
8510 mutex_exit(&dtrace_provider_lock);
8514 * Indicate whether or not DTrace has attached.
8517 dtrace_attached(void)
8520 * dtrace_provider will be non-NULL iff the DTrace driver has
8521 * attached. (It's non-NULL because DTrace is always itself a
8522 * provider.)
8524 return (dtrace_provider != NULL);
8528 * Remove all the unenabled probes for the given provider. This function is
8529 * not unlike dtrace_unregister(), except that it doesn't remove the provider
8530 * -- just as many of its associated probes as it can.
8533 dtrace_condense(dtrace_provider_id_t id)
8535 dtrace_provider_t *prov = (dtrace_provider_t *)id;
8536 int i;
8537 dtrace_probe_t *probe;
8540 * Make sure this isn't the dtrace provider itself.
8542 ASSERT(prov->dtpv_pops.dtps_enable !=
8543 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
8545 mutex_enter(&dtrace_provider_lock);
8546 mutex_enter(&dtrace_lock);
8549 * Attempt to destroy the probes associated with this provider.
8551 for (i = 0; i < dtrace_nprobes; i++) {
8552 if ((probe = dtrace_probes[i]) == NULL)
8553 continue;
8555 if (probe->dtpr_provider != prov)
8556 continue;
8558 if (probe->dtpr_ecb != NULL)
8559 continue;
8561 dtrace_probes[i] = NULL;
8563 dtrace_hash_remove(dtrace_bymod, probe);
8564 dtrace_hash_remove(dtrace_byfunc, probe);
8565 dtrace_hash_remove(dtrace_byname, probe);
8567 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
8568 probe->dtpr_arg);
8569 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
8570 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
8571 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
8572 kmem_free(probe, sizeof (dtrace_probe_t));
8573 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
8576 mutex_exit(&dtrace_lock);
8577 mutex_exit(&dtrace_provider_lock);
8579 return (0);
8583 * DTrace Probe Management Functions
8585 * The functions in this section perform the DTrace probe management,
8586 * including functions to create probes, look-up probes, and call into the
8587 * providers to request that probes be provided. Some of these functions are
8588 * in the Provider-to-Framework API; these functions can be identified by the
8589 * fact that they are not declared "static".
8593 * Create a probe with the specified module name, function name, and name.
8595 dtrace_id_t
8596 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
8597 const char *func, const char *name, int aframes, void *arg)
8599 dtrace_probe_t *probe, **probes;
8600 dtrace_provider_t *provider = (dtrace_provider_t *)prov;
8601 dtrace_id_t id;
8603 if (provider == dtrace_provider) {
8604 ASSERT(MUTEX_HELD(&dtrace_lock));
8605 } else {
8606 mutex_enter(&dtrace_lock);
8609 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
8610 VM_BESTFIT | VM_SLEEP);
8611 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP);
8613 probe->dtpr_id = id;
8614 probe->dtpr_gen = dtrace_probegen++;
8615 probe->dtpr_mod = dtrace_strdup(mod);
8616 probe->dtpr_func = dtrace_strdup(func);
8617 probe->dtpr_name = dtrace_strdup(name);
8618 probe->dtpr_arg = arg;
8619 probe->dtpr_aframes = aframes;
8620 probe->dtpr_provider = provider;
8622 dtrace_hash_add(dtrace_bymod, probe);
8623 dtrace_hash_add(dtrace_byfunc, probe);
8624 dtrace_hash_add(dtrace_byname, probe);
8626 if (id - 1 >= dtrace_nprobes) {
8627 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
8628 size_t nsize = osize << 1;
8630 if (nsize == 0) {
8631 ASSERT(osize == 0);
8632 ASSERT(dtrace_probes == NULL);
8633 nsize = sizeof (dtrace_probe_t *);
8636 probes = kmem_zalloc(nsize, KM_SLEEP);
8638 if (dtrace_probes == NULL) {
8639 ASSERT(osize == 0);
8640 dtrace_probes = probes;
8641 dtrace_nprobes = 1;
8642 } else {
8643 dtrace_probe_t **oprobes = dtrace_probes;
8645 bcopy(oprobes, probes, osize);
8646 dtrace_membar_producer();
8647 dtrace_probes = probes;
8649 dtrace_sync();
8652 * All CPUs are now seeing the new probes array; we can
8653 * safely free the old array.
8655 kmem_free(oprobes, osize);
8656 dtrace_nprobes <<= 1;
8659 ASSERT(id - 1 < dtrace_nprobes);
8662 ASSERT(dtrace_probes[id - 1] == NULL);
8663 dtrace_probes[id - 1] = probe;
8665 if (provider != dtrace_provider)
8666 mutex_exit(&dtrace_lock);
8668 return (id);
8671 static dtrace_probe_t *
8672 dtrace_probe_lookup_id(dtrace_id_t id)
8674 ASSERT(MUTEX_HELD(&dtrace_lock));
8676 if (id == 0 || id > dtrace_nprobes)
8677 return (NULL);
8679 return (dtrace_probes[id - 1]);
8682 static int
8683 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
8685 *((dtrace_id_t *)arg) = probe->dtpr_id;
8687 return (DTRACE_MATCH_DONE);
8691 * Look up a probe based on provider and one or more of module name, function
8692 * name and probe name.
8694 dtrace_id_t
8695 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod,
8696 const char *func, const char *name)
8698 dtrace_probekey_t pkey;
8699 dtrace_id_t id;
8700 int match;
8702 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
8703 pkey.dtpk_pmatch = &dtrace_match_string;
8704 pkey.dtpk_mod = mod;
8705 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
8706 pkey.dtpk_func = func;
8707 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
8708 pkey.dtpk_name = name;
8709 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
8710 pkey.dtpk_id = DTRACE_IDNONE;
8712 mutex_enter(&dtrace_lock);
8713 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
8714 dtrace_probe_lookup_match, &id);
8715 mutex_exit(&dtrace_lock);
8717 ASSERT(match == 1 || match == 0);
8718 return (match ? id : 0);
8722 * Returns the probe argument associated with the specified probe.
8724 void *
8725 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
8727 dtrace_probe_t *probe;
8728 void *rval = NULL;
8730 mutex_enter(&dtrace_lock);
8732 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
8733 probe->dtpr_provider == (dtrace_provider_t *)id)
8734 rval = probe->dtpr_arg;
8736 mutex_exit(&dtrace_lock);
8738 return (rval);
8742 * Copy a probe into a probe description.
8744 static void
8745 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
8747 bzero(pdp, sizeof (dtrace_probedesc_t));
8748 pdp->dtpd_id = prp->dtpr_id;
8750 (void) strncpy(pdp->dtpd_provider,
8751 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
8753 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
8754 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
8755 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
8759 * Called to indicate that a probe -- or probes -- should be provided by a
8760 * specfied provider. If the specified description is NULL, the provider will
8761 * be told to provide all of its probes. (This is done whenever a new
8762 * consumer comes along, or whenever a retained enabling is to be matched.) If
8763 * the specified description is non-NULL, the provider is given the
8764 * opportunity to dynamically provide the specified probe, allowing providers
8765 * to support the creation of probes on-the-fly. (So-called _autocreated_
8766 * probes.) If the provider is NULL, the operations will be applied to all
8767 * providers; if the provider is non-NULL the operations will only be applied
8768 * to the specified provider. The dtrace_provider_lock must be held, and the
8769 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
8770 * will need to grab the dtrace_lock when it reenters the framework through
8771 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
8773 static void
8774 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
8776 struct modctl *ctl;
8777 int all = 0;
8779 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8781 if (prv == NULL) {
8782 all = 1;
8783 prv = dtrace_provider;
8786 do {
8788 * First, call the blanket provide operation.
8790 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
8793 * Now call the per-module provide operation. We will grab
8794 * mod_lock to prevent the list from being modified. Note
8795 * that this also prevents the mod_busy bits from changing.
8796 * (mod_busy can only be changed with mod_lock held.)
8798 mutex_enter(&mod_lock);
8800 ctl = &modules;
8801 do {
8802 if (ctl->mod_busy || ctl->mod_mp == NULL)
8803 continue;
8805 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
8807 } while ((ctl = ctl->mod_next) != &modules);
8809 mutex_exit(&mod_lock);
8810 } while (all && (prv = prv->dtpv_next) != NULL);
8814 * Iterate over each probe, and call the Framework-to-Provider API function
8815 * denoted by offs.
8817 static void
8818 dtrace_probe_foreach(uintptr_t offs)
8820 dtrace_provider_t *prov;
8821 void (*func)(void *, dtrace_id_t, void *);
8822 dtrace_probe_t *probe;
8823 dtrace_icookie_t cookie;
8824 int i;
8827 * We disable interrupts to walk through the probe array. This is
8828 * safe -- the dtrace_sync() in dtrace_unregister() assures that we
8829 * won't see stale data.
8831 cookie = dtrace_interrupt_disable();
8833 for (i = 0; i < dtrace_nprobes; i++) {
8834 if ((probe = dtrace_probes[i]) == NULL)
8835 continue;
8837 if (probe->dtpr_ecb == NULL) {
8839 * This probe isn't enabled -- don't call the function.
8841 continue;
8844 prov = probe->dtpr_provider;
8845 func = *((void(**)(void *, dtrace_id_t, void *))
8846 ((uintptr_t)&prov->dtpv_pops + offs));
8848 func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
8851 dtrace_interrupt_enable(cookie);
8854 static int
8855 dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab)
8857 dtrace_probekey_t pkey;
8858 uint32_t priv;
8859 uid_t uid;
8860 zoneid_t zoneid;
8862 ASSERT(MUTEX_HELD(&dtrace_lock));
8863 dtrace_ecb_create_cache = NULL;
8865 if (desc == NULL) {
8867 * If we're passed a NULL description, we're being asked to
8868 * create an ECB with a NULL probe.
8870 (void) dtrace_ecb_create_enable(NULL, enab);
8871 return (0);
8874 dtrace_probekey(desc, &pkey);
8875 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
8876 &priv, &uid, &zoneid);
8878 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
8879 enab));
8883 * DTrace Helper Provider Functions
8885 static void
8886 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
8888 attr->dtat_name = DOF_ATTR_NAME(dofattr);
8889 attr->dtat_data = DOF_ATTR_DATA(dofattr);
8890 attr->dtat_class = DOF_ATTR_CLASS(dofattr);
8893 static void
8894 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
8895 const dof_provider_t *dofprov, char *strtab)
8897 hprov->dthpv_provname = strtab + dofprov->dofpv_name;
8898 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
8899 dofprov->dofpv_provattr);
8900 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
8901 dofprov->dofpv_modattr);
8902 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
8903 dofprov->dofpv_funcattr);
8904 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
8905 dofprov->dofpv_nameattr);
8906 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
8907 dofprov->dofpv_argsattr);
8910 static void
8911 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8913 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8914 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8915 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
8916 dof_provider_t *provider;
8917 dof_probe_t *probe;
8918 uint32_t *off, *enoff;
8919 uint8_t *arg;
8920 char *strtab;
8921 uint_t i, nprobes;
8922 dtrace_helper_provdesc_t dhpv;
8923 dtrace_helper_probedesc_t dhpb;
8924 dtrace_meta_t *meta = dtrace_meta_pid;
8925 dtrace_mops_t *mops = &meta->dtm_mops;
8926 void *parg;
8928 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8929 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8930 provider->dofpv_strtab * dof->dofh_secsize);
8931 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8932 provider->dofpv_probes * dof->dofh_secsize);
8933 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8934 provider->dofpv_prargs * dof->dofh_secsize);
8935 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8936 provider->dofpv_proffs * dof->dofh_secsize);
8938 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8939 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
8940 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
8941 enoff = NULL;
8944 * See dtrace_helper_provider_validate().
8946 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
8947 provider->dofpv_prenoffs != DOF_SECT_NONE) {
8948 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8949 provider->dofpv_prenoffs * dof->dofh_secsize);
8950 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
8953 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
8956 * Create the provider.
8958 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8960 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
8961 return;
8963 meta->dtm_count++;
8966 * Create the probes.
8968 for (i = 0; i < nprobes; i++) {
8969 probe = (dof_probe_t *)(uintptr_t)(daddr +
8970 prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
8972 dhpb.dthpb_mod = dhp->dofhp_mod;
8973 dhpb.dthpb_func = strtab + probe->dofpr_func;
8974 dhpb.dthpb_name = strtab + probe->dofpr_name;
8975 dhpb.dthpb_base = probe->dofpr_addr;
8976 dhpb.dthpb_offs = off + probe->dofpr_offidx;
8977 dhpb.dthpb_noffs = probe->dofpr_noffs;
8978 if (enoff != NULL) {
8979 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx;
8980 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
8981 } else {
8982 dhpb.dthpb_enoffs = NULL;
8983 dhpb.dthpb_nenoffs = 0;
8985 dhpb.dthpb_args = arg + probe->dofpr_argidx;
8986 dhpb.dthpb_nargc = probe->dofpr_nargc;
8987 dhpb.dthpb_xargc = probe->dofpr_xargc;
8988 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
8989 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
8991 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
8995 static void
8996 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
8998 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8999 dof_hdr_t *dof = (dof_hdr_t *)daddr;
9000 int i;
9002 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
9004 for (i = 0; i < dof->dofh_secnum; i++) {
9005 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
9006 dof->dofh_secoff + i * dof->dofh_secsize);
9008 if (sec->dofs_type != DOF_SECT_PROVIDER)
9009 continue;
9011 dtrace_helper_provide_one(dhp, sec, pid);
9015 * We may have just created probes, so we must now rematch against
9016 * any retained enablings. Note that this call will acquire both
9017 * cpu_lock and dtrace_lock; the fact that we are holding
9018 * dtrace_meta_lock now is what defines the ordering with respect to
9019 * these three locks.
9021 dtrace_enabling_matchall();
9024 static void
9025 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
9027 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9028 dof_hdr_t *dof = (dof_hdr_t *)daddr;
9029 dof_sec_t *str_sec;
9030 dof_provider_t *provider;
9031 char *strtab;
9032 dtrace_helper_provdesc_t dhpv;
9033 dtrace_meta_t *meta = dtrace_meta_pid;
9034 dtrace_mops_t *mops = &meta->dtm_mops;
9036 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
9037 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9038 provider->dofpv_strtab * dof->dofh_secsize);
9040 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
9043 * Create the provider.
9045 dtrace_dofprov2hprov(&dhpv, provider, strtab);
9047 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
9049 meta->dtm_count--;
9052 static void
9053 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
9055 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9056 dof_hdr_t *dof = (dof_hdr_t *)daddr;
9057 int i;
9059 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
9061 for (i = 0; i < dof->dofh_secnum; i++) {
9062 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
9063 dof->dofh_secoff + i * dof->dofh_secsize);
9065 if (sec->dofs_type != DOF_SECT_PROVIDER)
9066 continue;
9068 dtrace_helper_provider_remove_one(dhp, sec, pid);
9073 * DTrace Meta Provider-to-Framework API Functions
9075 * These functions implement the Meta Provider-to-Framework API, as described
9076 * in <sys/dtrace.h>.
9079 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
9080 dtrace_meta_provider_id_t *idp)
9082 dtrace_meta_t *meta;
9083 dtrace_helpers_t *help, *next;
9084 int i;
9086 *idp = DTRACE_METAPROVNONE;
9089 * We strictly don't need the name, but we hold onto it for
9090 * debuggability. All hail error queues!
9092 if (name == NULL) {
9093 cmn_err(CE_WARN, "failed to register meta-provider: "
9094 "invalid name");
9095 return (EINVAL);
9098 if (mops == NULL ||
9099 mops->dtms_create_probe == NULL ||
9100 mops->dtms_provide_pid == NULL ||
9101 mops->dtms_remove_pid == NULL) {
9102 cmn_err(CE_WARN, "failed to register meta-register %s: "
9103 "invalid ops", name);
9104 return (EINVAL);
9107 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
9108 meta->dtm_mops = *mops;
9109 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
9110 (void) strcpy(meta->dtm_name, name);
9111 meta->dtm_arg = arg;
9113 mutex_enter(&dtrace_meta_lock);
9114 mutex_enter(&dtrace_lock);
9116 if (dtrace_meta_pid != NULL) {
9117 mutex_exit(&dtrace_lock);
9118 mutex_exit(&dtrace_meta_lock);
9119 cmn_err(CE_WARN, "failed to register meta-register %s: "
9120 "user-land meta-provider exists", name);
9121 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
9122 kmem_free(meta, sizeof (dtrace_meta_t));
9123 return (EINVAL);
9126 dtrace_meta_pid = meta;
9127 *idp = (dtrace_meta_provider_id_t)meta;
9130 * If there are providers and probes ready to go, pass them
9131 * off to the new meta provider now.
9134 help = dtrace_deferred_pid;
9135 dtrace_deferred_pid = NULL;
9137 mutex_exit(&dtrace_lock);
9139 while (help != NULL) {
9140 for (i = 0; i < help->dthps_nprovs; i++) {
9141 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
9142 help->dthps_pid);
9145 next = help->dthps_next;
9146 help->dthps_next = NULL;
9147 help->dthps_prev = NULL;
9148 help->dthps_deferred = 0;
9149 help = next;
9152 mutex_exit(&dtrace_meta_lock);
9154 return (0);
9158 dtrace_meta_unregister(dtrace_meta_provider_id_t id)
9160 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
9162 mutex_enter(&dtrace_meta_lock);
9163 mutex_enter(&dtrace_lock);
9165 if (old == dtrace_meta_pid) {
9166 pp = &dtrace_meta_pid;
9167 } else {
9168 panic("attempt to unregister non-existent "
9169 "dtrace meta-provider %p\n", (void *)old);
9172 if (old->dtm_count != 0) {
9173 mutex_exit(&dtrace_lock);
9174 mutex_exit(&dtrace_meta_lock);
9175 return (EBUSY);
9178 *pp = NULL;
9180 mutex_exit(&dtrace_lock);
9181 mutex_exit(&dtrace_meta_lock);
9183 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
9184 kmem_free(old, sizeof (dtrace_meta_t));
9186 return (0);
9191 * DTrace DIF Object Functions
9193 static int
9194 dtrace_difo_err(uint_t pc, const char *format, ...)
9196 if (dtrace_err_verbose) {
9197 va_list alist;
9199 (void) uprintf("dtrace DIF object error: [%u]: ", pc);
9200 va_start(alist, format);
9201 (void) vuprintf(format, alist);
9202 va_end(alist);
9205 #ifdef DTRACE_ERRDEBUG
9206 dtrace_errdebug(format);
9207 #endif
9208 return (1);
9212 * Validate a DTrace DIF object by checking the IR instructions. The following
9213 * rules are currently enforced by dtrace_difo_validate():
9215 * 1. Each instruction must have a valid opcode
9216 * 2. Each register, string, variable, or subroutine reference must be valid
9217 * 3. No instruction can modify register %r0 (must be zero)
9218 * 4. All instruction reserved bits must be set to zero
9219 * 5. The last instruction must be a "ret" instruction
9220 * 6. All branch targets must reference a valid instruction _after_ the branch
9222 static int
9223 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
9224 cred_t *cr)
9226 int err = 0, i;
9227 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
9228 int kcheckload;
9229 uint_t pc;
9230 int maxglobal = -1, maxlocal = -1, maxtlocal = -1;
9232 kcheckload = cr == NULL ||
9233 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0;
9235 dp->dtdo_destructive = 0;
9237 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
9238 dif_instr_t instr = dp->dtdo_buf[pc];
9240 uint_t r1 = DIF_INSTR_R1(instr);
9241 uint_t r2 = DIF_INSTR_R2(instr);
9242 uint_t rd = DIF_INSTR_RD(instr);
9243 uint_t rs = DIF_INSTR_RS(instr);
9244 uint_t label = DIF_INSTR_LABEL(instr);
9245 uint_t v = DIF_INSTR_VAR(instr);
9246 uint_t subr = DIF_INSTR_SUBR(instr);
9247 uint_t type = DIF_INSTR_TYPE(instr);
9248 uint_t op = DIF_INSTR_OP(instr);
9250 switch (op) {
9251 case DIF_OP_OR:
9252 case DIF_OP_XOR:
9253 case DIF_OP_AND:
9254 case DIF_OP_SLL:
9255 case DIF_OP_SRL:
9256 case DIF_OP_SRA:
9257 case DIF_OP_SUB:
9258 case DIF_OP_ADD:
9259 case DIF_OP_MUL:
9260 case DIF_OP_SDIV:
9261 case DIF_OP_UDIV:
9262 case DIF_OP_SREM:
9263 case DIF_OP_UREM:
9264 case DIF_OP_COPYS:
9265 if (r1 >= nregs)
9266 err += efunc(pc, "invalid register %u\n", r1);
9267 if (r2 >= nregs)
9268 err += efunc(pc, "invalid register %u\n", r2);
9269 if (rd >= nregs)
9270 err += efunc(pc, "invalid register %u\n", rd);
9271 if (rd == 0)
9272 err += efunc(pc, "cannot write to %r0\n");
9273 break;
9274 case DIF_OP_NOT:
9275 case DIF_OP_MOV:
9276 case DIF_OP_ALLOCS:
9277 if (r1 >= nregs)
9278 err += efunc(pc, "invalid register %u\n", r1);
9279 if (r2 != 0)
9280 err += efunc(pc, "non-zero reserved bits\n");
9281 if (rd >= nregs)
9282 err += efunc(pc, "invalid register %u\n", rd);
9283 if (rd == 0)
9284 err += efunc(pc, "cannot write to %r0\n");
9285 break;
9286 case DIF_OP_LDSB:
9287 case DIF_OP_LDSH:
9288 case DIF_OP_LDSW:
9289 case DIF_OP_LDUB:
9290 case DIF_OP_LDUH:
9291 case DIF_OP_LDUW:
9292 case DIF_OP_LDX:
9293 if (r1 >= nregs)
9294 err += efunc(pc, "invalid register %u\n", r1);
9295 if (r2 != 0)
9296 err += efunc(pc, "non-zero reserved bits\n");
9297 if (rd >= nregs)
9298 err += efunc(pc, "invalid register %u\n", rd);
9299 if (rd == 0)
9300 err += efunc(pc, "cannot write to %r0\n");
9301 if (kcheckload)
9302 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
9303 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
9304 break;
9305 case DIF_OP_RLDSB:
9306 case DIF_OP_RLDSH:
9307 case DIF_OP_RLDSW:
9308 case DIF_OP_RLDUB:
9309 case DIF_OP_RLDUH:
9310 case DIF_OP_RLDUW:
9311 case DIF_OP_RLDX:
9312 if (r1 >= nregs)
9313 err += efunc(pc, "invalid register %u\n", r1);
9314 if (r2 != 0)
9315 err += efunc(pc, "non-zero reserved bits\n");
9316 if (rd >= nregs)
9317 err += efunc(pc, "invalid register %u\n", rd);
9318 if (rd == 0)
9319 err += efunc(pc, "cannot write to %r0\n");
9320 break;
9321 case DIF_OP_ULDSB:
9322 case DIF_OP_ULDSH:
9323 case DIF_OP_ULDSW:
9324 case DIF_OP_ULDUB:
9325 case DIF_OP_ULDUH:
9326 case DIF_OP_ULDUW:
9327 case DIF_OP_ULDX:
9328 if (r1 >= nregs)
9329 err += efunc(pc, "invalid register %u\n", r1);
9330 if (r2 != 0)
9331 err += efunc(pc, "non-zero reserved bits\n");
9332 if (rd >= nregs)
9333 err += efunc(pc, "invalid register %u\n", rd);
9334 if (rd == 0)
9335 err += efunc(pc, "cannot write to %r0\n");
9336 break;
9337 case DIF_OP_STB:
9338 case DIF_OP_STH:
9339 case DIF_OP_STW:
9340 case DIF_OP_STX:
9341 if (r1 >= nregs)
9342 err += efunc(pc, "invalid register %u\n", r1);
9343 if (r2 != 0)
9344 err += efunc(pc, "non-zero reserved bits\n");
9345 if (rd >= nregs)
9346 err += efunc(pc, "invalid register %u\n", rd);
9347 if (rd == 0)
9348 err += efunc(pc, "cannot write to 0 address\n");
9349 break;
9350 case DIF_OP_CMP:
9351 case DIF_OP_SCMP:
9352 if (r1 >= nregs)
9353 err += efunc(pc, "invalid register %u\n", r1);
9354 if (r2 >= nregs)
9355 err += efunc(pc, "invalid register %u\n", r2);
9356 if (rd != 0)
9357 err += efunc(pc, "non-zero reserved bits\n");
9358 break;
9359 case DIF_OP_TST:
9360 if (r1 >= nregs)
9361 err += efunc(pc, "invalid register %u\n", r1);
9362 if (r2 != 0 || rd != 0)
9363 err += efunc(pc, "non-zero reserved bits\n");
9364 break;
9365 case DIF_OP_BA:
9366 case DIF_OP_BE:
9367 case DIF_OP_BNE:
9368 case DIF_OP_BG:
9369 case DIF_OP_BGU:
9370 case DIF_OP_BGE:
9371 case DIF_OP_BGEU:
9372 case DIF_OP_BL:
9373 case DIF_OP_BLU:
9374 case DIF_OP_BLE:
9375 case DIF_OP_BLEU:
9376 if (label >= dp->dtdo_len) {
9377 err += efunc(pc, "invalid branch target %u\n",
9378 label);
9380 if (label <= pc) {
9381 err += efunc(pc, "backward branch to %u\n",
9382 label);
9384 break;
9385 case DIF_OP_RET:
9386 if (r1 != 0 || r2 != 0)
9387 err += efunc(pc, "non-zero reserved bits\n");
9388 if (rd >= nregs)
9389 err += efunc(pc, "invalid register %u\n", rd);
9390 break;
9391 case DIF_OP_NOP:
9392 case DIF_OP_POPTS:
9393 case DIF_OP_FLUSHTS:
9394 if (r1 != 0 || r2 != 0 || rd != 0)
9395 err += efunc(pc, "non-zero reserved bits\n");
9396 break;
9397 case DIF_OP_SETX:
9398 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
9399 err += efunc(pc, "invalid integer ref %u\n",
9400 DIF_INSTR_INTEGER(instr));
9402 if (rd >= nregs)
9403 err += efunc(pc, "invalid register %u\n", rd);
9404 if (rd == 0)
9405 err += efunc(pc, "cannot write to %r0\n");
9406 break;
9407 case DIF_OP_SETS:
9408 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
9409 err += efunc(pc, "invalid string ref %u\n",
9410 DIF_INSTR_STRING(instr));
9412 if (rd >= nregs)
9413 err += efunc(pc, "invalid register %u\n", rd);
9414 if (rd == 0)
9415 err += efunc(pc, "cannot write to %r0\n");
9416 break;
9417 case DIF_OP_LDGA:
9418 case DIF_OP_LDTA:
9419 if (r1 > DIF_VAR_ARRAY_MAX)
9420 err += efunc(pc, "invalid array %u\n", r1);
9421 if (r2 >= nregs)
9422 err += efunc(pc, "invalid register %u\n", r2);
9423 if (rd >= nregs)
9424 err += efunc(pc, "invalid register %u\n", rd);
9425 if (rd == 0)
9426 err += efunc(pc, "cannot write to %r0\n");
9427 break;
9428 case DIF_OP_STGA:
9429 if (r1 > DIF_VAR_ARRAY_MAX)
9430 err += efunc(pc, "invalid array %u\n", r1);
9431 if (r2 >= nregs)
9432 err += efunc(pc, "invalid register %u\n", r2);
9433 if (rd >= nregs)
9434 err += efunc(pc, "invalid register %u\n", rd);
9435 dp->dtdo_destructive = 1;
9436 break;
9437 case DIF_OP_LDGS:
9438 case DIF_OP_LDTS:
9439 case DIF_OP_LDLS:
9440 case DIF_OP_LDGAA:
9441 case DIF_OP_LDTAA:
9442 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
9443 err += efunc(pc, "invalid variable %u\n", v);
9444 if (rd >= nregs)
9445 err += efunc(pc, "invalid register %u\n", rd);
9446 if (rd == 0)
9447 err += efunc(pc, "cannot write to %r0\n");
9448 break;
9449 case DIF_OP_STGS:
9450 case DIF_OP_STTS:
9451 case DIF_OP_STLS:
9452 case DIF_OP_STGAA:
9453 case DIF_OP_STTAA:
9454 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
9455 err += efunc(pc, "invalid variable %u\n", v);
9456 if (rs >= nregs)
9457 err += efunc(pc, "invalid register %u\n", rd);
9458 break;
9459 case DIF_OP_CALL:
9460 if (subr > DIF_SUBR_MAX)
9461 err += efunc(pc, "invalid subr %u\n", subr);
9462 if (rd >= nregs)
9463 err += efunc(pc, "invalid register %u\n", rd);
9464 if (rd == 0)
9465 err += efunc(pc, "cannot write to %r0\n");
9467 if (subr == DIF_SUBR_COPYOUT ||
9468 subr == DIF_SUBR_COPYOUTSTR) {
9469 dp->dtdo_destructive = 1;
9472 if (subr == DIF_SUBR_GETF) {
9474 * If we have a getf() we need to record that
9475 * in our state. Note that our state can be
9476 * NULL if this is a helper -- but in that
9477 * case, the call to getf() is itself illegal,
9478 * and will be caught (slightly later) when
9479 * the helper is validated.
9481 if (vstate->dtvs_state != NULL)
9482 vstate->dtvs_state->dts_getf++;
9485 break;
9486 case DIF_OP_PUSHTR:
9487 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
9488 err += efunc(pc, "invalid ref type %u\n", type);
9489 if (r2 >= nregs)
9490 err += efunc(pc, "invalid register %u\n", r2);
9491 if (rs >= nregs)
9492 err += efunc(pc, "invalid register %u\n", rs);
9493 break;
9494 case DIF_OP_PUSHTV:
9495 if (type != DIF_TYPE_CTF)
9496 err += efunc(pc, "invalid val type %u\n", type);
9497 if (r2 >= nregs)
9498 err += efunc(pc, "invalid register %u\n", r2);
9499 if (rs >= nregs)
9500 err += efunc(pc, "invalid register %u\n", rs);
9501 break;
9502 default:
9503 err += efunc(pc, "invalid opcode %u\n",
9504 DIF_INSTR_OP(instr));
9508 if (dp->dtdo_len != 0 &&
9509 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
9510 err += efunc(dp->dtdo_len - 1,
9511 "expected 'ret' as last DIF instruction\n");
9514 if (!(dp->dtdo_rtype.dtdt_flags & (DIF_TF_BYREF | DIF_TF_BYUREF))) {
9516 * If we're not returning by reference, the size must be either
9517 * 0 or the size of one of the base types.
9519 switch (dp->dtdo_rtype.dtdt_size) {
9520 case 0:
9521 case sizeof (uint8_t):
9522 case sizeof (uint16_t):
9523 case sizeof (uint32_t):
9524 case sizeof (uint64_t):
9525 break;
9527 default:
9528 err += efunc(dp->dtdo_len - 1, "bad return size\n");
9532 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
9533 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
9534 dtrace_diftype_t *vt, *et;
9535 uint_t id, ndx;
9537 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
9538 v->dtdv_scope != DIFV_SCOPE_THREAD &&
9539 v->dtdv_scope != DIFV_SCOPE_LOCAL) {
9540 err += efunc(i, "unrecognized variable scope %d\n",
9541 v->dtdv_scope);
9542 break;
9545 if (v->dtdv_kind != DIFV_KIND_ARRAY &&
9546 v->dtdv_kind != DIFV_KIND_SCALAR) {
9547 err += efunc(i, "unrecognized variable type %d\n",
9548 v->dtdv_kind);
9549 break;
9552 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
9553 err += efunc(i, "%d exceeds variable id limit\n", id);
9554 break;
9557 if (id < DIF_VAR_OTHER_UBASE)
9558 continue;
9561 * For user-defined variables, we need to check that this
9562 * definition is identical to any previous definition that we
9563 * encountered.
9565 ndx = id - DIF_VAR_OTHER_UBASE;
9567 switch (v->dtdv_scope) {
9568 case DIFV_SCOPE_GLOBAL:
9569 if (maxglobal == -1 || ndx > maxglobal)
9570 maxglobal = ndx;
9572 if (ndx < vstate->dtvs_nglobals) {
9573 dtrace_statvar_t *svar;
9575 if ((svar = vstate->dtvs_globals[ndx]) != NULL)
9576 existing = &svar->dtsv_var;
9579 break;
9581 case DIFV_SCOPE_THREAD:
9582 if (maxtlocal == -1 || ndx > maxtlocal)
9583 maxtlocal = ndx;
9585 if (ndx < vstate->dtvs_ntlocals)
9586 existing = &vstate->dtvs_tlocals[ndx];
9587 break;
9589 case DIFV_SCOPE_LOCAL:
9590 if (maxlocal == -1 || ndx > maxlocal)
9591 maxlocal = ndx;
9593 if (ndx < vstate->dtvs_nlocals) {
9594 dtrace_statvar_t *svar;
9596 if ((svar = vstate->dtvs_locals[ndx]) != NULL)
9597 existing = &svar->dtsv_var;
9600 break;
9603 vt = &v->dtdv_type;
9605 if (vt->dtdt_flags & DIF_TF_BYREF) {
9606 if (vt->dtdt_size == 0) {
9607 err += efunc(i, "zero-sized variable\n");
9608 break;
9611 if ((v->dtdv_scope == DIFV_SCOPE_GLOBAL ||
9612 v->dtdv_scope == DIFV_SCOPE_LOCAL) &&
9613 vt->dtdt_size > dtrace_statvar_maxsize) {
9614 err += efunc(i, "oversized by-ref static\n");
9615 break;
9619 if (existing == NULL || existing->dtdv_id == 0)
9620 continue;
9622 ASSERT(existing->dtdv_id == v->dtdv_id);
9623 ASSERT(existing->dtdv_scope == v->dtdv_scope);
9625 if (existing->dtdv_kind != v->dtdv_kind)
9626 err += efunc(i, "%d changed variable kind\n", id);
9628 et = &existing->dtdv_type;
9630 if (vt->dtdt_flags != et->dtdt_flags) {
9631 err += efunc(i, "%d changed variable type flags\n", id);
9632 break;
9635 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
9636 err += efunc(i, "%d changed variable type size\n", id);
9637 break;
9641 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
9642 dif_instr_t instr = dp->dtdo_buf[pc];
9644 uint_t v = DIF_INSTR_VAR(instr);
9645 uint_t op = DIF_INSTR_OP(instr);
9647 switch (op) {
9648 case DIF_OP_LDGS:
9649 case DIF_OP_LDGAA:
9650 case DIF_OP_STGS:
9651 case DIF_OP_STGAA:
9652 if (v > DIF_VAR_OTHER_UBASE + maxglobal)
9653 err += efunc(pc, "invalid variable %u\n", v);
9654 break;
9655 case DIF_OP_LDTS:
9656 case DIF_OP_LDTAA:
9657 case DIF_OP_STTS:
9658 case DIF_OP_STTAA:
9659 if (v > DIF_VAR_OTHER_UBASE + maxtlocal)
9660 err += efunc(pc, "invalid variable %u\n", v);
9661 break;
9662 case DIF_OP_LDLS:
9663 case DIF_OP_STLS:
9664 if (v > DIF_VAR_OTHER_UBASE + maxlocal)
9665 err += efunc(pc, "invalid variable %u\n", v);
9666 break;
9667 default:
9668 break;
9672 return (err);
9676 * Validate a DTrace DIF object that it is to be used as a helper. Helpers
9677 * are much more constrained than normal DIFOs. Specifically, they may
9678 * not:
9680 * 1. Make calls to subroutines other than copyin(), copyinstr() or
9681 * miscellaneous string routines
9682 * 2. Access DTrace variables other than the args[] array, and the
9683 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables.
9684 * 3. Have thread-local variables.
9685 * 4. Have dynamic variables.
9687 static int
9688 dtrace_difo_validate_helper(dtrace_difo_t *dp)
9690 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
9691 int err = 0;
9692 uint_t pc;
9694 for (pc = 0; pc < dp->dtdo_len; pc++) {
9695 dif_instr_t instr = dp->dtdo_buf[pc];
9697 uint_t v = DIF_INSTR_VAR(instr);
9698 uint_t subr = DIF_INSTR_SUBR(instr);
9699 uint_t op = DIF_INSTR_OP(instr);
9701 switch (op) {
9702 case DIF_OP_OR:
9703 case DIF_OP_XOR:
9704 case DIF_OP_AND:
9705 case DIF_OP_SLL:
9706 case DIF_OP_SRL:
9707 case DIF_OP_SRA:
9708 case DIF_OP_SUB:
9709 case DIF_OP_ADD:
9710 case DIF_OP_MUL:
9711 case DIF_OP_SDIV:
9712 case DIF_OP_UDIV:
9713 case DIF_OP_SREM:
9714 case DIF_OP_UREM:
9715 case DIF_OP_COPYS:
9716 case DIF_OP_NOT:
9717 case DIF_OP_MOV:
9718 case DIF_OP_RLDSB:
9719 case DIF_OP_RLDSH:
9720 case DIF_OP_RLDSW:
9721 case DIF_OP_RLDUB:
9722 case DIF_OP_RLDUH:
9723 case DIF_OP_RLDUW:
9724 case DIF_OP_RLDX:
9725 case DIF_OP_ULDSB:
9726 case DIF_OP_ULDSH:
9727 case DIF_OP_ULDSW:
9728 case DIF_OP_ULDUB:
9729 case DIF_OP_ULDUH:
9730 case DIF_OP_ULDUW:
9731 case DIF_OP_ULDX:
9732 case DIF_OP_STB:
9733 case DIF_OP_STH:
9734 case DIF_OP_STW:
9735 case DIF_OP_STX:
9736 case DIF_OP_ALLOCS:
9737 case DIF_OP_CMP:
9738 case DIF_OP_SCMP:
9739 case DIF_OP_TST:
9740 case DIF_OP_BA:
9741 case DIF_OP_BE:
9742 case DIF_OP_BNE:
9743 case DIF_OP_BG:
9744 case DIF_OP_BGU:
9745 case DIF_OP_BGE:
9746 case DIF_OP_BGEU:
9747 case DIF_OP_BL:
9748 case DIF_OP_BLU:
9749 case DIF_OP_BLE:
9750 case DIF_OP_BLEU:
9751 case DIF_OP_RET:
9752 case DIF_OP_NOP:
9753 case DIF_OP_POPTS:
9754 case DIF_OP_FLUSHTS:
9755 case DIF_OP_SETX:
9756 case DIF_OP_SETS:
9757 case DIF_OP_LDGA:
9758 case DIF_OP_LDLS:
9759 case DIF_OP_STGS:
9760 case DIF_OP_STLS:
9761 case DIF_OP_PUSHTR:
9762 case DIF_OP_PUSHTV:
9763 break;
9765 case DIF_OP_LDGS:
9766 if (v >= DIF_VAR_OTHER_UBASE)
9767 break;
9769 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
9770 break;
9772 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
9773 v == DIF_VAR_PPID || v == DIF_VAR_TID ||
9774 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
9775 v == DIF_VAR_UID || v == DIF_VAR_GID)
9776 break;
9778 err += efunc(pc, "illegal variable %u\n", v);
9779 break;
9781 case DIF_OP_LDTA:
9782 if (v < DIF_VAR_OTHER_UBASE) {
9783 err += efunc(pc, "illegal variable load\n");
9784 break;
9786 /* FALLTHROUGH */
9787 case DIF_OP_LDTS:
9788 case DIF_OP_LDGAA:
9789 case DIF_OP_LDTAA:
9790 err += efunc(pc, "illegal dynamic variable load\n");
9791 break;
9793 case DIF_OP_STGA:
9794 if (v < DIF_VAR_OTHER_UBASE) {
9795 err += efunc(pc, "illegal variable store\n");
9796 break;
9798 /* FALLTHROUGH */
9799 case DIF_OP_STTS:
9800 case DIF_OP_STGAA:
9801 case DIF_OP_STTAA:
9802 err += efunc(pc, "illegal dynamic variable store\n");
9803 break;
9805 case DIF_OP_CALL:
9806 if (subr == DIF_SUBR_ALLOCA ||
9807 subr == DIF_SUBR_BCOPY ||
9808 subr == DIF_SUBR_COPYIN ||
9809 subr == DIF_SUBR_COPYINTO ||
9810 subr == DIF_SUBR_COPYINSTR ||
9811 subr == DIF_SUBR_INDEX ||
9812 subr == DIF_SUBR_INET_NTOA ||
9813 subr == DIF_SUBR_INET_NTOA6 ||
9814 subr == DIF_SUBR_INET_NTOP ||
9815 subr == DIF_SUBR_JSON ||
9816 subr == DIF_SUBR_LLTOSTR ||
9817 subr == DIF_SUBR_STRTOLL ||
9818 subr == DIF_SUBR_RINDEX ||
9819 subr == DIF_SUBR_STRCHR ||
9820 subr == DIF_SUBR_STRJOIN ||
9821 subr == DIF_SUBR_STRRCHR ||
9822 subr == DIF_SUBR_STRSTR ||
9823 subr == DIF_SUBR_HTONS ||
9824 subr == DIF_SUBR_HTONL ||
9825 subr == DIF_SUBR_HTONLL ||
9826 subr == DIF_SUBR_NTOHS ||
9827 subr == DIF_SUBR_NTOHL ||
9828 subr == DIF_SUBR_NTOHLL)
9829 break;
9831 err += efunc(pc, "invalid subr %u\n", subr);
9832 break;
9834 default:
9835 err += efunc(pc, "invalid opcode %u\n",
9836 DIF_INSTR_OP(instr));
9840 return (err);
9844 * Returns 1 if the expression in the DIF object can be cached on a per-thread
9845 * basis; 0 if not.
9847 static int
9848 dtrace_difo_cacheable(dtrace_difo_t *dp)
9850 int i;
9852 if (dp == NULL)
9853 return (0);
9855 for (i = 0; i < dp->dtdo_varlen; i++) {
9856 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9858 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
9859 continue;
9861 switch (v->dtdv_id) {
9862 case DIF_VAR_CURTHREAD:
9863 case DIF_VAR_PID:
9864 case DIF_VAR_TID:
9865 case DIF_VAR_EXECNAME:
9866 case DIF_VAR_ZONENAME:
9867 break;
9869 default:
9870 return (0);
9875 * This DIF object may be cacheable. Now we need to look for any
9876 * array loading instructions, any memory loading instructions, or
9877 * any stores to thread-local variables.
9879 for (i = 0; i < dp->dtdo_len; i++) {
9880 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
9882 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
9883 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
9884 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
9885 op == DIF_OP_LDGA || op == DIF_OP_STTS)
9886 return (0);
9889 return (1);
9892 static void
9893 dtrace_difo_hold(dtrace_difo_t *dp)
9895 int i;
9897 ASSERT(MUTEX_HELD(&dtrace_lock));
9899 dp->dtdo_refcnt++;
9900 ASSERT(dp->dtdo_refcnt != 0);
9903 * We need to check this DIF object for references to the variable
9904 * DIF_VAR_VTIMESTAMP.
9906 for (i = 0; i < dp->dtdo_varlen; i++) {
9907 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9909 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
9910 continue;
9912 if (dtrace_vtime_references++ == 0)
9913 dtrace_vtime_enable();
9918 * This routine calculates the dynamic variable chunksize for a given DIF
9919 * object. The calculation is not fool-proof, and can probably be tricked by
9920 * malicious DIF -- but it works for all compiler-generated DIF. Because this
9921 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
9922 * if a dynamic variable size exceeds the chunksize.
9924 static void
9925 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9927 uint64_t sval;
9928 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
9929 const dif_instr_t *text = dp->dtdo_buf;
9930 uint_t pc, srd = 0;
9931 uint_t ttop = 0;
9932 size_t size, ksize;
9933 uint_t id, i;
9935 for (pc = 0; pc < dp->dtdo_len; pc++) {
9936 dif_instr_t instr = text[pc];
9937 uint_t op = DIF_INSTR_OP(instr);
9938 uint_t rd = DIF_INSTR_RD(instr);
9939 uint_t r1 = DIF_INSTR_R1(instr);
9940 uint_t nkeys = 0;
9941 uchar_t scope;
9943 dtrace_key_t *key = tupregs;
9945 switch (op) {
9946 case DIF_OP_SETX:
9947 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
9948 srd = rd;
9949 continue;
9951 case DIF_OP_STTS:
9952 key = &tupregs[DIF_DTR_NREGS];
9953 key[0].dttk_size = 0;
9954 key[1].dttk_size = 0;
9955 nkeys = 2;
9956 scope = DIFV_SCOPE_THREAD;
9957 break;
9959 case DIF_OP_STGAA:
9960 case DIF_OP_STTAA:
9961 nkeys = ttop;
9963 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA)
9964 key[nkeys++].dttk_size = 0;
9966 key[nkeys++].dttk_size = 0;
9968 if (op == DIF_OP_STTAA) {
9969 scope = DIFV_SCOPE_THREAD;
9970 } else {
9971 scope = DIFV_SCOPE_GLOBAL;
9974 break;
9976 case DIF_OP_PUSHTR:
9977 if (ttop == DIF_DTR_NREGS)
9978 return;
9980 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
9982 * If the register for the size of the "pushtr"
9983 * is %r0 (or the value is 0) and the type is
9984 * a string, we'll use the system-wide default
9985 * string size.
9987 tupregs[ttop++].dttk_size =
9988 dtrace_strsize_default;
9989 } else {
9990 if (srd == 0)
9991 return;
9993 if (sval > LONG_MAX)
9994 return;
9996 tupregs[ttop++].dttk_size = sval;
9999 break;
10001 case DIF_OP_PUSHTV:
10002 if (ttop == DIF_DTR_NREGS)
10003 return;
10005 tupregs[ttop++].dttk_size = 0;
10006 break;
10008 case DIF_OP_FLUSHTS:
10009 ttop = 0;
10010 break;
10012 case DIF_OP_POPTS:
10013 if (ttop != 0)
10014 ttop--;
10015 break;
10018 sval = 0;
10019 srd = 0;
10021 if (nkeys == 0)
10022 continue;
10025 * We have a dynamic variable allocation; calculate its size.
10027 for (ksize = 0, i = 0; i < nkeys; i++)
10028 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
10030 size = sizeof (dtrace_dynvar_t);
10031 size += sizeof (dtrace_key_t) * (nkeys - 1);
10032 size += ksize;
10035 * Now we need to determine the size of the stored data.
10037 id = DIF_INSTR_VAR(instr);
10039 for (i = 0; i < dp->dtdo_varlen; i++) {
10040 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10042 if (v->dtdv_id == id && v->dtdv_scope == scope) {
10043 size += v->dtdv_type.dtdt_size;
10044 break;
10048 if (i == dp->dtdo_varlen)
10049 return;
10052 * We have the size. If this is larger than the chunk size
10053 * for our dynamic variable state, reset the chunk size.
10055 size = P2ROUNDUP(size, sizeof (uint64_t));
10058 * Before setting the chunk size, check that we're not going
10059 * to set it to a negative value...
10061 if (size > LONG_MAX)
10062 return;
10065 * ...and make certain that we didn't badly overflow.
10067 if (size < ksize || size < sizeof (dtrace_dynvar_t))
10068 return;
10070 if (size > vstate->dtvs_dynvars.dtds_chunksize)
10071 vstate->dtvs_dynvars.dtds_chunksize = size;
10075 static void
10076 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10078 int i, oldsvars, osz, nsz, otlocals, ntlocals;
10079 uint_t id;
10081 ASSERT(MUTEX_HELD(&dtrace_lock));
10082 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
10084 for (i = 0; i < dp->dtdo_varlen; i++) {
10085 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10086 dtrace_statvar_t *svar, ***svarp;
10087 size_t dsize = 0;
10088 uint8_t scope = v->dtdv_scope;
10089 int *np;
10091 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
10092 continue;
10094 id -= DIF_VAR_OTHER_UBASE;
10096 switch (scope) {
10097 case DIFV_SCOPE_THREAD:
10098 while (id >= (otlocals = vstate->dtvs_ntlocals)) {
10099 dtrace_difv_t *tlocals;
10101 if ((ntlocals = (otlocals << 1)) == 0)
10102 ntlocals = 1;
10104 osz = otlocals * sizeof (dtrace_difv_t);
10105 nsz = ntlocals * sizeof (dtrace_difv_t);
10107 tlocals = kmem_zalloc(nsz, KM_SLEEP);
10109 if (osz != 0) {
10110 bcopy(vstate->dtvs_tlocals,
10111 tlocals, osz);
10112 kmem_free(vstate->dtvs_tlocals, osz);
10115 vstate->dtvs_tlocals = tlocals;
10116 vstate->dtvs_ntlocals = ntlocals;
10119 vstate->dtvs_tlocals[id] = *v;
10120 continue;
10122 case DIFV_SCOPE_LOCAL:
10123 np = &vstate->dtvs_nlocals;
10124 svarp = &vstate->dtvs_locals;
10126 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
10127 dsize = NCPU * (v->dtdv_type.dtdt_size +
10128 sizeof (uint64_t));
10129 else
10130 dsize = NCPU * sizeof (uint64_t);
10132 break;
10134 case DIFV_SCOPE_GLOBAL:
10135 np = &vstate->dtvs_nglobals;
10136 svarp = &vstate->dtvs_globals;
10138 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
10139 dsize = v->dtdv_type.dtdt_size +
10140 sizeof (uint64_t);
10142 break;
10144 default:
10145 ASSERT(0);
10148 while (id >= (oldsvars = *np)) {
10149 dtrace_statvar_t **statics;
10150 int newsvars, oldsize, newsize;
10152 if ((newsvars = (oldsvars << 1)) == 0)
10153 newsvars = 1;
10155 oldsize = oldsvars * sizeof (dtrace_statvar_t *);
10156 newsize = newsvars * sizeof (dtrace_statvar_t *);
10158 statics = kmem_zalloc(newsize, KM_SLEEP);
10160 if (oldsize != 0) {
10161 bcopy(*svarp, statics, oldsize);
10162 kmem_free(*svarp, oldsize);
10165 *svarp = statics;
10166 *np = newsvars;
10169 if ((svar = (*svarp)[id]) == NULL) {
10170 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
10171 svar->dtsv_var = *v;
10173 if ((svar->dtsv_size = dsize) != 0) {
10174 svar->dtsv_data = (uint64_t)(uintptr_t)
10175 kmem_zalloc(dsize, KM_SLEEP);
10178 (*svarp)[id] = svar;
10181 svar->dtsv_refcnt++;
10184 dtrace_difo_chunksize(dp, vstate);
10185 dtrace_difo_hold(dp);
10188 static dtrace_difo_t *
10189 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10191 dtrace_difo_t *new;
10192 size_t sz;
10194 ASSERT(dp->dtdo_buf != NULL);
10195 ASSERT(dp->dtdo_refcnt != 0);
10197 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
10199 ASSERT(dp->dtdo_buf != NULL);
10200 sz = dp->dtdo_len * sizeof (dif_instr_t);
10201 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP);
10202 bcopy(dp->dtdo_buf, new->dtdo_buf, sz);
10203 new->dtdo_len = dp->dtdo_len;
10205 if (dp->dtdo_strtab != NULL) {
10206 ASSERT(dp->dtdo_strlen != 0);
10207 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP);
10208 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen);
10209 new->dtdo_strlen = dp->dtdo_strlen;
10212 if (dp->dtdo_inttab != NULL) {
10213 ASSERT(dp->dtdo_intlen != 0);
10214 sz = dp->dtdo_intlen * sizeof (uint64_t);
10215 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP);
10216 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz);
10217 new->dtdo_intlen = dp->dtdo_intlen;
10220 if (dp->dtdo_vartab != NULL) {
10221 ASSERT(dp->dtdo_varlen != 0);
10222 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t);
10223 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP);
10224 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz);
10225 new->dtdo_varlen = dp->dtdo_varlen;
10228 dtrace_difo_init(new, vstate);
10229 return (new);
10232 static void
10233 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10235 int i;
10237 ASSERT(dp->dtdo_refcnt == 0);
10239 for (i = 0; i < dp->dtdo_varlen; i++) {
10240 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10241 dtrace_statvar_t *svar, **svarp;
10242 uint_t id;
10243 uint8_t scope = v->dtdv_scope;
10244 int *np;
10246 switch (scope) {
10247 case DIFV_SCOPE_THREAD:
10248 continue;
10250 case DIFV_SCOPE_LOCAL:
10251 np = &vstate->dtvs_nlocals;
10252 svarp = vstate->dtvs_locals;
10253 break;
10255 case DIFV_SCOPE_GLOBAL:
10256 np = &vstate->dtvs_nglobals;
10257 svarp = vstate->dtvs_globals;
10258 break;
10260 default:
10261 ASSERT(0);
10264 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
10265 continue;
10267 id -= DIF_VAR_OTHER_UBASE;
10268 ASSERT(id < *np);
10270 svar = svarp[id];
10271 ASSERT(svar != NULL);
10272 ASSERT(svar->dtsv_refcnt > 0);
10274 if (--svar->dtsv_refcnt > 0)
10275 continue;
10277 if (svar->dtsv_size != 0) {
10278 ASSERT(svar->dtsv_data != (uintptr_t)NULL);
10279 kmem_free((void *)(uintptr_t)svar->dtsv_data,
10280 svar->dtsv_size);
10283 kmem_free(svar, sizeof (dtrace_statvar_t));
10284 svarp[id] = NULL;
10287 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
10288 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
10289 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
10290 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
10292 kmem_free(dp, sizeof (dtrace_difo_t));
10295 static void
10296 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10298 int i;
10300 ASSERT(MUTEX_HELD(&dtrace_lock));
10301 ASSERT(dp->dtdo_refcnt != 0);
10303 for (i = 0; i < dp->dtdo_varlen; i++) {
10304 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10306 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
10307 continue;
10309 ASSERT(dtrace_vtime_references > 0);
10310 if (--dtrace_vtime_references == 0)
10311 dtrace_vtime_disable();
10314 if (--dp->dtdo_refcnt == 0)
10315 dtrace_difo_destroy(dp, vstate);
10319 * DTrace Format Functions
10321 static uint16_t
10322 dtrace_format_add(dtrace_state_t *state, char *str)
10324 char *fmt, **new;
10325 uint16_t ndx, len = strlen(str) + 1;
10327 fmt = kmem_zalloc(len, KM_SLEEP);
10328 bcopy(str, fmt, len);
10330 for (ndx = 0; ndx < state->dts_nformats; ndx++) {
10331 if (state->dts_formats[ndx] == NULL) {
10332 state->dts_formats[ndx] = fmt;
10333 return (ndx + 1);
10337 if (state->dts_nformats == USHRT_MAX) {
10339 * This is only likely if a denial-of-service attack is being
10340 * attempted. As such, it's okay to fail silently here.
10342 kmem_free(fmt, len);
10343 return (0);
10347 * For simplicity, we always resize the formats array to be exactly the
10348 * number of formats.
10350 ndx = state->dts_nformats++;
10351 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP);
10353 if (state->dts_formats != NULL) {
10354 ASSERT(ndx != 0);
10355 bcopy(state->dts_formats, new, ndx * sizeof (char *));
10356 kmem_free(state->dts_formats, ndx * sizeof (char *));
10359 state->dts_formats = new;
10360 state->dts_formats[ndx] = fmt;
10362 return (ndx + 1);
10365 static void
10366 dtrace_format_remove(dtrace_state_t *state, uint16_t format)
10368 char *fmt;
10370 ASSERT(state->dts_formats != NULL);
10371 ASSERT(format <= state->dts_nformats);
10372 ASSERT(state->dts_formats[format - 1] != NULL);
10374 fmt = state->dts_formats[format - 1];
10375 kmem_free(fmt, strlen(fmt) + 1);
10376 state->dts_formats[format - 1] = NULL;
10379 static void
10380 dtrace_format_destroy(dtrace_state_t *state)
10382 int i;
10384 if (state->dts_nformats == 0) {
10385 ASSERT(state->dts_formats == NULL);
10386 return;
10389 ASSERT(state->dts_formats != NULL);
10391 for (i = 0; i < state->dts_nformats; i++) {
10392 char *fmt = state->dts_formats[i];
10394 if (fmt == NULL)
10395 continue;
10397 kmem_free(fmt, strlen(fmt) + 1);
10400 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *));
10401 state->dts_nformats = 0;
10402 state->dts_formats = NULL;
10406 * DTrace Predicate Functions
10408 static dtrace_predicate_t *
10409 dtrace_predicate_create(dtrace_difo_t *dp)
10411 dtrace_predicate_t *pred;
10413 ASSERT(MUTEX_HELD(&dtrace_lock));
10414 ASSERT(dp->dtdo_refcnt != 0);
10416 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
10417 pred->dtp_difo = dp;
10418 pred->dtp_refcnt = 1;
10420 if (!dtrace_difo_cacheable(dp))
10421 return (pred);
10423 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
10425 * This is only theoretically possible -- we have had 2^32
10426 * cacheable predicates on this machine. We cannot allow any
10427 * more predicates to become cacheable: as unlikely as it is,
10428 * there may be a thread caching a (now stale) predicate cache
10429 * ID. (N.B.: the temptation is being successfully resisted to
10430 * have this cmn_err() "Holy shit -- we executed this code!")
10432 return (pred);
10435 pred->dtp_cacheid = dtrace_predcache_id++;
10437 return (pred);
10440 static void
10441 dtrace_predicate_hold(dtrace_predicate_t *pred)
10443 ASSERT(MUTEX_HELD(&dtrace_lock));
10444 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
10445 ASSERT(pred->dtp_refcnt > 0);
10447 pred->dtp_refcnt++;
10450 static void
10451 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
10453 dtrace_difo_t *dp = pred->dtp_difo;
10455 ASSERT(MUTEX_HELD(&dtrace_lock));
10456 ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
10457 ASSERT(pred->dtp_refcnt > 0);
10459 if (--pred->dtp_refcnt == 0) {
10460 dtrace_difo_release(pred->dtp_difo, vstate);
10461 kmem_free(pred, sizeof (dtrace_predicate_t));
10466 * DTrace Action Description Functions
10468 static dtrace_actdesc_t *
10469 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
10470 uint64_t uarg, uint64_t arg)
10472 dtrace_actdesc_t *act;
10474 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != (uintptr_t)NULL &&
10475 arg >= KERNELBASE) || (arg == (uintptr_t)NULL &&
10476 kind == DTRACEACT_PRINTA));
10478 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
10479 act->dtad_kind = kind;
10480 act->dtad_ntuple = ntuple;
10481 act->dtad_uarg = uarg;
10482 act->dtad_arg = arg;
10483 act->dtad_refcnt = 1;
10485 return (act);
10488 static void
10489 dtrace_actdesc_hold(dtrace_actdesc_t *act)
10491 ASSERT(act->dtad_refcnt >= 1);
10492 act->dtad_refcnt++;
10495 static void
10496 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
10498 dtrace_actkind_t kind = act->dtad_kind;
10499 dtrace_difo_t *dp;
10501 ASSERT(act->dtad_refcnt >= 1);
10503 if (--act->dtad_refcnt != 0)
10504 return;
10506 if ((dp = act->dtad_difo) != NULL)
10507 dtrace_difo_release(dp, vstate);
10509 if (DTRACEACT_ISPRINTFLIKE(kind)) {
10510 char *str = (char *)(uintptr_t)act->dtad_arg;
10512 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
10513 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));
10515 if (str != NULL)
10516 kmem_free(str, strlen(str) + 1);
10519 kmem_free(act, sizeof (dtrace_actdesc_t));
10523 * DTrace ECB Functions
10525 static dtrace_ecb_t *
10526 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
10528 dtrace_ecb_t *ecb;
10529 dtrace_epid_t epid;
10531 ASSERT(MUTEX_HELD(&dtrace_lock));
10533 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
10534 ecb->dte_predicate = NULL;
10535 ecb->dte_probe = probe;
10538 * The default size is the size of the default action: recording
10539 * the header.
10541 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t);
10542 ecb->dte_alignment = sizeof (dtrace_epid_t);
10544 epid = state->dts_epid++;
10546 if (epid - 1 >= state->dts_necbs) {
10547 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
10548 int necbs = state->dts_necbs << 1;
10550 ASSERT(epid == state->dts_necbs + 1);
10552 if (necbs == 0) {
10553 ASSERT(oecbs == NULL);
10554 necbs = 1;
10557 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
10559 if (oecbs != NULL)
10560 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
10562 dtrace_membar_producer();
10563 state->dts_ecbs = ecbs;
10565 if (oecbs != NULL) {
10567 * If this state is active, we must dtrace_sync()
10568 * before we can free the old dts_ecbs array: we're
10569 * coming in hot, and there may be active ring
10570 * buffer processing (which indexes into the dts_ecbs
10571 * array) on another CPU.
10573 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
10574 dtrace_sync();
10576 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
10579 dtrace_membar_producer();
10580 state->dts_necbs = necbs;
10583 ecb->dte_state = state;
10585 ASSERT(state->dts_ecbs[epid - 1] == NULL);
10586 dtrace_membar_producer();
10587 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
10589 return (ecb);
10592 static int
10593 dtrace_ecb_enable(dtrace_ecb_t *ecb)
10595 dtrace_probe_t *probe = ecb->dte_probe;
10597 ASSERT(MUTEX_HELD(&cpu_lock));
10598 ASSERT(MUTEX_HELD(&dtrace_lock));
10599 ASSERT(ecb->dte_next == NULL);
10601 if (probe == NULL) {
10603 * This is the NULL probe -- there's nothing to do.
10605 return (0);
10608 if (probe->dtpr_ecb == NULL) {
10609 dtrace_provider_t *prov = probe->dtpr_provider;
10612 * We're the first ECB on this probe.
10614 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
10616 if (ecb->dte_predicate != NULL)
10617 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
10619 return (prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
10620 probe->dtpr_id, probe->dtpr_arg));
10621 } else {
10623 * This probe is already active. Swing the last pointer to
10624 * point to the new ECB, and issue a dtrace_sync() to assure
10625 * that all CPUs have seen the change.
10627 ASSERT(probe->dtpr_ecb_last != NULL);
10628 probe->dtpr_ecb_last->dte_next = ecb;
10629 probe->dtpr_ecb_last = ecb;
10630 probe->dtpr_predcache = 0;
10632 dtrace_sync();
10633 return (0);
10637 static int
10638 dtrace_ecb_resize(dtrace_ecb_t *ecb)
10640 dtrace_action_t *act;
10641 uint32_t curneeded = UINT32_MAX;
10642 uint32_t aggbase = UINT32_MAX;
10645 * If we record anything, we always record the dtrace_rechdr_t. (And
10646 * we always record it first.)
10648 ecb->dte_size = sizeof (dtrace_rechdr_t);
10649 ecb->dte_alignment = sizeof (dtrace_epid_t);
10651 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
10652 dtrace_recdesc_t *rec = &act->dta_rec;
10653 ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1);
10655 ecb->dte_alignment = MAX(ecb->dte_alignment,
10656 rec->dtrd_alignment);
10658 if (DTRACEACT_ISAGG(act->dta_kind)) {
10659 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
10661 ASSERT(rec->dtrd_size != 0);
10662 ASSERT(agg->dtag_first != NULL);
10663 ASSERT(act->dta_prev->dta_intuple);
10664 ASSERT(aggbase != UINT32_MAX);
10665 ASSERT(curneeded != UINT32_MAX);
10667 agg->dtag_base = aggbase;
10669 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
10670 rec->dtrd_offset = curneeded;
10671 if (curneeded + rec->dtrd_size < curneeded)
10672 return (EINVAL);
10673 curneeded += rec->dtrd_size;
10674 ecb->dte_needed = MAX(ecb->dte_needed, curneeded);
10676 aggbase = UINT32_MAX;
10677 curneeded = UINT32_MAX;
10678 } else if (act->dta_intuple) {
10679 if (curneeded == UINT32_MAX) {
10681 * This is the first record in a tuple. Align
10682 * curneeded to be at offset 4 in an 8-byte
10683 * aligned block.
10685 ASSERT(act->dta_prev == NULL ||
10686 !act->dta_prev->dta_intuple);
10687 ASSERT3U(aggbase, ==, UINT32_MAX);
10688 curneeded = P2PHASEUP(ecb->dte_size,
10689 sizeof (uint64_t), sizeof (dtrace_aggid_t));
10691 aggbase = curneeded - sizeof (dtrace_aggid_t);
10692 ASSERT(IS_P2ALIGNED(aggbase,
10693 sizeof (uint64_t)));
10695 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
10696 rec->dtrd_offset = curneeded;
10697 if (curneeded + rec->dtrd_size < curneeded)
10698 return (EINVAL);
10699 curneeded += rec->dtrd_size;
10700 } else {
10701 /* tuples must be followed by an aggregation */
10702 ASSERT(act->dta_prev == NULL ||
10703 !act->dta_prev->dta_intuple);
10705 ecb->dte_size = P2ROUNDUP(ecb->dte_size,
10706 rec->dtrd_alignment);
10707 rec->dtrd_offset = ecb->dte_size;
10708 if (ecb->dte_size + rec->dtrd_size < ecb->dte_size)
10709 return (EINVAL);
10710 ecb->dte_size += rec->dtrd_size;
10711 ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size);
10715 if ((act = ecb->dte_action) != NULL &&
10716 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
10717 ecb->dte_size == sizeof (dtrace_rechdr_t)) {
10719 * If the size is still sizeof (dtrace_rechdr_t), then all
10720 * actions store no data; set the size to 0.
10722 ecb->dte_size = 0;
10725 ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t));
10726 ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t)));
10727 ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed,
10728 ecb->dte_needed);
10729 return (0);
10732 static dtrace_action_t *
10733 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
10735 dtrace_aggregation_t *agg;
10736 size_t size = sizeof (uint64_t);
10737 int ntuple = desc->dtad_ntuple;
10738 dtrace_action_t *act;
10739 dtrace_recdesc_t *frec;
10740 dtrace_aggid_t aggid;
10741 dtrace_state_t *state = ecb->dte_state;
10743 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
10744 agg->dtag_ecb = ecb;
10746 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
10748 switch (desc->dtad_kind) {
10749 case DTRACEAGG_MIN:
10750 agg->dtag_initial = INT64_MAX;
10751 agg->dtag_aggregate = dtrace_aggregate_min;
10752 break;
10754 case DTRACEAGG_MAX:
10755 agg->dtag_initial = INT64_MIN;
10756 agg->dtag_aggregate = dtrace_aggregate_max;
10757 break;
10759 case DTRACEAGG_COUNT:
10760 agg->dtag_aggregate = dtrace_aggregate_count;
10761 break;
10763 case DTRACEAGG_QUANTIZE:
10764 agg->dtag_aggregate = dtrace_aggregate_quantize;
10765 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
10766 sizeof (uint64_t);
10767 break;
10769 case DTRACEAGG_LQUANTIZE: {
10770 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
10771 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
10773 agg->dtag_initial = desc->dtad_arg;
10774 agg->dtag_aggregate = dtrace_aggregate_lquantize;
10776 if (step == 0 || levels == 0)
10777 goto err;
10779 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
10780 break;
10783 case DTRACEAGG_LLQUANTIZE: {
10784 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg);
10785 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg);
10786 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg);
10787 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg);
10788 int64_t v;
10790 agg->dtag_initial = desc->dtad_arg;
10791 agg->dtag_aggregate = dtrace_aggregate_llquantize;
10793 if (factor < 2 || low >= high || nsteps < factor)
10794 goto err;
10797 * Now check that the number of steps evenly divides a power
10798 * of the factor. (This assures both integer bucket size and
10799 * linearity within each magnitude.)
10801 for (v = factor; v < nsteps; v *= factor)
10802 continue;
10804 if ((v % nsteps) || (nsteps % factor))
10805 goto err;
10807 size = (dtrace_aggregate_llquantize_bucket(factor,
10808 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t);
10809 break;
10812 case DTRACEAGG_AVG:
10813 agg->dtag_aggregate = dtrace_aggregate_avg;
10814 size = sizeof (uint64_t) * 2;
10815 break;
10817 case DTRACEAGG_STDDEV:
10818 agg->dtag_aggregate = dtrace_aggregate_stddev;
10819 size = sizeof (uint64_t) * 4;
10820 break;
10822 case DTRACEAGG_SUM:
10823 agg->dtag_aggregate = dtrace_aggregate_sum;
10824 break;
10826 default:
10827 goto err;
10830 agg->dtag_action.dta_rec.dtrd_size = size;
10832 if (ntuple == 0)
10833 goto err;
10836 * We must make sure that we have enough actions for the n-tuple.
10838 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
10839 if (DTRACEACT_ISAGG(act->dta_kind))
10840 break;
10842 if (--ntuple == 0) {
10844 * This is the action with which our n-tuple begins.
10846 agg->dtag_first = act;
10847 goto success;
10852 * This n-tuple is short by ntuple elements. Return failure.
10854 ASSERT(ntuple != 0);
10855 err:
10856 kmem_free(agg, sizeof (dtrace_aggregation_t));
10857 return (NULL);
10859 success:
10861 * If the last action in the tuple has a size of zero, it's actually
10862 * an expression argument for the aggregating action.
10864 ASSERT(ecb->dte_action_last != NULL);
10865 act = ecb->dte_action_last;
10867 if (act->dta_kind == DTRACEACT_DIFEXPR) {
10868 ASSERT(act->dta_difo != NULL);
10870 if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
10871 agg->dtag_hasarg = 1;
10875 * We need to allocate an id for this aggregation.
10877 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1,
10878 VM_BESTFIT | VM_SLEEP);
10880 if (aggid - 1 >= state->dts_naggregations) {
10881 dtrace_aggregation_t **oaggs = state->dts_aggregations;
10882 dtrace_aggregation_t **aggs;
10883 int naggs = state->dts_naggregations << 1;
10884 int onaggs = state->dts_naggregations;
10886 ASSERT(aggid == state->dts_naggregations + 1);
10888 if (naggs == 0) {
10889 ASSERT(oaggs == NULL);
10890 naggs = 1;
10893 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
10895 if (oaggs != NULL) {
10896 bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
10897 kmem_free(oaggs, onaggs * sizeof (*aggs));
10900 state->dts_aggregations = aggs;
10901 state->dts_naggregations = naggs;
10904 ASSERT(state->dts_aggregations[aggid - 1] == NULL);
10905 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
10907 frec = &agg->dtag_first->dta_rec;
10908 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
10909 frec->dtrd_alignment = sizeof (dtrace_aggid_t);
10911 for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
10912 ASSERT(!act->dta_intuple);
10913 act->dta_intuple = 1;
10916 return (&agg->dtag_action);
10919 static void
10920 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
10922 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
10923 dtrace_state_t *state = ecb->dte_state;
10924 dtrace_aggid_t aggid = agg->dtag_id;
10926 ASSERT(DTRACEACT_ISAGG(act->dta_kind));
10927 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
10929 ASSERT(state->dts_aggregations[aggid - 1] == agg);
10930 state->dts_aggregations[aggid - 1] = NULL;
10932 kmem_free(agg, sizeof (dtrace_aggregation_t));
10935 static int
10936 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
10938 dtrace_action_t *action, *last;
10939 dtrace_difo_t *dp = desc->dtad_difo;
10940 uint32_t size = 0, align = sizeof (uint8_t), mask;
10941 uint16_t format = 0;
10942 dtrace_recdesc_t *rec;
10943 dtrace_state_t *state = ecb->dte_state;
10944 dtrace_optval_t *opt = state->dts_options, nframes, strsize;
10945 uint64_t arg = desc->dtad_arg;
10947 ASSERT(MUTEX_HELD(&dtrace_lock));
10948 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
10950 if (DTRACEACT_ISAGG(desc->dtad_kind)) {
10952 * If this is an aggregating action, there must be neither
10953 * a speculate nor a commit on the action chain.
10955 dtrace_action_t *act;
10957 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
10958 if (act->dta_kind == DTRACEACT_COMMIT)
10959 return (EINVAL);
10961 if (act->dta_kind == DTRACEACT_SPECULATE)
10962 return (EINVAL);
10965 action = dtrace_ecb_aggregation_create(ecb, desc);
10967 if (action == NULL)
10968 return (EINVAL);
10969 } else {
10970 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
10971 (desc->dtad_kind == DTRACEACT_DIFEXPR &&
10972 dp != NULL && dp->dtdo_destructive)) {
10973 state->dts_destructive = 1;
10976 switch (desc->dtad_kind) {
10977 case DTRACEACT_PRINTF:
10978 case DTRACEACT_PRINTA:
10979 case DTRACEACT_SYSTEM:
10980 case DTRACEACT_FREOPEN:
10981 case DTRACEACT_DIFEXPR:
10983 * We know that our arg is a string -- turn it into a
10984 * format.
10986 if (arg == (uintptr_t)NULL) {
10987 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA ||
10988 desc->dtad_kind == DTRACEACT_DIFEXPR);
10989 format = 0;
10990 } else {
10991 ASSERT(arg != (uintptr_t)NULL);
10992 ASSERT(arg > KERNELBASE);
10993 format = dtrace_format_add(state,
10994 (char *)(uintptr_t)arg);
10997 /*FALLTHROUGH*/
10998 case DTRACEACT_LIBACT:
10999 case DTRACEACT_TRACEMEM:
11000 case DTRACEACT_TRACEMEM_DYNSIZE:
11001 if (dp == NULL)
11002 return (EINVAL);
11004 if ((size = dp->dtdo_rtype.dtdt_size) != 0)
11005 break;
11007 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
11008 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11009 return (EINVAL);
11011 size = opt[DTRACEOPT_STRSIZE];
11014 break;
11016 case DTRACEACT_STACK:
11017 if ((nframes = arg) == 0) {
11018 nframes = opt[DTRACEOPT_STACKFRAMES];
11019 ASSERT(nframes > 0);
11020 arg = nframes;
11023 size = nframes * sizeof (pc_t);
11024 break;
11026 case DTRACEACT_JSTACK:
11027 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
11028 strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
11030 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
11031 nframes = opt[DTRACEOPT_JSTACKFRAMES];
11033 arg = DTRACE_USTACK_ARG(nframes, strsize);
11035 /*FALLTHROUGH*/
11036 case DTRACEACT_USTACK:
11037 if (desc->dtad_kind != DTRACEACT_JSTACK &&
11038 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
11039 strsize = DTRACE_USTACK_STRSIZE(arg);
11040 nframes = opt[DTRACEOPT_USTACKFRAMES];
11041 ASSERT(nframes > 0);
11042 arg = DTRACE_USTACK_ARG(nframes, strsize);
11046 * Save a slot for the pid.
11048 size = (nframes + 1) * sizeof (uint64_t);
11049 size += DTRACE_USTACK_STRSIZE(arg);
11050 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
11052 break;
11054 case DTRACEACT_SYM:
11055 case DTRACEACT_MOD:
11056 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
11057 sizeof (uint64_t)) ||
11058 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11059 return (EINVAL);
11060 break;
11062 case DTRACEACT_USYM:
11063 case DTRACEACT_UMOD:
11064 case DTRACEACT_UADDR:
11065 if (dp == NULL ||
11066 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
11067 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11068 return (EINVAL);
11071 * We have a slot for the pid, plus a slot for the
11072 * argument. To keep things simple (aligned with
11073 * bitness-neutral sizing), we store each as a 64-bit
11074 * quantity.
11076 size = 2 * sizeof (uint64_t);
11077 break;
11079 case DTRACEACT_STOP:
11080 case DTRACEACT_BREAKPOINT:
11081 case DTRACEACT_PANIC:
11082 break;
11084 case DTRACEACT_CHILL:
11085 case DTRACEACT_DISCARD:
11086 case DTRACEACT_RAISE:
11087 if (dp == NULL)
11088 return (EINVAL);
11089 break;
11091 case DTRACEACT_EXIT:
11092 if (dp == NULL ||
11093 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
11094 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11095 return (EINVAL);
11096 break;
11098 case DTRACEACT_SPECULATE:
11099 if (ecb->dte_size > sizeof (dtrace_rechdr_t))
11100 return (EINVAL);
11102 if (dp == NULL)
11103 return (EINVAL);
11105 state->dts_speculates = 1;
11106 break;
11108 case DTRACEACT_COMMIT: {
11109 dtrace_action_t *act = ecb->dte_action;
11111 for (; act != NULL; act = act->dta_next) {
11112 if (act->dta_kind == DTRACEACT_COMMIT)
11113 return (EINVAL);
11116 if (dp == NULL)
11117 return (EINVAL);
11118 break;
11121 default:
11122 return (EINVAL);
11125 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
11127 * If this is a data-storing action or a speculate,
11128 * we must be sure that there isn't a commit on the
11129 * action chain.
11131 dtrace_action_t *act = ecb->dte_action;
11133 for (; act != NULL; act = act->dta_next) {
11134 if (act->dta_kind == DTRACEACT_COMMIT)
11135 return (EINVAL);
11139 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
11140 action->dta_rec.dtrd_size = size;
11143 action->dta_refcnt = 1;
11144 rec = &action->dta_rec;
11145 size = rec->dtrd_size;
11147 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
11148 if (!(size & mask)) {
11149 align = mask + 1;
11150 break;
11154 action->dta_kind = desc->dtad_kind;
11156 if ((action->dta_difo = dp) != NULL)
11157 dtrace_difo_hold(dp);
11159 rec->dtrd_action = action->dta_kind;
11160 rec->dtrd_arg = arg;
11161 rec->dtrd_uarg = desc->dtad_uarg;
11162 rec->dtrd_alignment = (uint16_t)align;
11163 rec->dtrd_format = format;
11165 if ((last = ecb->dte_action_last) != NULL) {
11166 ASSERT(ecb->dte_action != NULL);
11167 action->dta_prev = last;
11168 last->dta_next = action;
11169 } else {
11170 ASSERT(ecb->dte_action == NULL);
11171 ecb->dte_action = action;
11174 ecb->dte_action_last = action;
11176 return (0);
11179 static void
11180 dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
11182 dtrace_action_t *act = ecb->dte_action, *next;
11183 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
11184 dtrace_difo_t *dp;
11185 uint16_t format;
11187 if (act != NULL && act->dta_refcnt > 1) {
11188 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
11189 act->dta_refcnt--;
11190 } else {
11191 for (; act != NULL; act = next) {
11192 next = act->dta_next;
11193 ASSERT(next != NULL || act == ecb->dte_action_last);
11194 ASSERT(act->dta_refcnt == 1);
11196 if ((format = act->dta_rec.dtrd_format) != 0)
11197 dtrace_format_remove(ecb->dte_state, format);
11199 if ((dp = act->dta_difo) != NULL)
11200 dtrace_difo_release(dp, vstate);
11202 if (DTRACEACT_ISAGG(act->dta_kind)) {
11203 dtrace_ecb_aggregation_destroy(ecb, act);
11204 } else {
11205 kmem_free(act, sizeof (dtrace_action_t));
11210 ecb->dte_action = NULL;
11211 ecb->dte_action_last = NULL;
11212 ecb->dte_size = 0;
11215 static void
11216 dtrace_ecb_disable(dtrace_ecb_t *ecb)
11219 * We disable the ECB by removing it from its probe.
11221 dtrace_ecb_t *pecb, *prev = NULL;
11222 dtrace_probe_t *probe = ecb->dte_probe;
11224 ASSERT(MUTEX_HELD(&dtrace_lock));
11226 if (probe == NULL) {
11228 * This is the NULL probe; there is nothing to disable.
11230 return;
11233 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
11234 if (pecb == ecb)
11235 break;
11236 prev = pecb;
11239 ASSERT(pecb != NULL);
11241 if (prev == NULL) {
11242 probe->dtpr_ecb = ecb->dte_next;
11243 } else {
11244 prev->dte_next = ecb->dte_next;
11247 if (ecb == probe->dtpr_ecb_last) {
11248 ASSERT(ecb->dte_next == NULL);
11249 probe->dtpr_ecb_last = prev;
11253 * The ECB has been disconnected from the probe; now sync to assure
11254 * that all CPUs have seen the change before returning.
11256 dtrace_sync();
11258 if (probe->dtpr_ecb == NULL) {
11260 * That was the last ECB on the probe; clear the predicate
11261 * cache ID for the probe, disable it and sync one more time
11262 * to assure that we'll never hit it again.
11264 dtrace_provider_t *prov = probe->dtpr_provider;
11266 ASSERT(ecb->dte_next == NULL);
11267 ASSERT(probe->dtpr_ecb_last == NULL);
11268 probe->dtpr_predcache = DTRACE_CACHEIDNONE;
11269 prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
11270 probe->dtpr_id, probe->dtpr_arg);
11271 dtrace_sync();
11272 } else {
11274 * There is at least one ECB remaining on the probe. If there
11275 * is _exactly_ one, set the probe's predicate cache ID to be
11276 * the predicate cache ID of the remaining ECB.
11278 ASSERT(probe->dtpr_ecb_last != NULL);
11279 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE);
11281 if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
11282 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
11284 ASSERT(probe->dtpr_ecb->dte_next == NULL);
11286 if (p != NULL)
11287 probe->dtpr_predcache = p->dtp_cacheid;
11290 ecb->dte_next = NULL;
11294 static void
11295 dtrace_ecb_destroy(dtrace_ecb_t *ecb)
11297 dtrace_state_t *state = ecb->dte_state;
11298 dtrace_vstate_t *vstate = &state->dts_vstate;
11299 dtrace_predicate_t *pred;
11300 dtrace_epid_t epid = ecb->dte_epid;
11302 ASSERT(MUTEX_HELD(&dtrace_lock));
11303 ASSERT(ecb->dte_next == NULL);
11304 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
11306 if ((pred = ecb->dte_predicate) != NULL)
11307 dtrace_predicate_release(pred, vstate);
11309 dtrace_ecb_action_remove(ecb);
11311 ASSERT(state->dts_ecbs[epid - 1] == ecb);
11312 state->dts_ecbs[epid - 1] = NULL;
11314 kmem_free(ecb, sizeof (dtrace_ecb_t));
11317 static dtrace_ecb_t *
11318 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
11319 dtrace_enabling_t *enab)
11321 dtrace_ecb_t *ecb;
11322 dtrace_predicate_t *pred;
11323 dtrace_actdesc_t *act;
11324 dtrace_provider_t *prov;
11325 dtrace_ecbdesc_t *desc = enab->dten_current;
11327 ASSERT(MUTEX_HELD(&dtrace_lock));
11328 ASSERT(state != NULL);
11330 ecb = dtrace_ecb_add(state, probe);
11331 ecb->dte_uarg = desc->dted_uarg;
11333 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
11334 dtrace_predicate_hold(pred);
11335 ecb->dte_predicate = pred;
11338 if (probe != NULL) {
11340 * If the provider shows more leg than the consumer is old
11341 * enough to see, we need to enable the appropriate implicit
11342 * predicate bits to prevent the ecb from activating at
11343 * revealing times.
11345 * Providers specifying DTRACE_PRIV_USER at register time
11346 * are stating that they need the /proc-style privilege
11347 * model to be enforced, and this is what DTRACE_COND_OWNER
11348 * and DTRACE_COND_ZONEOWNER will then do at probe time.
11350 prov = probe->dtpr_provider;
11351 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
11352 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
11353 ecb->dte_cond |= DTRACE_COND_OWNER;
11355 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
11356 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
11357 ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
11360 * If the provider shows us kernel innards and the user
11361 * is lacking sufficient privilege, enable the
11362 * DTRACE_COND_USERMODE implicit predicate.
11364 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
11365 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
11366 ecb->dte_cond |= DTRACE_COND_USERMODE;
11369 if (dtrace_ecb_create_cache != NULL) {
11371 * If we have a cached ecb, we'll use its action list instead
11372 * of creating our own (saving both time and space).
11374 dtrace_ecb_t *cached = dtrace_ecb_create_cache;
11375 dtrace_action_t *act = cached->dte_action;
11377 if (act != NULL) {
11378 ASSERT(act->dta_refcnt > 0);
11379 act->dta_refcnt++;
11380 ecb->dte_action = act;
11381 ecb->dte_action_last = cached->dte_action_last;
11382 ecb->dte_needed = cached->dte_needed;
11383 ecb->dte_size = cached->dte_size;
11384 ecb->dte_alignment = cached->dte_alignment;
11387 return (ecb);
11390 for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
11391 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
11392 dtrace_ecb_destroy(ecb);
11393 return (NULL);
11397 if ((enab->dten_error = dtrace_ecb_resize(ecb)) != 0) {
11398 dtrace_ecb_destroy(ecb);
11399 return (NULL);
11402 return (dtrace_ecb_create_cache = ecb);
11405 static int
11406 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg)
11408 dtrace_ecb_t *ecb;
11409 dtrace_enabling_t *enab = arg;
11410 dtrace_state_t *state = enab->dten_vstate->dtvs_state;
11412 ASSERT(state != NULL);
11414 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) {
11416 * This probe was created in a generation for which this
11417 * enabling has previously created ECBs; we don't want to
11418 * enable it again, so just kick out.
11420 return (DTRACE_MATCH_NEXT);
11423 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
11424 return (DTRACE_MATCH_DONE);
11426 if (dtrace_ecb_enable(ecb) < 0)
11427 return (DTRACE_MATCH_FAIL);
11429 return (DTRACE_MATCH_NEXT);
11432 static dtrace_ecb_t *
11433 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
11435 dtrace_ecb_t *ecb;
11437 ASSERT(MUTEX_HELD(&dtrace_lock));
11439 if (id == 0 || id > state->dts_necbs)
11440 return (NULL);
11442 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
11443 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
11445 return (state->dts_ecbs[id - 1]);
11448 static dtrace_aggregation_t *
11449 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
11451 dtrace_aggregation_t *agg;
11453 ASSERT(MUTEX_HELD(&dtrace_lock));
11455 if (id == 0 || id > state->dts_naggregations)
11456 return (NULL);
11458 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
11459 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
11460 agg->dtag_id == id);
11462 return (state->dts_aggregations[id - 1]);
11466 * DTrace Buffer Functions
11468 * The following functions manipulate DTrace buffers. Most of these functions
11469 * are called in the context of establishing or processing consumer state;
11470 * exceptions are explicitly noted.
11474 * Note: called from cross call context. This function switches the two
11475 * buffers on a given CPU. The atomicity of this operation is assured by
11476 * disabling interrupts while the actual switch takes place; the disabling of
11477 * interrupts serializes the execution with any execution of dtrace_probe() on
11478 * the same CPU.
11480 static void
11481 dtrace_buffer_switch(dtrace_buffer_t *buf)
11483 caddr_t tomax = buf->dtb_tomax;
11484 caddr_t xamot = buf->dtb_xamot;
11485 dtrace_icookie_t cookie;
11486 hrtime_t now;
11488 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
11489 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
11491 cookie = dtrace_interrupt_disable();
11492 now = dtrace_gethrtime();
11493 buf->dtb_tomax = xamot;
11494 buf->dtb_xamot = tomax;
11495 buf->dtb_xamot_drops = buf->dtb_drops;
11496 buf->dtb_xamot_offset = buf->dtb_offset;
11497 buf->dtb_xamot_errors = buf->dtb_errors;
11498 buf->dtb_xamot_flags = buf->dtb_flags;
11499 buf->dtb_offset = 0;
11500 buf->dtb_drops = 0;
11501 buf->dtb_errors = 0;
11502 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
11503 buf->dtb_interval = now - buf->dtb_switched;
11504 buf->dtb_switched = now;
11505 dtrace_interrupt_enable(cookie);
11509 * Note: called from cross call context. This function activates a buffer
11510 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation
11511 * is guaranteed by the disabling of interrupts.
11513 static void
11514 dtrace_buffer_activate(dtrace_state_t *state)
11516 dtrace_buffer_t *buf;
11517 dtrace_icookie_t cookie = dtrace_interrupt_disable();
11519 buf = &state->dts_buffer[CPU->cpu_id];
11521 if (buf->dtb_tomax != NULL) {
11523 * We might like to assert that the buffer is marked inactive,
11524 * but this isn't necessarily true: the buffer for the CPU
11525 * that processes the BEGIN probe has its buffer activated
11526 * manually. In this case, we take the (harmless) action
11527 * re-clearing the bit INACTIVE bit.
11529 buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
11532 dtrace_interrupt_enable(cookie);
11535 static int
11536 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
11537 processorid_t cpu, int *factor)
11539 cpu_t *cp;
11540 dtrace_buffer_t *buf;
11541 int allocated = 0, desired = 0;
11543 ASSERT(MUTEX_HELD(&cpu_lock));
11544 ASSERT(MUTEX_HELD(&dtrace_lock));
11546 *factor = 1;
11548 if (size > dtrace_nonroot_maxsize &&
11549 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
11550 return (EFBIG);
11552 cp = cpu_list;
11554 do {
11555 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
11556 continue;
11558 buf = &bufs[cp->cpu_id];
11561 * If there is already a buffer allocated for this CPU, it
11562 * is only possible that this is a DR event. In this case,
11563 * the buffer size must match our specified size.
11565 if (buf->dtb_tomax != NULL) {
11566 ASSERT(buf->dtb_size == size);
11567 continue;
11570 ASSERT(buf->dtb_xamot == NULL);
11572 if ((buf->dtb_tomax = kmem_zalloc(size,
11573 KM_NOSLEEP | KM_NORMALPRI)) == NULL)
11574 goto err;
11576 buf->dtb_size = size;
11577 buf->dtb_flags = flags;
11578 buf->dtb_offset = 0;
11579 buf->dtb_drops = 0;
11581 if (flags & DTRACEBUF_NOSWITCH)
11582 continue;
11584 if ((buf->dtb_xamot = kmem_zalloc(size,
11585 KM_NOSLEEP | KM_NORMALPRI)) == NULL)
11586 goto err;
11587 } while ((cp = cp->cpu_next) != cpu_list);
11589 return (0);
11591 err:
11592 cp = cpu_list;
11594 do {
11595 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
11596 continue;
11598 buf = &bufs[cp->cpu_id];
11599 desired += 2;
11601 if (buf->dtb_xamot != NULL) {
11602 ASSERT(buf->dtb_tomax != NULL);
11603 ASSERT(buf->dtb_size == size);
11604 kmem_free(buf->dtb_xamot, size);
11605 allocated++;
11608 if (buf->dtb_tomax != NULL) {
11609 ASSERT(buf->dtb_size == size);
11610 kmem_free(buf->dtb_tomax, size);
11611 allocated++;
11614 buf->dtb_tomax = NULL;
11615 buf->dtb_xamot = NULL;
11616 buf->dtb_size = 0;
11617 } while ((cp = cp->cpu_next) != cpu_list);
11619 *factor = desired / (allocated > 0 ? allocated : 1);
11621 return (ENOMEM);
11625 * Note: called from probe context. This function just increments the drop
11626 * count on a buffer. It has been made a function to allow for the
11627 * possibility of understanding the source of mysterious drop counts. (A
11628 * problem for which one may be particularly disappointed that DTrace cannot
11629 * be used to understand DTrace.)
11631 static void
11632 dtrace_buffer_drop(dtrace_buffer_t *buf)
11634 buf->dtb_drops++;
11638 * Note: called from probe context. This function is called to reserve space
11639 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the
11640 * mstate. Returns the new offset in the buffer, or a negative value if an
11641 * error has occurred.
11643 static intptr_t
11644 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
11645 dtrace_state_t *state, dtrace_mstate_t *mstate)
11647 intptr_t offs = buf->dtb_offset, soffs;
11648 intptr_t woffs;
11649 caddr_t tomax;
11650 size_t total;
11652 if (buf->dtb_flags & DTRACEBUF_INACTIVE)
11653 return (-1);
11655 if ((tomax = buf->dtb_tomax) == NULL) {
11656 dtrace_buffer_drop(buf);
11657 return (-1);
11660 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) {
11661 while (offs & (align - 1)) {
11663 * Assert that our alignment is off by a number which
11664 * is itself sizeof (uint32_t) aligned.
11666 ASSERT(!((align - (offs & (align - 1))) &
11667 (sizeof (uint32_t) - 1)));
11668 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
11669 offs += sizeof (uint32_t);
11672 if ((soffs = offs + needed) > buf->dtb_size) {
11673 dtrace_buffer_drop(buf);
11674 return (-1);
11677 if (mstate == NULL)
11678 return (offs);
11680 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs;
11681 mstate->dtms_scratch_size = buf->dtb_size - soffs;
11682 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
11684 return (offs);
11687 if (buf->dtb_flags & DTRACEBUF_FILL) {
11688 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN &&
11689 (buf->dtb_flags & DTRACEBUF_FULL))
11690 return (-1);
11691 goto out;
11694 total = needed + (offs & (align - 1));
11697 * For a ring buffer, life is quite a bit more complicated. Before
11698 * we can store any padding, we need to adjust our wrapping offset.
11699 * (If we've never before wrapped or we're not about to, no adjustment
11700 * is required.)
11702 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
11703 offs + total > buf->dtb_size) {
11704 woffs = buf->dtb_xamot_offset;
11706 if (offs + total > buf->dtb_size) {
11708 * We can't fit in the end of the buffer. First, a
11709 * sanity check that we can fit in the buffer at all.
11711 if (total > buf->dtb_size) {
11712 dtrace_buffer_drop(buf);
11713 return (-1);
11717 * We're going to be storing at the top of the buffer,
11718 * so now we need to deal with the wrapped offset. We
11719 * only reset our wrapped offset to 0 if it is
11720 * currently greater than the current offset. If it
11721 * is less than the current offset, it is because a
11722 * previous allocation induced a wrap -- but the
11723 * allocation didn't subsequently take the space due
11724 * to an error or false predicate evaluation. In this
11725 * case, we'll just leave the wrapped offset alone: if
11726 * the wrapped offset hasn't been advanced far enough
11727 * for this allocation, it will be adjusted in the
11728 * lower loop.
11730 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
11731 if (woffs >= offs)
11732 woffs = 0;
11733 } else {
11734 woffs = 0;
11738 * Now we know that we're going to be storing to the
11739 * top of the buffer and that there is room for us
11740 * there. We need to clear the buffer from the current
11741 * offset to the end (there may be old gunk there).
11743 while (offs < buf->dtb_size)
11744 tomax[offs++] = 0;
11747 * We need to set our offset to zero. And because we
11748 * are wrapping, we need to set the bit indicating as
11749 * much. We can also adjust our needed space back
11750 * down to the space required by the ECB -- we know
11751 * that the top of the buffer is aligned.
11753 offs = 0;
11754 total = needed;
11755 buf->dtb_flags |= DTRACEBUF_WRAPPED;
11756 } else {
11758 * There is room for us in the buffer, so we simply
11759 * need to check the wrapped offset.
11761 if (woffs < offs) {
11763 * The wrapped offset is less than the offset.
11764 * This can happen if we allocated buffer space
11765 * that induced a wrap, but then we didn't
11766 * subsequently take the space due to an error
11767 * or false predicate evaluation. This is
11768 * okay; we know that _this_ allocation isn't
11769 * going to induce a wrap. We still can't
11770 * reset the wrapped offset to be zero,
11771 * however: the space may have been trashed in
11772 * the previous failed probe attempt. But at
11773 * least the wrapped offset doesn't need to
11774 * be adjusted at all...
11776 goto out;
11780 while (offs + total > woffs) {
11781 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
11782 size_t size;
11784 if (epid == DTRACE_EPIDNONE) {
11785 size = sizeof (uint32_t);
11786 } else {
11787 ASSERT3U(epid, <=, state->dts_necbs);
11788 ASSERT(state->dts_ecbs[epid - 1] != NULL);
11790 size = state->dts_ecbs[epid - 1]->dte_size;
11793 ASSERT(woffs + size <= buf->dtb_size);
11794 ASSERT(size != 0);
11796 if (woffs + size == buf->dtb_size) {
11798 * We've reached the end of the buffer; we want
11799 * to set the wrapped offset to 0 and break
11800 * out. However, if the offs is 0, then we're
11801 * in a strange edge-condition: the amount of
11802 * space that we want to reserve plus the size
11803 * of the record that we're overwriting is
11804 * greater than the size of the buffer. This
11805 * is problematic because if we reserve the
11806 * space but subsequently don't consume it (due
11807 * to a failed predicate or error) the wrapped
11808 * offset will be 0 -- yet the EPID at offset 0
11809 * will not be committed. This situation is
11810 * relatively easy to deal with: if we're in
11811 * this case, the buffer is indistinguishable
11812 * from one that hasn't wrapped; we need only
11813 * finish the job by clearing the wrapped bit,
11814 * explicitly setting the offset to be 0, and
11815 * zero'ing out the old data in the buffer.
11817 if (offs == 0) {
11818 buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
11819 buf->dtb_offset = 0;
11820 woffs = total;
11822 while (woffs < buf->dtb_size)
11823 tomax[woffs++] = 0;
11826 woffs = 0;
11827 break;
11830 woffs += size;
11834 * We have a wrapped offset. It may be that the wrapped offset
11835 * has become zero -- that's okay.
11837 buf->dtb_xamot_offset = woffs;
11840 out:
11842 * Now we can plow the buffer with any necessary padding.
11844 while (offs & (align - 1)) {
11846 * Assert that our alignment is off by a number which
11847 * is itself sizeof (uint32_t) aligned.
11849 ASSERT(!((align - (offs & (align - 1))) &
11850 (sizeof (uint32_t) - 1)));
11851 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
11852 offs += sizeof (uint32_t);
11855 if (buf->dtb_flags & DTRACEBUF_FILL) {
11856 if (offs + needed > buf->dtb_size - state->dts_reserve) {
11857 buf->dtb_flags |= DTRACEBUF_FULL;
11858 return (-1);
11862 if (mstate == NULL)
11863 return (offs);
11866 * For ring buffers and fill buffers, the scratch space is always
11867 * the inactive buffer.
11869 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot;
11870 mstate->dtms_scratch_size = buf->dtb_size;
11871 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
11873 return (offs);
11876 static void
11877 dtrace_buffer_polish(dtrace_buffer_t *buf)
11879 ASSERT(buf->dtb_flags & DTRACEBUF_RING);
11880 ASSERT(MUTEX_HELD(&dtrace_lock));
11882 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
11883 return;
11886 * We need to polish the ring buffer. There are three cases:
11888 * - The first (and presumably most common) is that there is no gap
11889 * between the buffer offset and the wrapped offset. In this case,
11890 * there is nothing in the buffer that isn't valid data; we can
11891 * mark the buffer as polished and return.
11893 * - The second (less common than the first but still more common
11894 * than the third) is that there is a gap between the buffer offset
11895 * and the wrapped offset, and the wrapped offset is larger than the
11896 * buffer offset. This can happen because of an alignment issue, or
11897 * can happen because of a call to dtrace_buffer_reserve() that
11898 * didn't subsequently consume the buffer space. In this case,
11899 * we need to zero the data from the buffer offset to the wrapped
11900 * offset.
11902 * - The third (and least common) is that there is a gap between the
11903 * buffer offset and the wrapped offset, but the wrapped offset is
11904 * _less_ than the buffer offset. This can only happen because a
11905 * call to dtrace_buffer_reserve() induced a wrap, but the space
11906 * was not subsequently consumed. In this case, we need to zero the
11907 * space from the offset to the end of the buffer _and_ from the
11908 * top of the buffer to the wrapped offset.
11910 if (buf->dtb_offset < buf->dtb_xamot_offset) {
11911 bzero(buf->dtb_tomax + buf->dtb_offset,
11912 buf->dtb_xamot_offset - buf->dtb_offset);
11915 if (buf->dtb_offset > buf->dtb_xamot_offset) {
11916 bzero(buf->dtb_tomax + buf->dtb_offset,
11917 buf->dtb_size - buf->dtb_offset);
11918 bzero(buf->dtb_tomax, buf->dtb_xamot_offset);
11923 * This routine determines if data generated at the specified time has likely
11924 * been entirely consumed at user-level. This routine is called to determine
11925 * if an ECB on a defunct probe (but for an active enabling) can be safely
11926 * disabled and destroyed.
11928 static int
11929 dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when)
11931 int i;
11933 for (i = 0; i < NCPU; i++) {
11934 dtrace_buffer_t *buf = &bufs[i];
11936 if (buf->dtb_size == 0)
11937 continue;
11939 if (buf->dtb_flags & DTRACEBUF_RING)
11940 return (0);
11942 if (!buf->dtb_switched && buf->dtb_offset != 0)
11943 return (0);
11945 if (buf->dtb_switched - buf->dtb_interval < when)
11946 return (0);
11949 return (1);
11952 static void
11953 dtrace_buffer_free(dtrace_buffer_t *bufs)
11955 int i;
11957 for (i = 0; i < NCPU; i++) {
11958 dtrace_buffer_t *buf = &bufs[i];
11960 if (buf->dtb_tomax == NULL) {
11961 ASSERT(buf->dtb_xamot == NULL);
11962 ASSERT(buf->dtb_size == 0);
11963 continue;
11966 if (buf->dtb_xamot != NULL) {
11967 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
11968 kmem_free(buf->dtb_xamot, buf->dtb_size);
11971 kmem_free(buf->dtb_tomax, buf->dtb_size);
11972 buf->dtb_size = 0;
11973 buf->dtb_tomax = NULL;
11974 buf->dtb_xamot = NULL;
11979 * DTrace Enabling Functions
11981 static dtrace_enabling_t *
11982 dtrace_enabling_create(dtrace_vstate_t *vstate)
11984 dtrace_enabling_t *enab;
11986 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP);
11987 enab->dten_vstate = vstate;
11989 return (enab);
11992 static void
11993 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb)
11995 dtrace_ecbdesc_t **ndesc;
11996 size_t osize, nsize;
11999 * We can't add to enablings after we've enabled them, or after we've
12000 * retained them.
12002 ASSERT(enab->dten_probegen == 0);
12003 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
12005 if (enab->dten_ndesc < enab->dten_maxdesc) {
12006 enab->dten_desc[enab->dten_ndesc++] = ecb;
12007 return;
12010 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
12012 if (enab->dten_maxdesc == 0) {
12013 enab->dten_maxdesc = 1;
12014 } else {
12015 enab->dten_maxdesc <<= 1;
12018 ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
12020 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
12021 ndesc = kmem_zalloc(nsize, KM_SLEEP);
12022 bcopy(enab->dten_desc, ndesc, osize);
12023 kmem_free(enab->dten_desc, osize);
12025 enab->dten_desc = ndesc;
12026 enab->dten_desc[enab->dten_ndesc++] = ecb;
12029 static void
12030 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
12031 dtrace_probedesc_t *pd)
12033 dtrace_ecbdesc_t *new;
12034 dtrace_predicate_t *pred;
12035 dtrace_actdesc_t *act;
12038 * We're going to create a new ECB description that matches the
12039 * specified ECB in every way, but has the specified probe description.
12041 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
12043 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
12044 dtrace_predicate_hold(pred);
12046 for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
12047 dtrace_actdesc_hold(act);
12049 new->dted_action = ecb->dted_action;
12050 new->dted_pred = ecb->dted_pred;
12051 new->dted_probe = *pd;
12052 new->dted_uarg = ecb->dted_uarg;
12054 dtrace_enabling_add(enab, new);
12057 static void
12058 dtrace_enabling_dump(dtrace_enabling_t *enab)
12060 int i;
12062 for (i = 0; i < enab->dten_ndesc; i++) {
12063 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
12065 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
12066 desc->dtpd_provider, desc->dtpd_mod,
12067 desc->dtpd_func, desc->dtpd_name);
12071 static void
12072 dtrace_enabling_destroy(dtrace_enabling_t *enab)
12074 int i;
12075 dtrace_ecbdesc_t *ep;
12076 dtrace_vstate_t *vstate = enab->dten_vstate;
12078 ASSERT(MUTEX_HELD(&dtrace_lock));
12080 for (i = 0; i < enab->dten_ndesc; i++) {
12081 dtrace_actdesc_t *act, *next;
12082 dtrace_predicate_t *pred;
12084 ep = enab->dten_desc[i];
12086 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
12087 dtrace_predicate_release(pred, vstate);
12089 for (act = ep->dted_action; act != NULL; act = next) {
12090 next = act->dtad_next;
12091 dtrace_actdesc_release(act, vstate);
12094 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
12097 kmem_free(enab->dten_desc,
12098 enab->dten_maxdesc * sizeof (dtrace_enabling_t *));
12101 * If this was a retained enabling, decrement the dts_nretained count
12102 * and take it off of the dtrace_retained list.
12104 if (enab->dten_prev != NULL || enab->dten_next != NULL ||
12105 dtrace_retained == enab) {
12106 ASSERT(enab->dten_vstate->dtvs_state != NULL);
12107 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0);
12108 enab->dten_vstate->dtvs_state->dts_nretained--;
12109 dtrace_retained_gen++;
12112 if (enab->dten_prev == NULL) {
12113 if (dtrace_retained == enab) {
12114 dtrace_retained = enab->dten_next;
12116 if (dtrace_retained != NULL)
12117 dtrace_retained->dten_prev = NULL;
12119 } else {
12120 ASSERT(enab != dtrace_retained);
12121 ASSERT(dtrace_retained != NULL);
12122 enab->dten_prev->dten_next = enab->dten_next;
12125 if (enab->dten_next != NULL) {
12126 ASSERT(dtrace_retained != NULL);
12127 enab->dten_next->dten_prev = enab->dten_prev;
12130 kmem_free(enab, sizeof (dtrace_enabling_t));
12133 static int
12134 dtrace_enabling_retain(dtrace_enabling_t *enab)
12136 dtrace_state_t *state;
12138 ASSERT(MUTEX_HELD(&dtrace_lock));
12139 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
12140 ASSERT(enab->dten_vstate != NULL);
12142 state = enab->dten_vstate->dtvs_state;
12143 ASSERT(state != NULL);
12146 * We only allow each state to retain dtrace_retain_max enablings.
12148 if (state->dts_nretained >= dtrace_retain_max)
12149 return (ENOSPC);
12151 state->dts_nretained++;
12152 dtrace_retained_gen++;
12154 if (dtrace_retained == NULL) {
12155 dtrace_retained = enab;
12156 return (0);
12159 enab->dten_next = dtrace_retained;
12160 dtrace_retained->dten_prev = enab;
12161 dtrace_retained = enab;
12163 return (0);
12166 static int
12167 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
12168 dtrace_probedesc_t *create)
12170 dtrace_enabling_t *new, *enab;
12171 int found = 0, err = ENOENT;
12173 ASSERT(MUTEX_HELD(&dtrace_lock));
12174 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
12175 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
12176 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
12177 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
12179 new = dtrace_enabling_create(&state->dts_vstate);
12182 * Iterate over all retained enablings, looking for enablings that
12183 * match the specified state.
12185 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12186 int i;
12189 * dtvs_state can only be NULL for helper enablings -- and
12190 * helper enablings can't be retained.
12192 ASSERT(enab->dten_vstate->dtvs_state != NULL);
12194 if (enab->dten_vstate->dtvs_state != state)
12195 continue;
12198 * Now iterate over each probe description; we're looking for
12199 * an exact match to the specified probe description.
12201 for (i = 0; i < enab->dten_ndesc; i++) {
12202 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
12203 dtrace_probedesc_t *pd = &ep->dted_probe;
12205 if (strcmp(pd->dtpd_provider, match->dtpd_provider))
12206 continue;
12208 if (strcmp(pd->dtpd_mod, match->dtpd_mod))
12209 continue;
12211 if (strcmp(pd->dtpd_func, match->dtpd_func))
12212 continue;
12214 if (strcmp(pd->dtpd_name, match->dtpd_name))
12215 continue;
12218 * We have a winning probe! Add it to our growing
12219 * enabling.
12221 found = 1;
12222 dtrace_enabling_addlike(new, ep, create);
12226 if (!found || (err = dtrace_enabling_retain(new)) != 0) {
12227 dtrace_enabling_destroy(new);
12228 return (err);
12231 return (0);
12234 static void
12235 dtrace_enabling_retract(dtrace_state_t *state)
12237 dtrace_enabling_t *enab, *next;
12239 ASSERT(MUTEX_HELD(&dtrace_lock));
12242 * Iterate over all retained enablings, destroy the enablings retained
12243 * for the specified state.
12245 for (enab = dtrace_retained; enab != NULL; enab = next) {
12246 next = enab->dten_next;
12249 * dtvs_state can only be NULL for helper enablings -- and
12250 * helper enablings can't be retained.
12252 ASSERT(enab->dten_vstate->dtvs_state != NULL);
12254 if (enab->dten_vstate->dtvs_state == state) {
12255 ASSERT(state->dts_nretained > 0);
12256 dtrace_enabling_destroy(enab);
12260 ASSERT(state->dts_nretained == 0);
12263 static int
12264 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
12266 int i = 0;
12267 int total_matched = 0, matched = 0;
12269 ASSERT(MUTEX_HELD(&cpu_lock));
12270 ASSERT(MUTEX_HELD(&dtrace_lock));
12272 for (i = 0; i < enab->dten_ndesc; i++) {
12273 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
12275 enab->dten_current = ep;
12276 enab->dten_error = 0;
12279 * If a provider failed to enable a probe then get out and
12280 * let the consumer know we failed.
12282 if ((matched = dtrace_probe_enable(&ep->dted_probe, enab)) < 0)
12283 return (EBUSY);
12285 total_matched += matched;
12287 if (enab->dten_error != 0) {
12289 * If we get an error half-way through enabling the
12290 * probes, we kick out -- perhaps with some number of
12291 * them enabled. Leaving enabled probes enabled may
12292 * be slightly confusing for user-level, but we expect
12293 * that no one will attempt to actually drive on in
12294 * the face of such errors. If this is an anonymous
12295 * enabling (indicated with a NULL nmatched pointer),
12296 * we cmn_err() a message. We aren't expecting to
12297 * get such an error -- such as it can exist at all,
12298 * it would be a result of corrupted DOF in the driver
12299 * properties.
12301 if (nmatched == NULL) {
12302 cmn_err(CE_WARN, "dtrace_enabling_match() "
12303 "error on %p: %d", (void *)ep,
12304 enab->dten_error);
12307 return (enab->dten_error);
12311 enab->dten_probegen = dtrace_probegen;
12312 if (nmatched != NULL)
12313 *nmatched = total_matched;
12315 return (0);
12318 static void
12319 dtrace_enabling_matchall(void)
12321 dtrace_enabling_t *enab;
12323 mutex_enter(&cpu_lock);
12324 mutex_enter(&dtrace_lock);
12327 * Iterate over all retained enablings to see if any probes match
12328 * against them. We only perform this operation on enablings for which
12329 * we have sufficient permissions by virtue of being in the global zone
12330 * or in the same zone as the DTrace client. Because we can be called
12331 * after dtrace_detach() has been called, we cannot assert that there
12332 * are retained enablings. We can safely load from dtrace_retained,
12333 * however: the taskq_destroy() at the end of dtrace_detach() will
12334 * block pending our completion.
12336 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12337 dtrace_cred_t *dcr = &enab->dten_vstate->dtvs_state->dts_cred;
12338 cred_t *cr = dcr->dcr_cred;
12339 zoneid_t zone = cr != NULL ? crgetzoneid(cr) : 0;
12341 if ((dcr->dcr_visible & DTRACE_CRV_ALLZONE) || (cr != NULL &&
12342 (zone == GLOBAL_ZONEID || getzoneid() == zone)))
12343 (void) dtrace_enabling_match(enab, NULL);
12346 mutex_exit(&dtrace_lock);
12347 mutex_exit(&cpu_lock);
12351 * If an enabling is to be enabled without having matched probes (that is, if
12352 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
12353 * enabling must be _primed_ by creating an ECB for every ECB description.
12354 * This must be done to assure that we know the number of speculations, the
12355 * number of aggregations, the minimum buffer size needed, etc. before we
12356 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually
12357 * enabling any probes, we create ECBs for every ECB decription, but with a
12358 * NULL probe -- which is exactly what this function does.
12360 static void
12361 dtrace_enabling_prime(dtrace_state_t *state)
12363 dtrace_enabling_t *enab;
12364 int i;
12366 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12367 ASSERT(enab->dten_vstate->dtvs_state != NULL);
12369 if (enab->dten_vstate->dtvs_state != state)
12370 continue;
12373 * We don't want to prime an enabling more than once, lest
12374 * we allow a malicious user to induce resource exhaustion.
12375 * (The ECBs that result from priming an enabling aren't
12376 * leaked -- but they also aren't deallocated until the
12377 * consumer state is destroyed.)
12379 if (enab->dten_primed)
12380 continue;
12382 for (i = 0; i < enab->dten_ndesc; i++) {
12383 enab->dten_current = enab->dten_desc[i];
12384 (void) dtrace_probe_enable(NULL, enab);
12387 enab->dten_primed = 1;
12392 * Called to indicate that probes should be provided due to retained
12393 * enablings. This is implemented in terms of dtrace_probe_provide(), but it
12394 * must take an initial lap through the enabling calling the dtps_provide()
12395 * entry point explicitly to allow for autocreated probes.
12397 static void
12398 dtrace_enabling_provide(dtrace_provider_t *prv)
12400 int i, all = 0;
12401 dtrace_probedesc_t desc;
12402 dtrace_genid_t gen;
12404 ASSERT(MUTEX_HELD(&dtrace_lock));
12405 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
12407 if (prv == NULL) {
12408 all = 1;
12409 prv = dtrace_provider;
12412 do {
12413 dtrace_enabling_t *enab;
12414 void *parg = prv->dtpv_arg;
12416 retry:
12417 gen = dtrace_retained_gen;
12418 for (enab = dtrace_retained; enab != NULL;
12419 enab = enab->dten_next) {
12420 for (i = 0; i < enab->dten_ndesc; i++) {
12421 desc = enab->dten_desc[i]->dted_probe;
12422 mutex_exit(&dtrace_lock);
12423 prv->dtpv_pops.dtps_provide(parg, &desc);
12424 mutex_enter(&dtrace_lock);
12426 * Process the retained enablings again if
12427 * they have changed while we weren't holding
12428 * dtrace_lock.
12430 if (gen != dtrace_retained_gen)
12431 goto retry;
12434 } while (all && (prv = prv->dtpv_next) != NULL);
12436 mutex_exit(&dtrace_lock);
12437 dtrace_probe_provide(NULL, all ? NULL : prv);
12438 mutex_enter(&dtrace_lock);
12442 * Called to reap ECBs that are attached to probes from defunct providers.
12444 static void
12445 dtrace_enabling_reap(void)
12447 dtrace_provider_t *prov;
12448 dtrace_probe_t *probe;
12449 dtrace_ecb_t *ecb;
12450 hrtime_t when;
12451 int i;
12453 mutex_enter(&cpu_lock);
12454 mutex_enter(&dtrace_lock);
12456 for (i = 0; i < dtrace_nprobes; i++) {
12457 if ((probe = dtrace_probes[i]) == NULL)
12458 continue;
12460 if (probe->dtpr_ecb == NULL)
12461 continue;
12463 prov = probe->dtpr_provider;
12465 if ((when = prov->dtpv_defunct) == 0)
12466 continue;
12469 * We have ECBs on a defunct provider: we want to reap these
12470 * ECBs to allow the provider to unregister. The destruction
12471 * of these ECBs must be done carefully: if we destroy the ECB
12472 * and the consumer later wishes to consume an EPID that
12473 * corresponds to the destroyed ECB (and if the EPID metadata
12474 * has not been previously consumed), the consumer will abort
12475 * processing on the unknown EPID. To reduce (but not, sadly,
12476 * eliminate) the possibility of this, we will only destroy an
12477 * ECB for a defunct provider if, for the state that
12478 * corresponds to the ECB:
12480 * (a) There is no speculative tracing (which can effectively
12481 * cache an EPID for an arbitrary amount of time).
12483 * (b) The principal buffers have been switched twice since the
12484 * provider became defunct.
12486 * (c) The aggregation buffers are of zero size or have been
12487 * switched twice since the provider became defunct.
12489 * We use dts_speculates to determine (a) and call a function
12490 * (dtrace_buffer_consumed()) to determine (b) and (c). Note
12491 * that as soon as we've been unable to destroy one of the ECBs
12492 * associated with the probe, we quit trying -- reaping is only
12493 * fruitful in as much as we can destroy all ECBs associated
12494 * with the defunct provider's probes.
12496 while ((ecb = probe->dtpr_ecb) != NULL) {
12497 dtrace_state_t *state = ecb->dte_state;
12498 dtrace_buffer_t *buf = state->dts_buffer;
12499 dtrace_buffer_t *aggbuf = state->dts_aggbuffer;
12501 if (state->dts_speculates)
12502 break;
12504 if (!dtrace_buffer_consumed(buf, when))
12505 break;
12507 if (!dtrace_buffer_consumed(aggbuf, when))
12508 break;
12510 dtrace_ecb_disable(ecb);
12511 ASSERT(probe->dtpr_ecb != ecb);
12512 dtrace_ecb_destroy(ecb);
12516 mutex_exit(&dtrace_lock);
12517 mutex_exit(&cpu_lock);
12521 * DTrace DOF Functions
12523 /*ARGSUSED*/
12524 static void
12525 dtrace_dof_error(dof_hdr_t *dof, const char *str)
12527 if (dtrace_err_verbose)
12528 cmn_err(CE_WARN, "failed to process DOF: %s", str);
12530 #ifdef DTRACE_ERRDEBUG
12531 dtrace_errdebug(str);
12532 #endif
12536 * Create DOF out of a currently enabled state. Right now, we only create
12537 * DOF containing the run-time options -- but this could be expanded to create
12538 * complete DOF representing the enabled state.
12540 static dof_hdr_t *
12541 dtrace_dof_create(dtrace_state_t *state)
12543 dof_hdr_t *dof;
12544 dof_sec_t *sec;
12545 dof_optdesc_t *opt;
12546 int i, len = sizeof (dof_hdr_t) +
12547 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
12548 sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
12550 ASSERT(MUTEX_HELD(&dtrace_lock));
12552 dof = kmem_zalloc(len, KM_SLEEP);
12553 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
12554 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
12555 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
12556 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
12558 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
12559 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
12560 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
12561 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
12562 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
12563 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
12565 dof->dofh_flags = 0;
12566 dof->dofh_hdrsize = sizeof (dof_hdr_t);
12567 dof->dofh_secsize = sizeof (dof_sec_t);
12568 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */
12569 dof->dofh_secoff = sizeof (dof_hdr_t);
12570 dof->dofh_loadsz = len;
12571 dof->dofh_filesz = len;
12572 dof->dofh_pad = 0;
12575 * Fill in the option section header...
12577 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
12578 sec->dofs_type = DOF_SECT_OPTDESC;
12579 sec->dofs_align = sizeof (uint64_t);
12580 sec->dofs_flags = DOF_SECF_LOAD;
12581 sec->dofs_entsize = sizeof (dof_optdesc_t);
12583 opt = (dof_optdesc_t *)((uintptr_t)sec +
12584 roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
12586 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
12587 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
12589 for (i = 0; i < DTRACEOPT_MAX; i++) {
12590 opt[i].dofo_option = i;
12591 opt[i].dofo_strtab = DOF_SECIDX_NONE;
12592 opt[i].dofo_value = state->dts_options[i];
12595 return (dof);
12598 static dof_hdr_t *
12599 dtrace_dof_copyin(uintptr_t uarg, int *errp)
12601 dof_hdr_t hdr, *dof;
12603 ASSERT(!MUTEX_HELD(&dtrace_lock));
12606 * First, we're going to copyin() the sizeof (dof_hdr_t).
12608 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
12609 dtrace_dof_error(NULL, "failed to copyin DOF header");
12610 *errp = EFAULT;
12611 return (NULL);
12615 * Now we'll allocate the entire DOF and copy it in -- provided
12616 * that the length isn't outrageous.
12618 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
12619 dtrace_dof_error(&hdr, "load size exceeds maximum");
12620 *errp = E2BIG;
12621 return (NULL);
12624 if (hdr.dofh_loadsz < sizeof (hdr)) {
12625 dtrace_dof_error(&hdr, "invalid load size");
12626 *errp = EINVAL;
12627 return (NULL);
12630 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
12632 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 ||
12633 dof->dofh_loadsz != hdr.dofh_loadsz) {
12634 kmem_free(dof, hdr.dofh_loadsz);
12635 *errp = EFAULT;
12636 return (NULL);
12639 return (dof);
12642 static dof_hdr_t *
12643 dtrace_dof_property(const char *name)
12645 uchar_t *buf;
12646 uint64_t loadsz;
12647 unsigned int len, i;
12648 dof_hdr_t *dof;
12651 * Unfortunately, array of values in .conf files are always (and
12652 * only) interpreted to be integer arrays. We must read our DOF
12653 * as an integer array, and then squeeze it into a byte array.
12655 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
12656 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
12657 return (NULL);
12659 for (i = 0; i < len; i++)
12660 buf[i] = (uchar_t)(((int *)buf)[i]);
12662 if (len < sizeof (dof_hdr_t)) {
12663 ddi_prop_free(buf);
12664 dtrace_dof_error(NULL, "truncated header");
12665 return (NULL);
12668 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
12669 ddi_prop_free(buf);
12670 dtrace_dof_error(NULL, "truncated DOF");
12671 return (NULL);
12674 if (loadsz >= dtrace_dof_maxsize) {
12675 ddi_prop_free(buf);
12676 dtrace_dof_error(NULL, "oversized DOF");
12677 return (NULL);
12680 dof = kmem_alloc(loadsz, KM_SLEEP);
12681 bcopy(buf, dof, loadsz);
12682 ddi_prop_free(buf);
12684 return (dof);
12687 static void
12688 dtrace_dof_destroy(dof_hdr_t *dof)
12690 kmem_free(dof, dof->dofh_loadsz);
12694 * Return the dof_sec_t pointer corresponding to a given section index. If the
12695 * index is not valid, dtrace_dof_error() is called and NULL is returned. If
12696 * a type other than DOF_SECT_NONE is specified, the header is checked against
12697 * this type and NULL is returned if the types do not match.
12699 static dof_sec_t *
12700 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
12702 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
12703 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);
12705 if (i >= dof->dofh_secnum) {
12706 dtrace_dof_error(dof, "referenced section index is invalid");
12707 return (NULL);
12710 if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
12711 dtrace_dof_error(dof, "referenced section is not loadable");
12712 return (NULL);
12715 if (type != DOF_SECT_NONE && type != sec->dofs_type) {
12716 dtrace_dof_error(dof, "referenced section is the wrong type");
12717 return (NULL);
12720 return (sec);
12723 static dtrace_probedesc_t *
12724 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
12726 dof_probedesc_t *probe;
12727 dof_sec_t *strtab;
12728 uintptr_t daddr = (uintptr_t)dof;
12729 uintptr_t str;
12730 size_t size;
12732 if (sec->dofs_type != DOF_SECT_PROBEDESC) {
12733 dtrace_dof_error(dof, "invalid probe section");
12734 return (NULL);
12737 if (sec->dofs_align != sizeof (dof_secidx_t)) {
12738 dtrace_dof_error(dof, "bad alignment in probe description");
12739 return (NULL);
12742 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
12743 dtrace_dof_error(dof, "truncated probe description");
12744 return (NULL);
12747 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
12748 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
12750 if (strtab == NULL)
12751 return (NULL);
12753 str = daddr + strtab->dofs_offset;
12754 size = strtab->dofs_size;
12756 if (probe->dofp_provider >= strtab->dofs_size) {
12757 dtrace_dof_error(dof, "corrupt probe provider");
12758 return (NULL);
12761 (void) strncpy(desc->dtpd_provider,
12762 (char *)(str + probe->dofp_provider),
12763 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));
12765 if (probe->dofp_mod >= strtab->dofs_size) {
12766 dtrace_dof_error(dof, "corrupt probe module");
12767 return (NULL);
12770 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
12771 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));
12773 if (probe->dofp_func >= strtab->dofs_size) {
12774 dtrace_dof_error(dof, "corrupt probe function");
12775 return (NULL);
12778 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
12779 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
12781 if (probe->dofp_name >= strtab->dofs_size) {
12782 dtrace_dof_error(dof, "corrupt probe name");
12783 return (NULL);
12786 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
12787 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
12789 return (desc);
12792 static dtrace_difo_t *
12793 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12794 cred_t *cr)
12796 dtrace_difo_t *dp;
12797 size_t ttl = 0;
12798 dof_difohdr_t *dofd;
12799 uintptr_t daddr = (uintptr_t)dof;
12800 size_t max = dtrace_difo_maxsize;
12801 int i, l, n;
12803 static const struct {
12804 int section;
12805 int bufoffs;
12806 int lenoffs;
12807 int entsize;
12808 int align;
12809 const char *msg;
12810 } difo[] = {
12811 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
12812 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
12813 sizeof (dif_instr_t), "multiple DIF sections" },
12815 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
12816 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
12817 sizeof (uint64_t), "multiple integer tables" },
12819 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
12820 offsetof(dtrace_difo_t, dtdo_strlen), 0,
12821 sizeof (char), "multiple string tables" },
12823 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
12824 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
12825 sizeof (uint_t), "multiple variable tables" },
12827 { DOF_SECT_NONE, 0, 0, 0, 0, NULL }
12830 if (sec->dofs_type != DOF_SECT_DIFOHDR) {
12831 dtrace_dof_error(dof, "invalid DIFO header section");
12832 return (NULL);
12835 if (sec->dofs_align != sizeof (dof_secidx_t)) {
12836 dtrace_dof_error(dof, "bad alignment in DIFO header");
12837 return (NULL);
12840 if (sec->dofs_size < sizeof (dof_difohdr_t) ||
12841 sec->dofs_size % sizeof (dof_secidx_t)) {
12842 dtrace_dof_error(dof, "bad size in DIFO header");
12843 return (NULL);
12846 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
12847 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
12849 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
12850 dp->dtdo_rtype = dofd->dofd_rtype;
12852 for (l = 0; l < n; l++) {
12853 dof_sec_t *subsec;
12854 void **bufp;
12855 uint32_t *lenp;
12857 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
12858 dofd->dofd_links[l])) == NULL)
12859 goto err; /* invalid section link */
12861 if (ttl + subsec->dofs_size > max) {
12862 dtrace_dof_error(dof, "exceeds maximum size");
12863 goto err;
12866 ttl += subsec->dofs_size;
12868 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
12869 if (subsec->dofs_type != difo[i].section)
12870 continue;
12872 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
12873 dtrace_dof_error(dof, "section not loaded");
12874 goto err;
12877 if (subsec->dofs_align != difo[i].align) {
12878 dtrace_dof_error(dof, "bad alignment");
12879 goto err;
12882 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
12883 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
12885 if (*bufp != NULL) {
12886 dtrace_dof_error(dof, difo[i].msg);
12887 goto err;
12890 if (difo[i].entsize != subsec->dofs_entsize) {
12891 dtrace_dof_error(dof, "entry size mismatch");
12892 goto err;
12895 if (subsec->dofs_entsize != 0 &&
12896 (subsec->dofs_size % subsec->dofs_entsize) != 0) {
12897 dtrace_dof_error(dof, "corrupt entry size");
12898 goto err;
12901 *lenp = subsec->dofs_size;
12902 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
12903 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
12904 *bufp, subsec->dofs_size);
12906 if (subsec->dofs_entsize != 0)
12907 *lenp /= subsec->dofs_entsize;
12909 break;
12913 * If we encounter a loadable DIFO sub-section that is not
12914 * known to us, assume this is a broken program and fail.
12916 if (difo[i].section == DOF_SECT_NONE &&
12917 (subsec->dofs_flags & DOF_SECF_LOAD)) {
12918 dtrace_dof_error(dof, "unrecognized DIFO subsection");
12919 goto err;
12923 if (dp->dtdo_buf == NULL) {
12925 * We can't have a DIF object without DIF text.
12927 dtrace_dof_error(dof, "missing DIF text");
12928 goto err;
12932 * Before we validate the DIF object, run through the variable table
12933 * looking for the strings -- if any of their size are under, we'll set
12934 * their size to be the system-wide default string size. Note that
12935 * this should _not_ happen if the "strsize" option has been set --
12936 * in this case, the compiler should have set the size to reflect the
12937 * setting of the option.
12939 for (i = 0; i < dp->dtdo_varlen; i++) {
12940 dtrace_difv_t *v = &dp->dtdo_vartab[i];
12941 dtrace_diftype_t *t = &v->dtdv_type;
12943 if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
12944 continue;
12946 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
12947 t->dtdt_size = dtrace_strsize_default;
12950 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
12951 goto err;
12953 dtrace_difo_init(dp, vstate);
12954 return (dp);
12956 err:
12957 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
12958 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
12959 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
12960 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
12962 kmem_free(dp, sizeof (dtrace_difo_t));
12963 return (NULL);
12966 static dtrace_predicate_t *
12967 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12968 cred_t *cr)
12970 dtrace_difo_t *dp;
12972 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
12973 return (NULL);
12975 return (dtrace_predicate_create(dp));
12978 static dtrace_actdesc_t *
12979 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12980 cred_t *cr)
12982 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
12983 dof_actdesc_t *desc;
12984 dof_sec_t *difosec;
12985 size_t offs;
12986 uintptr_t daddr = (uintptr_t)dof;
12987 uint64_t arg;
12988 dtrace_actkind_t kind;
12990 if (sec->dofs_type != DOF_SECT_ACTDESC) {
12991 dtrace_dof_error(dof, "invalid action section");
12992 return (NULL);
12995 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
12996 dtrace_dof_error(dof, "truncated action description");
12997 return (NULL);
13000 if (sec->dofs_align != sizeof (uint64_t)) {
13001 dtrace_dof_error(dof, "bad alignment in action description");
13002 return (NULL);
13005 if (sec->dofs_size < sec->dofs_entsize) {
13006 dtrace_dof_error(dof, "section entry size exceeds total size");
13007 return (NULL);
13010 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
13011 dtrace_dof_error(dof, "bad entry size in action description");
13012 return (NULL);
13015 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
13016 dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
13017 return (NULL);
13020 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
13021 desc = (dof_actdesc_t *)(daddr +
13022 (uintptr_t)sec->dofs_offset + offs);
13023 kind = (dtrace_actkind_t)desc->dofa_kind;
13025 if ((DTRACEACT_ISPRINTFLIKE(kind) &&
13026 (kind != DTRACEACT_PRINTA ||
13027 desc->dofa_strtab != DOF_SECIDX_NONE)) ||
13028 (kind == DTRACEACT_DIFEXPR &&
13029 desc->dofa_strtab != DOF_SECIDX_NONE)) {
13030 dof_sec_t *strtab;
13031 char *str, *fmt;
13032 uint64_t i;
13035 * The argument to these actions is an index into the
13036 * DOF string table. For printf()-like actions, this
13037 * is the format string. For print(), this is the
13038 * CTF type of the expression result.
13040 if ((strtab = dtrace_dof_sect(dof,
13041 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
13042 goto err;
13044 str = (char *)((uintptr_t)dof +
13045 (uintptr_t)strtab->dofs_offset);
13047 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
13048 if (str[i] == '\0')
13049 break;
13052 if (i >= strtab->dofs_size) {
13053 dtrace_dof_error(dof, "bogus format string");
13054 goto err;
13057 if (i == desc->dofa_arg) {
13058 dtrace_dof_error(dof, "empty format string");
13059 goto err;
13062 i -= desc->dofa_arg;
13063 fmt = kmem_alloc(i + 1, KM_SLEEP);
13064 bcopy(&str[desc->dofa_arg], fmt, i + 1);
13065 arg = (uint64_t)(uintptr_t)fmt;
13066 } else {
13067 if (kind == DTRACEACT_PRINTA) {
13068 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
13069 arg = 0;
13070 } else {
13071 arg = desc->dofa_arg;
13075 act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
13076 desc->dofa_uarg, arg);
13078 if (last != NULL) {
13079 last->dtad_next = act;
13080 } else {
13081 first = act;
13084 last = act;
13086 if (desc->dofa_difo == DOF_SECIDX_NONE)
13087 continue;
13089 if ((difosec = dtrace_dof_sect(dof,
13090 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
13091 goto err;
13093 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);
13095 if (act->dtad_difo == NULL)
13096 goto err;
13099 ASSERT(first != NULL);
13100 return (first);
13102 err:
13103 for (act = first; act != NULL; act = next) {
13104 next = act->dtad_next;
13105 dtrace_actdesc_release(act, vstate);
13108 return (NULL);
13111 static dtrace_ecbdesc_t *
13112 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
13113 cred_t *cr)
13115 dtrace_ecbdesc_t *ep;
13116 dof_ecbdesc_t *ecb;
13117 dtrace_probedesc_t *desc;
13118 dtrace_predicate_t *pred = NULL;
13120 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
13121 dtrace_dof_error(dof, "truncated ECB description");
13122 return (NULL);
13125 if (sec->dofs_align != sizeof (uint64_t)) {
13126 dtrace_dof_error(dof, "bad alignment in ECB description");
13127 return (NULL);
13130 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
13131 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);
13133 if (sec == NULL)
13134 return (NULL);
13136 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
13137 ep->dted_uarg = ecb->dofe_uarg;
13138 desc = &ep->dted_probe;
13140 if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
13141 goto err;
13143 if (ecb->dofe_pred != DOF_SECIDX_NONE) {
13144 if ((sec = dtrace_dof_sect(dof,
13145 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
13146 goto err;
13148 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
13149 goto err;
13151 ep->dted_pred.dtpdd_predicate = pred;
13154 if (ecb->dofe_actions != DOF_SECIDX_NONE) {
13155 if ((sec = dtrace_dof_sect(dof,
13156 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
13157 goto err;
13159 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);
13161 if (ep->dted_action == NULL)
13162 goto err;
13165 return (ep);
13167 err:
13168 if (pred != NULL)
13169 dtrace_predicate_release(pred, vstate);
13170 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
13171 return (NULL);
13175 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the
13176 * specified DOF. At present, this amounts to simply adding 'ubase' to the
13177 * site of any user SETX relocations to account for load object base address.
13178 * In the future, if we need other relocations, this function can be extended.
13180 static int
13181 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase)
13183 uintptr_t daddr = (uintptr_t)dof;
13184 uintptr_t ts_end;
13185 dof_relohdr_t *dofr =
13186 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
13187 dof_sec_t *ss, *rs, *ts;
13188 dof_relodesc_t *r;
13189 uint_t i, n;
13191 if (sec->dofs_size < sizeof (dof_relohdr_t) ||
13192 sec->dofs_align != sizeof (dof_secidx_t)) {
13193 dtrace_dof_error(dof, "invalid relocation header");
13194 return (-1);
13197 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab);
13198 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec);
13199 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec);
13200 ts_end = (uintptr_t)ts + sizeof (dof_sec_t);
13202 if (ss == NULL || rs == NULL || ts == NULL)
13203 return (-1); /* dtrace_dof_error() has been called already */
13205 if (rs->dofs_entsize < sizeof (dof_relodesc_t) ||
13206 rs->dofs_align != sizeof (uint64_t)) {
13207 dtrace_dof_error(dof, "invalid relocation section");
13208 return (-1);
13211 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset);
13212 n = rs->dofs_size / rs->dofs_entsize;
13214 for (i = 0; i < n; i++) {
13215 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset;
13217 switch (r->dofr_type) {
13218 case DOF_RELO_NONE:
13219 break;
13220 case DOF_RELO_SETX:
13221 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset +
13222 sizeof (uint64_t) > ts->dofs_size) {
13223 dtrace_dof_error(dof, "bad relocation offset");
13224 return (-1);
13227 if (taddr >= (uintptr_t)ts && taddr < ts_end) {
13228 dtrace_dof_error(dof, "bad relocation offset");
13229 return (-1);
13232 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) {
13233 dtrace_dof_error(dof, "misaligned setx relo");
13234 return (-1);
13237 *(uint64_t *)taddr += ubase;
13238 break;
13239 default:
13240 dtrace_dof_error(dof, "invalid relocation type");
13241 return (-1);
13244 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize);
13247 return (0);
13251 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
13252 * header: it should be at the front of a memory region that is at least
13253 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
13254 * size. It need not be validated in any other way.
13256 static int
13257 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
13258 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes)
13260 uint64_t len = dof->dofh_loadsz, seclen;
13261 uintptr_t daddr = (uintptr_t)dof;
13262 dtrace_ecbdesc_t *ep;
13263 dtrace_enabling_t *enab;
13264 uint_t i;
13266 ASSERT(MUTEX_HELD(&dtrace_lock));
13267 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
13270 * Check the DOF header identification bytes. In addition to checking
13271 * valid settings, we also verify that unused bits/bytes are zeroed so
13272 * we can use them later without fear of regressing existing binaries.
13274 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
13275 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
13276 dtrace_dof_error(dof, "DOF magic string mismatch");
13277 return (-1);
13280 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
13281 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
13282 dtrace_dof_error(dof, "DOF has invalid data model");
13283 return (-1);
13286 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
13287 dtrace_dof_error(dof, "DOF encoding mismatch");
13288 return (-1);
13291 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
13292 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) {
13293 dtrace_dof_error(dof, "DOF version mismatch");
13294 return (-1);
13297 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
13298 dtrace_dof_error(dof, "DOF uses unsupported instruction set");
13299 return (-1);
13302 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
13303 dtrace_dof_error(dof, "DOF uses too many integer registers");
13304 return (-1);
13307 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
13308 dtrace_dof_error(dof, "DOF uses too many tuple registers");
13309 return (-1);
13312 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
13313 if (dof->dofh_ident[i] != 0) {
13314 dtrace_dof_error(dof, "DOF has invalid ident byte set");
13315 return (-1);
13319 if (dof->dofh_flags & ~DOF_FL_VALID) {
13320 dtrace_dof_error(dof, "DOF has invalid flag bits set");
13321 return (-1);
13324 if (dof->dofh_secsize == 0) {
13325 dtrace_dof_error(dof, "zero section header size");
13326 return (-1);
13330 * Check that the section headers don't exceed the amount of DOF
13331 * data. Note that we cast the section size and number of sections
13332 * to uint64_t's to prevent possible overflow in the multiplication.
13334 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;
13336 if (dof->dofh_secoff > len || seclen > len ||
13337 dof->dofh_secoff + seclen > len) {
13338 dtrace_dof_error(dof, "truncated section headers");
13339 return (-1);
13342 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
13343 dtrace_dof_error(dof, "misaligned section headers");
13344 return (-1);
13347 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
13348 dtrace_dof_error(dof, "misaligned section size");
13349 return (-1);
13353 * Take an initial pass through the section headers to be sure that
13354 * the headers don't have stray offsets. If the 'noprobes' flag is
13355 * set, do not permit sections relating to providers, probes, or args.
13357 for (i = 0; i < dof->dofh_secnum; i++) {
13358 dof_sec_t *sec = (dof_sec_t *)(daddr +
13359 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13361 if (noprobes) {
13362 switch (sec->dofs_type) {
13363 case DOF_SECT_PROVIDER:
13364 case DOF_SECT_PROBES:
13365 case DOF_SECT_PRARGS:
13366 case DOF_SECT_PROFFS:
13367 dtrace_dof_error(dof, "illegal sections "
13368 "for enabling");
13369 return (-1);
13373 if (DOF_SEC_ISLOADABLE(sec->dofs_type) &&
13374 !(sec->dofs_flags & DOF_SECF_LOAD)) {
13375 dtrace_dof_error(dof, "loadable section with load "
13376 "flag unset");
13377 return (-1);
13380 if (!(sec->dofs_flags & DOF_SECF_LOAD))
13381 continue; /* just ignore non-loadable sections */
13383 if (!ISP2(sec->dofs_align)) {
13384 dtrace_dof_error(dof, "bad section alignment");
13385 return (-1);
13388 if (sec->dofs_offset & (sec->dofs_align - 1)) {
13389 dtrace_dof_error(dof, "misaligned section");
13390 return (-1);
13393 if (sec->dofs_offset > len || sec->dofs_size > len ||
13394 sec->dofs_offset + sec->dofs_size > len) {
13395 dtrace_dof_error(dof, "corrupt section header");
13396 return (-1);
13399 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
13400 sec->dofs_offset + sec->dofs_size - 1) != '\0') {
13401 dtrace_dof_error(dof, "non-terminating string table");
13402 return (-1);
13407 * Take a second pass through the sections and locate and perform any
13408 * relocations that are present. We do this after the first pass to
13409 * be sure that all sections have had their headers validated.
13411 for (i = 0; i < dof->dofh_secnum; i++) {
13412 dof_sec_t *sec = (dof_sec_t *)(daddr +
13413 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13415 if (!(sec->dofs_flags & DOF_SECF_LOAD))
13416 continue; /* skip sections that are not loadable */
13418 switch (sec->dofs_type) {
13419 case DOF_SECT_URELHDR:
13420 if (dtrace_dof_relocate(dof, sec, ubase) != 0)
13421 return (-1);
13422 break;
13426 if ((enab = *enabp) == NULL)
13427 enab = *enabp = dtrace_enabling_create(vstate);
13429 for (i = 0; i < dof->dofh_secnum; i++) {
13430 dof_sec_t *sec = (dof_sec_t *)(daddr +
13431 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13433 if (sec->dofs_type != DOF_SECT_ECBDESC)
13434 continue;
13436 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) {
13437 dtrace_enabling_destroy(enab);
13438 *enabp = NULL;
13439 return (-1);
13442 dtrace_enabling_add(enab, ep);
13445 return (0);
13449 * Process DOF for any options. This routine assumes that the DOF has been
13450 * at least processed by dtrace_dof_slurp().
13452 static int
13453 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
13455 int i, rval;
13456 uint32_t entsize;
13457 size_t offs;
13458 dof_optdesc_t *desc;
13460 for (i = 0; i < dof->dofh_secnum; i++) {
13461 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
13462 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13464 if (sec->dofs_type != DOF_SECT_OPTDESC)
13465 continue;
13467 if (sec->dofs_align != sizeof (uint64_t)) {
13468 dtrace_dof_error(dof, "bad alignment in "
13469 "option description");
13470 return (EINVAL);
13473 if ((entsize = sec->dofs_entsize) == 0) {
13474 dtrace_dof_error(dof, "zeroed option entry size");
13475 return (EINVAL);
13478 if (entsize < sizeof (dof_optdesc_t)) {
13479 dtrace_dof_error(dof, "bad option entry size");
13480 return (EINVAL);
13483 for (offs = 0; offs < sec->dofs_size; offs += entsize) {
13484 desc = (dof_optdesc_t *)((uintptr_t)dof +
13485 (uintptr_t)sec->dofs_offset + offs);
13487 if (desc->dofo_strtab != DOF_SECIDX_NONE) {
13488 dtrace_dof_error(dof, "non-zero option string");
13489 return (EINVAL);
13492 if (desc->dofo_value == DTRACEOPT_UNSET) {
13493 dtrace_dof_error(dof, "unset option");
13494 return (EINVAL);
13497 if ((rval = dtrace_state_option(state,
13498 desc->dofo_option, desc->dofo_value)) != 0) {
13499 dtrace_dof_error(dof, "rejected option");
13500 return (rval);
13505 return (0);
13509 * DTrace Consumer State Functions
13512 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
13514 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
13515 void *base;
13516 uintptr_t limit;
13517 dtrace_dynvar_t *dvar, *next, *start;
13518 int i;
13520 ASSERT(MUTEX_HELD(&dtrace_lock));
13521 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
13523 bzero(dstate, sizeof (dtrace_dstate_t));
13525 if ((dstate->dtds_chunksize = chunksize) == 0)
13526 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
13528 VERIFY(dstate->dtds_chunksize < LONG_MAX);
13530 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
13531 size = min;
13533 if ((base = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL)
13534 return (ENOMEM);
13536 dstate->dtds_size = size;
13537 dstate->dtds_base = base;
13538 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
13539 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
13541 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
13543 if (hashsize != 1 && (hashsize & 1))
13544 hashsize--;
13546 dstate->dtds_hashsize = hashsize;
13547 dstate->dtds_hash = dstate->dtds_base;
13550 * Set all of our hash buckets to point to the single sink, and (if
13551 * it hasn't already been set), set the sink's hash value to be the
13552 * sink sentinel value. The sink is needed for dynamic variable
13553 * lookups to know that they have iterated over an entire, valid hash
13554 * chain.
13556 for (i = 0; i < hashsize; i++)
13557 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;
13559 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
13560 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;
13563 * Determine number of active CPUs. Divide free list evenly among
13564 * active CPUs.
13566 start = (dtrace_dynvar_t *)
13567 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
13568 limit = (uintptr_t)base + size;
13570 VERIFY((uintptr_t)start < limit);
13571 VERIFY((uintptr_t)start >= (uintptr_t)base);
13573 maxper = (limit - (uintptr_t)start) / NCPU;
13574 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
13576 for (i = 0; i < NCPU; i++) {
13577 dstate->dtds_percpu[i].dtdsc_free = dvar = start;
13580 * If we don't even have enough chunks to make it once through
13581 * NCPUs, we're just going to allocate everything to the first
13582 * CPU. And if we're on the last CPU, we're going to allocate
13583 * whatever is left over. In either case, we set the limit to
13584 * be the limit of the dynamic variable space.
13586 if (maxper == 0 || i == NCPU - 1) {
13587 limit = (uintptr_t)base + size;
13588 start = NULL;
13589 } else {
13590 limit = (uintptr_t)start + maxper;
13591 start = (dtrace_dynvar_t *)limit;
13594 VERIFY(limit <= (uintptr_t)base + size);
13596 for (;;) {
13597 next = (dtrace_dynvar_t *)((uintptr_t)dvar +
13598 dstate->dtds_chunksize);
13600 if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
13601 break;
13603 VERIFY((uintptr_t)dvar >= (uintptr_t)base &&
13604 (uintptr_t)dvar <= (uintptr_t)base + size);
13605 dvar->dtdv_next = next;
13606 dvar = next;
13609 if (maxper == 0)
13610 break;
13613 return (0);
13616 void
13617 dtrace_dstate_fini(dtrace_dstate_t *dstate)
13619 ASSERT(MUTEX_HELD(&cpu_lock));
13621 if (dstate->dtds_base == NULL)
13622 return;
13624 kmem_free(dstate->dtds_base, dstate->dtds_size);
13625 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu);
13628 static void
13629 dtrace_vstate_fini(dtrace_vstate_t *vstate)
13632 * Logical XOR, where are you?
13634 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));
13636 if (vstate->dtvs_nglobals > 0) {
13637 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
13638 sizeof (dtrace_statvar_t *));
13641 if (vstate->dtvs_ntlocals > 0) {
13642 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
13643 sizeof (dtrace_difv_t));
13646 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));
13648 if (vstate->dtvs_nlocals > 0) {
13649 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
13650 sizeof (dtrace_statvar_t *));
13654 static void
13655 dtrace_state_clean(dtrace_state_t *state)
13657 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
13658 return;
13660 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
13661 dtrace_speculation_clean(state);
13664 static void
13665 dtrace_state_deadman(dtrace_state_t *state)
13667 hrtime_t now;
13669 dtrace_sync();
13671 now = dtrace_gethrtime();
13673 if (state != dtrace_anon.dta_state &&
13674 now - state->dts_laststatus >= dtrace_deadman_user)
13675 return;
13678 * We must be sure that dts_alive never appears to be less than the
13679 * value upon entry to dtrace_state_deadman(), and because we lack a
13680 * dtrace_cas64(), we cannot store to it atomically. We thus instead
13681 * store INT64_MAX to it, followed by a memory barrier, followed by
13682 * the new value. This assures that dts_alive never appears to be
13683 * less than its true value, regardless of the order in which the
13684 * stores to the underlying storage are issued.
13686 state->dts_alive = INT64_MAX;
13687 dtrace_membar_producer();
13688 state->dts_alive = now;
13691 dtrace_state_t *
13692 dtrace_state_create(dev_t *devp, cred_t *cr)
13694 minor_t minor;
13695 major_t major;
13696 char c[30];
13697 dtrace_state_t *state;
13698 dtrace_optval_t *opt;
13699 int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
13701 ASSERT(MUTEX_HELD(&dtrace_lock));
13702 ASSERT(MUTEX_HELD(&cpu_lock));
13704 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1,
13705 VM_BESTFIT | VM_SLEEP);
13707 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) {
13708 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
13709 return (NULL);
13712 state = ddi_get_soft_state(dtrace_softstate, minor);
13713 state->dts_epid = DTRACE_EPIDNONE + 1;
13715 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor);
13716 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1,
13717 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
13719 if (devp != NULL) {
13720 major = getemajor(*devp);
13721 } else {
13722 major = ddi_driver_major(dtrace_devi);
13725 state->dts_dev = makedevice(major, minor);
13727 if (devp != NULL)
13728 *devp = state->dts_dev;
13731 * We allocate NCPU buffers. On the one hand, this can be quite
13732 * a bit of memory per instance (nearly 36K on a Starcat). On the
13733 * other hand, it saves an additional memory reference in the probe
13734 * path.
13736 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
13737 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
13738 state->dts_cleaner = CYCLIC_NONE;
13739 state->dts_deadman = CYCLIC_NONE;
13740 state->dts_vstate.dtvs_state = state;
13742 for (i = 0; i < DTRACEOPT_MAX; i++)
13743 state->dts_options[i] = DTRACEOPT_UNSET;
13746 * Set the default options.
13748 opt = state->dts_options;
13749 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
13750 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
13751 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
13752 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
13753 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
13754 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
13755 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
13756 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
13757 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
13758 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
13759 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
13760 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
13761 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
13762 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
13764 state->dts_activity = DTRACE_ACTIVITY_INACTIVE;
13767 * Depending on the user credentials, we set flag bits which alter probe
13768 * visibility or the amount of destructiveness allowed. In the case of
13769 * actual anonymous tracing, or the possession of all privileges, all of
13770 * the normal checks are bypassed.
13772 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
13773 state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
13774 state->dts_cred.dcr_action = DTRACE_CRA_ALL;
13775 } else {
13777 * Set up the credentials for this instantiation. We take a
13778 * hold on the credential to prevent it from disappearing on
13779 * us; this in turn prevents the zone_t referenced by this
13780 * credential from disappearing. This means that we can
13781 * examine the credential and the zone from probe context.
13783 crhold(cr);
13784 state->dts_cred.dcr_cred = cr;
13787 * CRA_PROC means "we have *some* privilege for dtrace" and
13788 * unlocks the use of variables like pid, zonename, etc.
13790 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
13791 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13792 state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
13796 * dtrace_user allows use of syscall and profile providers.
13797 * If the user also has proc_owner and/or proc_zone, we
13798 * extend the scope to include additional visibility and
13799 * destructive power.
13801 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
13802 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
13803 state->dts_cred.dcr_visible |=
13804 DTRACE_CRV_ALLPROC;
13806 state->dts_cred.dcr_action |=
13807 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13810 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
13811 state->dts_cred.dcr_visible |=
13812 DTRACE_CRV_ALLZONE;
13814 state->dts_cred.dcr_action |=
13815 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13819 * If we have all privs in whatever zone this is,
13820 * we can do destructive things to processes which
13821 * have altered credentials.
13823 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
13824 cr->cr_zone->zone_privset)) {
13825 state->dts_cred.dcr_action |=
13826 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
13831 * Holding the dtrace_kernel privilege also implies that
13832 * the user has the dtrace_user privilege from a visibility
13833 * perspective. But without further privileges, some
13834 * destructive actions are not available.
13836 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
13838 * Make all probes in all zones visible. However,
13839 * this doesn't mean that all actions become available
13840 * to all zones.
13842 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
13843 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
13845 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
13846 DTRACE_CRA_PROC;
13848 * Holding proc_owner means that destructive actions
13849 * for *this* zone are allowed.
13851 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13852 state->dts_cred.dcr_action |=
13853 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13856 * Holding proc_zone means that destructive actions
13857 * for this user/group ID in all zones is allowed.
13859 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13860 state->dts_cred.dcr_action |=
13861 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13864 * If we have all privs in whatever zone this is,
13865 * we can do destructive things to processes which
13866 * have altered credentials.
13868 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
13869 cr->cr_zone->zone_privset)) {
13870 state->dts_cred.dcr_action |=
13871 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
13876 * Holding the dtrace_proc privilege gives control over fasttrap
13877 * and pid providers. We need to grant wider destructive
13878 * privileges in the event that the user has proc_owner and/or
13879 * proc_zone.
13881 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13882 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13883 state->dts_cred.dcr_action |=
13884 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13886 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13887 state->dts_cred.dcr_action |=
13888 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13892 return (state);
13895 static int
13896 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
13898 dtrace_optval_t *opt = state->dts_options, size;
13899 processorid_t cpu;
13900 int flags = 0, rval, factor, divisor = 1;
13902 ASSERT(MUTEX_HELD(&dtrace_lock));
13903 ASSERT(MUTEX_HELD(&cpu_lock));
13904 ASSERT(which < DTRACEOPT_MAX);
13905 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
13906 (state == dtrace_anon.dta_state &&
13907 state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
13909 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
13910 return (0);
13912 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
13913 cpu = opt[DTRACEOPT_CPU];
13915 if (which == DTRACEOPT_SPECSIZE)
13916 flags |= DTRACEBUF_NOSWITCH;
13918 if (which == DTRACEOPT_BUFSIZE) {
13919 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
13920 flags |= DTRACEBUF_RING;
13922 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
13923 flags |= DTRACEBUF_FILL;
13925 if (state != dtrace_anon.dta_state ||
13926 state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
13927 flags |= DTRACEBUF_INACTIVE;
13930 for (size = opt[which]; size >= sizeof (uint64_t); size /= divisor) {
13932 * The size must be 8-byte aligned. If the size is not 8-byte
13933 * aligned, drop it down by the difference.
13935 if (size & (sizeof (uint64_t) - 1))
13936 size -= size & (sizeof (uint64_t) - 1);
13938 if (size < state->dts_reserve) {
13940 * Buffers always must be large enough to accommodate
13941 * their prereserved space. We return E2BIG instead
13942 * of ENOMEM in this case to allow for user-level
13943 * software to differentiate the cases.
13945 return (E2BIG);
13948 rval = dtrace_buffer_alloc(buf, size, flags, cpu, &factor);
13950 if (rval != ENOMEM) {
13951 opt[which] = size;
13952 return (rval);
13955 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13956 return (rval);
13958 for (divisor = 2; divisor < factor; divisor <<= 1)
13959 continue;
13962 return (ENOMEM);
13965 static int
13966 dtrace_state_buffers(dtrace_state_t *state)
13968 dtrace_speculation_t *spec = state->dts_speculations;
13969 int rval, i;
13971 if ((rval = dtrace_state_buffer(state, state->dts_buffer,
13972 DTRACEOPT_BUFSIZE)) != 0)
13973 return (rval);
13975 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
13976 DTRACEOPT_AGGSIZE)) != 0)
13977 return (rval);
13979 for (i = 0; i < state->dts_nspeculations; i++) {
13980 if ((rval = dtrace_state_buffer(state,
13981 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
13982 return (rval);
13985 return (0);
13988 static void
13989 dtrace_state_prereserve(dtrace_state_t *state)
13991 dtrace_ecb_t *ecb;
13992 dtrace_probe_t *probe;
13994 state->dts_reserve = 0;
13996 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
13997 return;
14000 * If our buffer policy is a "fill" buffer policy, we need to set the
14001 * prereserved space to be the space required by the END probes.
14003 probe = dtrace_probes[dtrace_probeid_end - 1];
14004 ASSERT(probe != NULL);
14006 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
14007 if (ecb->dte_state != state)
14008 continue;
14010 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
14014 static int
14015 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
14017 dtrace_optval_t *opt = state->dts_options, sz, nspec;
14018 dtrace_speculation_t *spec;
14019 dtrace_buffer_t *buf;
14020 cyc_handler_t hdlr;
14021 cyc_time_t when;
14022 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
14023 dtrace_icookie_t cookie;
14025 mutex_enter(&cpu_lock);
14026 mutex_enter(&dtrace_lock);
14028 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
14029 rval = EBUSY;
14030 goto out;
14034 * Before we can perform any checks, we must prime all of the
14035 * retained enablings that correspond to this state.
14037 dtrace_enabling_prime(state);
14039 if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
14040 rval = EACCES;
14041 goto out;
14044 dtrace_state_prereserve(state);
14047 * Now we want to do is try to allocate our speculations.
14048 * We do not automatically resize the number of speculations; if
14049 * this fails, we will fail the operation.
14051 nspec = opt[DTRACEOPT_NSPEC];
14052 ASSERT(nspec != DTRACEOPT_UNSET);
14054 if (nspec > INT_MAX) {
14055 rval = ENOMEM;
14056 goto out;
14059 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t),
14060 KM_NOSLEEP | KM_NORMALPRI);
14062 if (spec == NULL) {
14063 rval = ENOMEM;
14064 goto out;
14067 state->dts_speculations = spec;
14068 state->dts_nspeculations = (int)nspec;
14070 for (i = 0; i < nspec; i++) {
14071 if ((buf = kmem_zalloc(bufsize,
14072 KM_NOSLEEP | KM_NORMALPRI)) == NULL) {
14073 rval = ENOMEM;
14074 goto err;
14077 spec[i].dtsp_buffer = buf;
14080 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
14081 if (dtrace_anon.dta_state == NULL) {
14082 rval = ENOENT;
14083 goto out;
14086 if (state->dts_necbs != 0) {
14087 rval = EALREADY;
14088 goto out;
14091 state->dts_anon = dtrace_anon_grab();
14092 ASSERT(state->dts_anon != NULL);
14093 state = state->dts_anon;
14096 * We want "grabanon" to be set in the grabbed state, so we'll
14097 * copy that option value from the grabbing state into the
14098 * grabbed state.
14100 state->dts_options[DTRACEOPT_GRABANON] =
14101 opt[DTRACEOPT_GRABANON];
14103 *cpu = dtrace_anon.dta_beganon;
14106 * If the anonymous state is active (as it almost certainly
14107 * is if the anonymous enabling ultimately matched anything),
14108 * we don't allow any further option processing -- but we
14109 * don't return failure.
14111 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
14112 goto out;
14115 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
14116 opt[DTRACEOPT_AGGSIZE] != 0) {
14117 if (state->dts_aggregations == NULL) {
14119 * We're not going to create an aggregation buffer
14120 * because we don't have any ECBs that contain
14121 * aggregations -- set this option to 0.
14123 opt[DTRACEOPT_AGGSIZE] = 0;
14124 } else {
14126 * If we have an aggregation buffer, we must also have
14127 * a buffer to use as scratch.
14129 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
14130 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) {
14131 opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
14136 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
14137 opt[DTRACEOPT_SPECSIZE] != 0) {
14138 if (!state->dts_speculates) {
14140 * We're not going to create speculation buffers
14141 * because we don't have any ECBs that actually
14142 * speculate -- set the speculation size to 0.
14144 opt[DTRACEOPT_SPECSIZE] = 0;
14149 * The bare minimum size for any buffer that we're actually going to
14150 * do anything to is sizeof (uint64_t).
14152 sz = sizeof (uint64_t);
14154 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
14155 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
14156 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
14158 * A buffer size has been explicitly set to 0 (or to a size
14159 * that will be adjusted to 0) and we need the space -- we
14160 * need to return failure. We return ENOSPC to differentiate
14161 * it from failing to allocate a buffer due to failure to meet
14162 * the reserve (for which we return E2BIG).
14164 rval = ENOSPC;
14165 goto out;
14168 if ((rval = dtrace_state_buffers(state)) != 0)
14169 goto err;
14171 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
14172 sz = dtrace_dstate_defsize;
14174 do {
14175 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);
14177 if (rval == 0)
14178 break;
14180 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
14181 goto err;
14182 } while (sz >>= 1);
14184 opt[DTRACEOPT_DYNVARSIZE] = sz;
14186 if (rval != 0)
14187 goto err;
14189 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
14190 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;
14192 if (opt[DTRACEOPT_CLEANRATE] == 0)
14193 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
14195 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
14196 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;
14198 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
14199 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
14201 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
14202 hdlr.cyh_arg = state;
14203 hdlr.cyh_level = CY_LOW_LEVEL;
14205 when.cyt_when = 0;
14206 when.cyt_interval = opt[DTRACEOPT_CLEANRATE];
14208 state->dts_cleaner = cyclic_add(&hdlr, &when);
14210 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
14211 hdlr.cyh_arg = state;
14212 hdlr.cyh_level = CY_LOW_LEVEL;
14214 when.cyt_when = 0;
14215 when.cyt_interval = dtrace_deadman_interval;
14217 state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
14218 state->dts_deadman = cyclic_add(&hdlr, &when);
14220 state->dts_activity = DTRACE_ACTIVITY_WARMUP;
14222 if (state->dts_getf != 0 &&
14223 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) {
14225 * We don't have kernel privs but we have at least one call
14226 * to getf(); we need to bump our zone's count, and (if
14227 * this is the first enabling to have an unprivileged call
14228 * to getf()) we need to hook into closef().
14230 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf++;
14232 if (dtrace_getf++ == 0) {
14233 ASSERT(dtrace_closef == NULL);
14234 dtrace_closef = dtrace_getf_barrier;
14239 * Now it's time to actually fire the BEGIN probe. We need to disable
14240 * interrupts here both to record the CPU on which we fired the BEGIN
14241 * probe (the data from this CPU will be processed first at user
14242 * level) and to manually activate the buffer for this CPU.
14244 cookie = dtrace_interrupt_disable();
14245 *cpu = CPU->cpu_id;
14246 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
14247 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
14249 dtrace_probe(dtrace_probeid_begin,
14250 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
14251 dtrace_interrupt_enable(cookie);
14253 * We may have had an exit action from a BEGIN probe; only change our
14254 * state to ACTIVE if we're still in WARMUP.
14256 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
14257 state->dts_activity == DTRACE_ACTIVITY_DRAINING);
14259 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
14260 state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
14263 * Regardless of whether or not now we're in ACTIVE or DRAINING, we
14264 * want each CPU to transition its principal buffer out of the
14265 * INACTIVE state. Doing this assures that no CPU will suddenly begin
14266 * processing an ECB halfway down a probe's ECB chain; all CPUs will
14267 * atomically transition from processing none of a state's ECBs to
14268 * processing all of them.
14270 dtrace_xcall(DTRACE_CPUALL,
14271 (dtrace_xcall_t)dtrace_buffer_activate, state);
14272 goto out;
14274 err:
14275 dtrace_buffer_free(state->dts_buffer);
14276 dtrace_buffer_free(state->dts_aggbuffer);
14278 if ((nspec = state->dts_nspeculations) == 0) {
14279 ASSERT(state->dts_speculations == NULL);
14280 goto out;
14283 spec = state->dts_speculations;
14284 ASSERT(spec != NULL);
14286 for (i = 0; i < state->dts_nspeculations; i++) {
14287 if ((buf = spec[i].dtsp_buffer) == NULL)
14288 break;
14290 dtrace_buffer_free(buf);
14291 kmem_free(buf, bufsize);
14294 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
14295 state->dts_nspeculations = 0;
14296 state->dts_speculations = NULL;
14298 out:
14299 mutex_exit(&dtrace_lock);
14300 mutex_exit(&cpu_lock);
14302 return (rval);
14305 static int
14306 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
14308 dtrace_icookie_t cookie;
14310 ASSERT(MUTEX_HELD(&dtrace_lock));
14312 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
14313 state->dts_activity != DTRACE_ACTIVITY_DRAINING)
14314 return (EINVAL);
14317 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
14318 * to be sure that every CPU has seen it. See below for the details
14319 * on why this is done.
14321 state->dts_activity = DTRACE_ACTIVITY_DRAINING;
14322 dtrace_sync();
14325 * By this point, it is impossible for any CPU to be still processing
14326 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to
14327 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
14328 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe()
14329 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
14330 * iff we're in the END probe.
14332 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
14333 dtrace_sync();
14334 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
14337 * Finally, we can release the reserve and call the END probe. We
14338 * disable interrupts across calling the END probe to allow us to
14339 * return the CPU on which we actually called the END probe. This
14340 * allows user-land to be sure that this CPU's principal buffer is
14341 * processed last.
14343 state->dts_reserve = 0;
14345 cookie = dtrace_interrupt_disable();
14346 *cpu = CPU->cpu_id;
14347 dtrace_probe(dtrace_probeid_end,
14348 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
14349 dtrace_interrupt_enable(cookie);
14351 state->dts_activity = DTRACE_ACTIVITY_STOPPED;
14352 dtrace_sync();
14354 if (state->dts_getf != 0 &&
14355 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) {
14357 * We don't have kernel privs but we have at least one call
14358 * to getf(); we need to lower our zone's count, and (if
14359 * this is the last enabling to have an unprivileged call
14360 * to getf()) we need to clear the closef() hook.
14362 ASSERT(state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf > 0);
14363 ASSERT(dtrace_closef == dtrace_getf_barrier);
14364 ASSERT(dtrace_getf > 0);
14366 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf--;
14368 if (--dtrace_getf == 0)
14369 dtrace_closef = NULL;
14372 return (0);
14375 static int
14376 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
14377 dtrace_optval_t val)
14379 ASSERT(MUTEX_HELD(&dtrace_lock));
14381 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
14382 return (EBUSY);
14384 if (option >= DTRACEOPT_MAX)
14385 return (EINVAL);
14387 if (option != DTRACEOPT_CPU && val < 0)
14388 return (EINVAL);
14390 switch (option) {
14391 case DTRACEOPT_DESTRUCTIVE:
14392 if (dtrace_destructive_disallow)
14393 return (EACCES);
14395 state->dts_cred.dcr_destructive = 1;
14396 break;
14398 case DTRACEOPT_BUFSIZE:
14399 case DTRACEOPT_DYNVARSIZE:
14400 case DTRACEOPT_AGGSIZE:
14401 case DTRACEOPT_SPECSIZE:
14402 case DTRACEOPT_STRSIZE:
14403 if (val < 0)
14404 return (EINVAL);
14406 if (val >= LONG_MAX) {
14408 * If this is an otherwise negative value, set it to
14409 * the highest multiple of 128m less than LONG_MAX.
14410 * Technically, we're adjusting the size without
14411 * regard to the buffer resizing policy, but in fact,
14412 * this has no effect -- if we set the buffer size to
14413 * ~LONG_MAX and the buffer policy is ultimately set to
14414 * be "manual", the buffer allocation is guaranteed to
14415 * fail, if only because the allocation requires two
14416 * buffers. (We set the the size to the highest
14417 * multiple of 128m because it ensures that the size
14418 * will remain a multiple of a megabyte when
14419 * repeatedly halved -- all the way down to 15m.)
14421 val = LONG_MAX - (1 << 27) + 1;
14425 state->dts_options[option] = val;
14427 return (0);
14430 static void
14431 dtrace_state_destroy(dtrace_state_t *state)
14433 dtrace_ecb_t *ecb;
14434 dtrace_vstate_t *vstate = &state->dts_vstate;
14435 minor_t minor = getminor(state->dts_dev);
14436 int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
14437 dtrace_speculation_t *spec = state->dts_speculations;
14438 int nspec = state->dts_nspeculations;
14439 uint32_t match;
14441 ASSERT(MUTEX_HELD(&dtrace_lock));
14442 ASSERT(MUTEX_HELD(&cpu_lock));
14445 * First, retract any retained enablings for this state.
14447 dtrace_enabling_retract(state);
14448 ASSERT(state->dts_nretained == 0);
14450 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
14451 state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
14453 * We have managed to come into dtrace_state_destroy() on a
14454 * hot enabling -- almost certainly because of a disorderly
14455 * shutdown of a consumer. (That is, a consumer that is
14456 * exiting without having called dtrace_stop().) In this case,
14457 * we're going to set our activity to be KILLED, and then
14458 * issue a sync to be sure that everyone is out of probe
14459 * context before we start blowing away ECBs.
14461 state->dts_activity = DTRACE_ACTIVITY_KILLED;
14462 dtrace_sync();
14466 * Release the credential hold we took in dtrace_state_create().
14468 if (state->dts_cred.dcr_cred != NULL)
14469 crfree(state->dts_cred.dcr_cred);
14472 * Now we can safely disable and destroy any enabled probes. Because
14473 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
14474 * (especially if they're all enabled), we take two passes through the
14475 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and
14476 * in the second we disable whatever is left over.
14478 for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
14479 for (i = 0; i < state->dts_necbs; i++) {
14480 if ((ecb = state->dts_ecbs[i]) == NULL)
14481 continue;
14483 if (match && ecb->dte_probe != NULL) {
14484 dtrace_probe_t *probe = ecb->dte_probe;
14485 dtrace_provider_t *prov = probe->dtpr_provider;
14487 if (!(prov->dtpv_priv.dtpp_flags & match))
14488 continue;
14491 dtrace_ecb_disable(ecb);
14492 dtrace_ecb_destroy(ecb);
14495 if (!match)
14496 break;
14500 * Before we free the buffers, perform one more sync to assure that
14501 * every CPU is out of probe context.
14503 dtrace_sync();
14505 dtrace_buffer_free(state->dts_buffer);
14506 dtrace_buffer_free(state->dts_aggbuffer);
14508 for (i = 0; i < nspec; i++)
14509 dtrace_buffer_free(spec[i].dtsp_buffer);
14511 if (state->dts_cleaner != CYCLIC_NONE)
14512 cyclic_remove(state->dts_cleaner);
14514 if (state->dts_deadman != CYCLIC_NONE)
14515 cyclic_remove(state->dts_deadman);
14517 dtrace_dstate_fini(&vstate->dtvs_dynvars);
14518 dtrace_vstate_fini(vstate);
14519 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
14521 if (state->dts_aggregations != NULL) {
14522 #ifdef DEBUG
14523 for (i = 0; i < state->dts_naggregations; i++)
14524 ASSERT(state->dts_aggregations[i] == NULL);
14525 #endif
14526 ASSERT(state->dts_naggregations > 0);
14527 kmem_free(state->dts_aggregations,
14528 state->dts_naggregations * sizeof (dtrace_aggregation_t *));
14531 kmem_free(state->dts_buffer, bufsize);
14532 kmem_free(state->dts_aggbuffer, bufsize);
14534 for (i = 0; i < nspec; i++)
14535 kmem_free(spec[i].dtsp_buffer, bufsize);
14537 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
14539 dtrace_format_destroy(state);
14541 vmem_destroy(state->dts_aggid_arena);
14542 ddi_soft_state_free(dtrace_softstate, minor);
14543 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
14547 * DTrace Anonymous Enabling Functions
14549 static dtrace_state_t *
14550 dtrace_anon_grab(void)
14552 dtrace_state_t *state;
14554 ASSERT(MUTEX_HELD(&dtrace_lock));
14556 if ((state = dtrace_anon.dta_state) == NULL) {
14557 ASSERT(dtrace_anon.dta_enabling == NULL);
14558 return (NULL);
14561 ASSERT(dtrace_anon.dta_enabling != NULL);
14562 ASSERT(dtrace_retained != NULL);
14564 dtrace_enabling_destroy(dtrace_anon.dta_enabling);
14565 dtrace_anon.dta_enabling = NULL;
14566 dtrace_anon.dta_state = NULL;
14568 return (state);
14571 static void
14572 dtrace_anon_property(void)
14574 int i, rv;
14575 dtrace_state_t *state;
14576 dof_hdr_t *dof;
14577 char c[32]; /* enough for "dof-data-" + digits */
14579 ASSERT(MUTEX_HELD(&dtrace_lock));
14580 ASSERT(MUTEX_HELD(&cpu_lock));
14582 for (i = 0; ; i++) {
14583 (void) snprintf(c, sizeof (c), "dof-data-%d", i);
14585 dtrace_err_verbose = 1;
14587 if ((dof = dtrace_dof_property(c)) == NULL) {
14588 dtrace_err_verbose = 0;
14589 break;
14593 * We want to create anonymous state, so we need to transition
14594 * the kernel debugger to indicate that DTrace is active. If
14595 * this fails (e.g. because the debugger has modified text in
14596 * some way), we won't continue with the processing.
14598 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
14599 cmn_err(CE_NOTE, "kernel debugger active; anonymous "
14600 "enabling ignored.");
14601 dtrace_dof_destroy(dof);
14602 break;
14606 * If we haven't allocated an anonymous state, we'll do so now.
14608 if ((state = dtrace_anon.dta_state) == NULL) {
14609 state = dtrace_state_create(NULL, NULL);
14610 dtrace_anon.dta_state = state;
14612 if (state == NULL) {
14614 * This basically shouldn't happen: the only
14615 * failure mode from dtrace_state_create() is a
14616 * failure of ddi_soft_state_zalloc() that
14617 * itself should never happen. Still, the
14618 * interface allows for a failure mode, and
14619 * we want to fail as gracefully as possible:
14620 * we'll emit an error message and cease
14621 * processing anonymous state in this case.
14623 cmn_err(CE_WARN, "failed to create "
14624 "anonymous state");
14625 dtrace_dof_destroy(dof);
14626 break;
14630 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
14631 &dtrace_anon.dta_enabling, 0, B_TRUE);
14633 if (rv == 0)
14634 rv = dtrace_dof_options(dof, state);
14636 dtrace_err_verbose = 0;
14637 dtrace_dof_destroy(dof);
14639 if (rv != 0) {
14641 * This is malformed DOF; chuck any anonymous state
14642 * that we created.
14644 ASSERT(dtrace_anon.dta_enabling == NULL);
14645 dtrace_state_destroy(state);
14646 dtrace_anon.dta_state = NULL;
14647 break;
14650 ASSERT(dtrace_anon.dta_enabling != NULL);
14653 if (dtrace_anon.dta_enabling != NULL) {
14654 int rval;
14657 * dtrace_enabling_retain() can only fail because we are
14658 * trying to retain more enablings than are allowed -- but
14659 * we only have one anonymous enabling, and we are guaranteed
14660 * to be allowed at least one retained enabling; we assert
14661 * that dtrace_enabling_retain() returns success.
14663 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
14664 ASSERT(rval == 0);
14666 dtrace_enabling_dump(dtrace_anon.dta_enabling);
14671 * DTrace Helper Functions
14673 static void
14674 dtrace_helper_trace(dtrace_helper_action_t *helper,
14675 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
14677 uint32_t size, next, nnext, i;
14678 dtrace_helptrace_t *ent, *buffer;
14679 uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
14681 if ((buffer = dtrace_helptrace_buffer) == NULL)
14682 return;
14684 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
14687 * What would a tracing framework be without its own tracing
14688 * framework? (Well, a hell of a lot simpler, for starters...)
14690 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
14691 sizeof (uint64_t) - sizeof (uint64_t);
14694 * Iterate until we can allocate a slot in the trace buffer.
14696 do {
14697 next = dtrace_helptrace_next;
14699 if (next + size < dtrace_helptrace_bufsize) {
14700 nnext = next + size;
14701 } else {
14702 nnext = size;
14704 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
14707 * We have our slot; fill it in.
14709 if (nnext == size) {
14710 dtrace_helptrace_wrapped++;
14711 next = 0;
14714 ent = (dtrace_helptrace_t *)((uintptr_t)buffer + next);
14715 ent->dtht_helper = helper;
14716 ent->dtht_where = where;
14717 ent->dtht_nlocals = vstate->dtvs_nlocals;
14719 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
14720 mstate->dtms_fltoffs : -1;
14721 ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
14722 ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
14724 for (i = 0; i < vstate->dtvs_nlocals; i++) {
14725 dtrace_statvar_t *svar;
14727 if ((svar = vstate->dtvs_locals[i]) == NULL)
14728 continue;
14730 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
14731 ent->dtht_locals[i] =
14732 ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id];
14736 static uint64_t
14737 dtrace_helper(int which, dtrace_mstate_t *mstate,
14738 dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
14740 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
14741 uint64_t sarg0 = mstate->dtms_arg[0];
14742 uint64_t sarg1 = mstate->dtms_arg[1];
14743 uint64_t rval;
14744 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
14745 dtrace_helper_action_t *helper;
14746 dtrace_vstate_t *vstate;
14747 dtrace_difo_t *pred;
14748 int i, trace = dtrace_helptrace_buffer != NULL;
14750 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
14752 if (helpers == NULL)
14753 return ((uintptr_t)NULL);
14755 if ((helper = helpers->dthps_actions[which]) == NULL)
14756 return ((uintptr_t)NULL);
14758 vstate = &helpers->dthps_vstate;
14759 mstate->dtms_arg[0] = arg0;
14760 mstate->dtms_arg[1] = arg1;
14763 * Now iterate over each helper. If its predicate evaluates to 'true',
14764 * we'll call the corresponding actions. Note that the below calls
14765 * to dtrace_dif_emulate() may set faults in machine state. This is
14766 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow
14767 * the stored DIF offset with its own (which is the desired behavior).
14768 * Also, note the calls to dtrace_dif_emulate() may allocate scratch
14769 * from machine state; this is okay, too.
14771 for (; helper != NULL; helper = helper->dtha_next) {
14772 if ((pred = helper->dtha_predicate) != NULL) {
14773 if (trace)
14774 dtrace_helper_trace(helper, mstate, vstate, 0);
14776 if (!dtrace_dif_emulate(pred, mstate, vstate, state))
14777 goto next;
14779 if (*flags & CPU_DTRACE_FAULT)
14780 goto err;
14783 for (i = 0; i < helper->dtha_nactions; i++) {
14784 if (trace)
14785 dtrace_helper_trace(helper,
14786 mstate, vstate, i + 1);
14788 rval = dtrace_dif_emulate(helper->dtha_actions[i],
14789 mstate, vstate, state);
14791 if (*flags & CPU_DTRACE_FAULT)
14792 goto err;
14795 next:
14796 if (trace)
14797 dtrace_helper_trace(helper, mstate, vstate,
14798 DTRACE_HELPTRACE_NEXT);
14801 if (trace)
14802 dtrace_helper_trace(helper, mstate, vstate,
14803 DTRACE_HELPTRACE_DONE);
14806 * Restore the arg0 that we saved upon entry.
14808 mstate->dtms_arg[0] = sarg0;
14809 mstate->dtms_arg[1] = sarg1;
14811 return (rval);
14813 err:
14814 if (trace)
14815 dtrace_helper_trace(helper, mstate, vstate,
14816 DTRACE_HELPTRACE_ERR);
14819 * Restore the arg0 that we saved upon entry.
14821 mstate->dtms_arg[0] = sarg0;
14822 mstate->dtms_arg[1] = sarg1;
14824 return ((uintptr_t)NULL);
14827 static void
14828 dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
14829 dtrace_vstate_t *vstate)
14831 int i;
14833 if (helper->dtha_predicate != NULL)
14834 dtrace_difo_release(helper->dtha_predicate, vstate);
14836 for (i = 0; i < helper->dtha_nactions; i++) {
14837 ASSERT(helper->dtha_actions[i] != NULL);
14838 dtrace_difo_release(helper->dtha_actions[i], vstate);
14841 kmem_free(helper->dtha_actions,
14842 helper->dtha_nactions * sizeof (dtrace_difo_t *));
14843 kmem_free(helper, sizeof (dtrace_helper_action_t));
14846 static int
14847 dtrace_helper_destroygen(int gen)
14849 proc_t *p = curproc;
14850 dtrace_helpers_t *help = p->p_dtrace_helpers;
14851 dtrace_vstate_t *vstate;
14852 int i;
14854 ASSERT(MUTEX_HELD(&dtrace_lock));
14856 if (help == NULL || gen > help->dthps_generation)
14857 return (EINVAL);
14859 vstate = &help->dthps_vstate;
14861 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14862 dtrace_helper_action_t *last = NULL, *h, *next;
14864 for (h = help->dthps_actions[i]; h != NULL; h = next) {
14865 next = h->dtha_next;
14867 if (h->dtha_generation == gen) {
14868 if (last != NULL) {
14869 last->dtha_next = next;
14870 } else {
14871 help->dthps_actions[i] = next;
14874 dtrace_helper_action_destroy(h, vstate);
14875 } else {
14876 last = h;
14882 * Interate until we've cleared out all helper providers with the
14883 * given generation number.
14885 for (;;) {
14886 dtrace_helper_provider_t *prov;
14889 * Look for a helper provider with the right generation. We
14890 * have to start back at the beginning of the list each time
14891 * because we drop dtrace_lock. It's unlikely that we'll make
14892 * more than two passes.
14894 for (i = 0; i < help->dthps_nprovs; i++) {
14895 prov = help->dthps_provs[i];
14897 if (prov->dthp_generation == gen)
14898 break;
14902 * If there were no matches, we're done.
14904 if (i == help->dthps_nprovs)
14905 break;
14908 * Move the last helper provider into this slot.
14910 help->dthps_nprovs--;
14911 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
14912 help->dthps_provs[help->dthps_nprovs] = NULL;
14914 mutex_exit(&dtrace_lock);
14917 * If we have a meta provider, remove this helper provider.
14919 mutex_enter(&dtrace_meta_lock);
14920 if (dtrace_meta_pid != NULL) {
14921 ASSERT(dtrace_deferred_pid == NULL);
14922 dtrace_helper_provider_remove(&prov->dthp_prov,
14923 p->p_pid);
14925 mutex_exit(&dtrace_meta_lock);
14927 dtrace_helper_provider_destroy(prov);
14929 mutex_enter(&dtrace_lock);
14932 return (0);
14935 static int
14936 dtrace_helper_validate(dtrace_helper_action_t *helper)
14938 int err = 0, i;
14939 dtrace_difo_t *dp;
14941 if ((dp = helper->dtha_predicate) != NULL)
14942 err += dtrace_difo_validate_helper(dp);
14944 for (i = 0; i < helper->dtha_nactions; i++)
14945 err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
14947 return (err == 0);
14950 static int
14951 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep)
14953 dtrace_helpers_t *help;
14954 dtrace_helper_action_t *helper, *last;
14955 dtrace_actdesc_t *act;
14956 dtrace_vstate_t *vstate;
14957 dtrace_predicate_t *pred;
14958 int count = 0, nactions = 0, i;
14960 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
14961 return (EINVAL);
14963 help = curproc->p_dtrace_helpers;
14964 last = help->dthps_actions[which];
14965 vstate = &help->dthps_vstate;
14967 for (count = 0; last != NULL; last = last->dtha_next) {
14968 count++;
14969 if (last->dtha_next == NULL)
14970 break;
14974 * If we already have dtrace_helper_actions_max helper actions for this
14975 * helper action type, we'll refuse to add a new one.
14977 if (count >= dtrace_helper_actions_max)
14978 return (ENOSPC);
14980 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
14981 helper->dtha_generation = help->dthps_generation;
14983 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
14984 ASSERT(pred->dtp_difo != NULL);
14985 dtrace_difo_hold(pred->dtp_difo);
14986 helper->dtha_predicate = pred->dtp_difo;
14989 for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
14990 if (act->dtad_kind != DTRACEACT_DIFEXPR)
14991 goto err;
14993 if (act->dtad_difo == NULL)
14994 goto err;
14996 nactions++;
14999 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
15000 (helper->dtha_nactions = nactions), KM_SLEEP);
15002 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
15003 dtrace_difo_hold(act->dtad_difo);
15004 helper->dtha_actions[i++] = act->dtad_difo;
15007 if (!dtrace_helper_validate(helper))
15008 goto err;
15010 if (last == NULL) {
15011 help->dthps_actions[which] = helper;
15012 } else {
15013 last->dtha_next = helper;
15016 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
15017 dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
15018 dtrace_helptrace_next = 0;
15021 return (0);
15022 err:
15023 dtrace_helper_action_destroy(helper, vstate);
15024 return (EINVAL);
15027 static void
15028 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
15029 dof_helper_t *dofhp)
15031 ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
15033 mutex_enter(&dtrace_meta_lock);
15034 mutex_enter(&dtrace_lock);
15036 if (!dtrace_attached() || dtrace_meta_pid == NULL) {
15038 * If the dtrace module is loaded but not attached, or if
15039 * there aren't isn't a meta provider registered to deal with
15040 * these provider descriptions, we need to postpone creating
15041 * the actual providers until later.
15044 if (help->dthps_next == NULL && help->dthps_prev == NULL &&
15045 dtrace_deferred_pid != help) {
15046 help->dthps_deferred = 1;
15047 help->dthps_pid = p->p_pid;
15048 help->dthps_next = dtrace_deferred_pid;
15049 help->dthps_prev = NULL;
15050 if (dtrace_deferred_pid != NULL)
15051 dtrace_deferred_pid->dthps_prev = help;
15052 dtrace_deferred_pid = help;
15055 mutex_exit(&dtrace_lock);
15057 } else if (dofhp != NULL) {
15059 * If the dtrace module is loaded and we have a particular
15060 * helper provider description, pass that off to the
15061 * meta provider.
15064 mutex_exit(&dtrace_lock);
15066 dtrace_helper_provide(dofhp, p->p_pid);
15068 } else {
15070 * Otherwise, just pass all the helper provider descriptions
15071 * off to the meta provider.
15074 int i;
15075 mutex_exit(&dtrace_lock);
15077 for (i = 0; i < help->dthps_nprovs; i++) {
15078 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
15079 p->p_pid);
15083 mutex_exit(&dtrace_meta_lock);
15086 static int
15087 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
15089 dtrace_helpers_t *help;
15090 dtrace_helper_provider_t *hprov, **tmp_provs;
15091 uint_t tmp_maxprovs, i;
15093 ASSERT(MUTEX_HELD(&dtrace_lock));
15095 help = curproc->p_dtrace_helpers;
15096 ASSERT(help != NULL);
15099 * If we already have dtrace_helper_providers_max helper providers,
15100 * we're refuse to add a new one.
15102 if (help->dthps_nprovs >= dtrace_helper_providers_max)
15103 return (ENOSPC);
15106 * Check to make sure this isn't a duplicate.
15108 for (i = 0; i < help->dthps_nprovs; i++) {
15109 if (dofhp->dofhp_addr ==
15110 help->dthps_provs[i]->dthp_prov.dofhp_addr)
15111 return (EALREADY);
15114 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
15115 hprov->dthp_prov = *dofhp;
15116 hprov->dthp_ref = 1;
15117 hprov->dthp_generation = gen;
15120 * Allocate a bigger table for helper providers if it's already full.
15122 if (help->dthps_maxprovs == help->dthps_nprovs) {
15123 tmp_maxprovs = help->dthps_maxprovs;
15124 tmp_provs = help->dthps_provs;
15126 if (help->dthps_maxprovs == 0)
15127 help->dthps_maxprovs = 2;
15128 else
15129 help->dthps_maxprovs *= 2;
15130 if (help->dthps_maxprovs > dtrace_helper_providers_max)
15131 help->dthps_maxprovs = dtrace_helper_providers_max;
15133 ASSERT(tmp_maxprovs < help->dthps_maxprovs);
15135 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
15136 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
15138 if (tmp_provs != NULL) {
15139 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
15140 sizeof (dtrace_helper_provider_t *));
15141 kmem_free(tmp_provs, tmp_maxprovs *
15142 sizeof (dtrace_helper_provider_t *));
15146 help->dthps_provs[help->dthps_nprovs] = hprov;
15147 help->dthps_nprovs++;
15149 return (0);
15152 static void
15153 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
15155 mutex_enter(&dtrace_lock);
15157 if (--hprov->dthp_ref == 0) {
15158 dof_hdr_t *dof;
15159 mutex_exit(&dtrace_lock);
15160 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
15161 dtrace_dof_destroy(dof);
15162 kmem_free(hprov, sizeof (dtrace_helper_provider_t));
15163 } else {
15164 mutex_exit(&dtrace_lock);
15168 static int
15169 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
15171 uintptr_t daddr = (uintptr_t)dof;
15172 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
15173 dof_provider_t *provider;
15174 dof_probe_t *probe;
15175 uint8_t *arg;
15176 char *strtab, *typestr;
15177 dof_stridx_t typeidx;
15178 size_t typesz;
15179 uint_t nprobes, j, k;
15181 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
15183 if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
15184 dtrace_dof_error(dof, "misaligned section offset");
15185 return (-1);
15189 * The section needs to be large enough to contain the DOF provider
15190 * structure appropriate for the given version.
15192 if (sec->dofs_size <
15193 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
15194 offsetof(dof_provider_t, dofpv_prenoffs) :
15195 sizeof (dof_provider_t))) {
15196 dtrace_dof_error(dof, "provider section too small");
15197 return (-1);
15200 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
15201 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
15202 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
15203 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
15204 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
15206 if (str_sec == NULL || prb_sec == NULL ||
15207 arg_sec == NULL || off_sec == NULL)
15208 return (-1);
15210 enoff_sec = NULL;
15212 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
15213 provider->dofpv_prenoffs != DOF_SECT_NONE &&
15214 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
15215 provider->dofpv_prenoffs)) == NULL)
15216 return (-1);
15218 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
15220 if (provider->dofpv_name >= str_sec->dofs_size ||
15221 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
15222 dtrace_dof_error(dof, "invalid provider name");
15223 return (-1);
15226 if (prb_sec->dofs_entsize == 0 ||
15227 prb_sec->dofs_entsize > prb_sec->dofs_size) {
15228 dtrace_dof_error(dof, "invalid entry size");
15229 return (-1);
15232 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
15233 dtrace_dof_error(dof, "misaligned entry size");
15234 return (-1);
15237 if (off_sec->dofs_entsize != sizeof (uint32_t)) {
15238 dtrace_dof_error(dof, "invalid entry size");
15239 return (-1);
15242 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
15243 dtrace_dof_error(dof, "misaligned section offset");
15244 return (-1);
15247 if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
15248 dtrace_dof_error(dof, "invalid entry size");
15249 return (-1);
15252 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
15254 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
15257 * Take a pass through the probes to check for errors.
15259 for (j = 0; j < nprobes; j++) {
15260 probe = (dof_probe_t *)(uintptr_t)(daddr +
15261 prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
15263 if (probe->dofpr_func >= str_sec->dofs_size) {
15264 dtrace_dof_error(dof, "invalid function name");
15265 return (-1);
15268 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
15269 dtrace_dof_error(dof, "function name too long");
15270 return (-1);
15273 if (probe->dofpr_name >= str_sec->dofs_size ||
15274 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
15275 dtrace_dof_error(dof, "invalid probe name");
15276 return (-1);
15280 * The offset count must not wrap the index, and the offsets
15281 * must also not overflow the section's data.
15283 if (probe->dofpr_offidx + probe->dofpr_noffs <
15284 probe->dofpr_offidx ||
15285 (probe->dofpr_offidx + probe->dofpr_noffs) *
15286 off_sec->dofs_entsize > off_sec->dofs_size) {
15287 dtrace_dof_error(dof, "invalid probe offset");
15288 return (-1);
15291 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
15293 * If there's no is-enabled offset section, make sure
15294 * there aren't any is-enabled offsets. Otherwise
15295 * perform the same checks as for probe offsets
15296 * (immediately above).
15298 if (enoff_sec == NULL) {
15299 if (probe->dofpr_enoffidx != 0 ||
15300 probe->dofpr_nenoffs != 0) {
15301 dtrace_dof_error(dof, "is-enabled "
15302 "offsets with null section");
15303 return (-1);
15305 } else if (probe->dofpr_enoffidx +
15306 probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
15307 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
15308 enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
15309 dtrace_dof_error(dof, "invalid is-enabled "
15310 "offset");
15311 return (-1);
15314 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
15315 dtrace_dof_error(dof, "zero probe and "
15316 "is-enabled offsets");
15317 return (-1);
15319 } else if (probe->dofpr_noffs == 0) {
15320 dtrace_dof_error(dof, "zero probe offsets");
15321 return (-1);
15324 if (probe->dofpr_argidx + probe->dofpr_xargc <
15325 probe->dofpr_argidx ||
15326 (probe->dofpr_argidx + probe->dofpr_xargc) *
15327 arg_sec->dofs_entsize > arg_sec->dofs_size) {
15328 dtrace_dof_error(dof, "invalid args");
15329 return (-1);
15332 typeidx = probe->dofpr_nargv;
15333 typestr = strtab + probe->dofpr_nargv;
15334 for (k = 0; k < probe->dofpr_nargc; k++) {
15335 if (typeidx >= str_sec->dofs_size) {
15336 dtrace_dof_error(dof, "bad "
15337 "native argument type");
15338 return (-1);
15341 typesz = strlen(typestr) + 1;
15342 if (typesz > DTRACE_ARGTYPELEN) {
15343 dtrace_dof_error(dof, "native "
15344 "argument type too long");
15345 return (-1);
15347 typeidx += typesz;
15348 typestr += typesz;
15351 typeidx = probe->dofpr_xargv;
15352 typestr = strtab + probe->dofpr_xargv;
15353 for (k = 0; k < probe->dofpr_xargc; k++) {
15354 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
15355 dtrace_dof_error(dof, "bad "
15356 "native argument index");
15357 return (-1);
15360 if (typeidx >= str_sec->dofs_size) {
15361 dtrace_dof_error(dof, "bad "
15362 "translated argument type");
15363 return (-1);
15366 typesz = strlen(typestr) + 1;
15367 if (typesz > DTRACE_ARGTYPELEN) {
15368 dtrace_dof_error(dof, "translated argument "
15369 "type too long");
15370 return (-1);
15373 typeidx += typesz;
15374 typestr += typesz;
15378 return (0);
15381 static int
15382 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp)
15384 dtrace_helpers_t *help;
15385 dtrace_vstate_t *vstate;
15386 dtrace_enabling_t *enab = NULL;
15387 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
15388 uintptr_t daddr = (uintptr_t)dof;
15390 ASSERT(MUTEX_HELD(&dtrace_lock));
15392 if ((help = curproc->p_dtrace_helpers) == NULL)
15393 help = dtrace_helpers_create(curproc);
15395 vstate = &help->dthps_vstate;
15397 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab,
15398 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) {
15399 dtrace_dof_destroy(dof);
15400 return (rv);
15404 * Look for helper providers and validate their descriptions.
15406 if (dhp != NULL) {
15407 for (i = 0; i < dof->dofh_secnum; i++) {
15408 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
15409 dof->dofh_secoff + i * dof->dofh_secsize);
15411 if (sec->dofs_type != DOF_SECT_PROVIDER)
15412 continue;
15414 if (dtrace_helper_provider_validate(dof, sec) != 0) {
15415 dtrace_enabling_destroy(enab);
15416 dtrace_dof_destroy(dof);
15417 return (-1);
15420 nprovs++;
15425 * Now we need to walk through the ECB descriptions in the enabling.
15427 for (i = 0; i < enab->dten_ndesc; i++) {
15428 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
15429 dtrace_probedesc_t *desc = &ep->dted_probe;
15431 if (strcmp(desc->dtpd_provider, "dtrace") != 0)
15432 continue;
15434 if (strcmp(desc->dtpd_mod, "helper") != 0)
15435 continue;
15437 if (strcmp(desc->dtpd_func, "ustack") != 0)
15438 continue;
15440 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
15441 ep)) != 0) {
15443 * Adding this helper action failed -- we are now going
15444 * to rip out the entire generation and return failure.
15446 (void) dtrace_helper_destroygen(help->dthps_generation);
15447 dtrace_enabling_destroy(enab);
15448 dtrace_dof_destroy(dof);
15449 return (-1);
15452 nhelpers++;
15455 if (nhelpers < enab->dten_ndesc)
15456 dtrace_dof_error(dof, "unmatched helpers");
15458 gen = help->dthps_generation++;
15459 dtrace_enabling_destroy(enab);
15461 if (dhp != NULL && nprovs > 0) {
15463 * Now that this is in-kernel, we change the sense of the
15464 * members: dofhp_dof denotes the in-kernel copy of the DOF
15465 * and dofhp_addr denotes the address at user-level.
15467 dhp->dofhp_addr = dhp->dofhp_dof;
15468 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
15470 if (dtrace_helper_provider_add(dhp, gen) == 0) {
15471 mutex_exit(&dtrace_lock);
15472 dtrace_helper_provider_register(curproc, help, dhp);
15473 mutex_enter(&dtrace_lock);
15475 destroy = 0;
15479 if (destroy)
15480 dtrace_dof_destroy(dof);
15482 return (gen);
15485 static dtrace_helpers_t *
15486 dtrace_helpers_create(proc_t *p)
15488 dtrace_helpers_t *help;
15490 ASSERT(MUTEX_HELD(&dtrace_lock));
15491 ASSERT(p->p_dtrace_helpers == NULL);
15493 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
15494 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
15495 DTRACE_NHELPER_ACTIONS, KM_SLEEP);
15497 p->p_dtrace_helpers = help;
15498 dtrace_helpers++;
15500 return (help);
15503 static void
15504 dtrace_helpers_destroy(proc_t *p)
15506 dtrace_helpers_t *help;
15507 dtrace_vstate_t *vstate;
15508 int i;
15510 mutex_enter(&dtrace_lock);
15512 ASSERT(p->p_dtrace_helpers != NULL);
15513 ASSERT(dtrace_helpers > 0);
15515 help = p->p_dtrace_helpers;
15516 vstate = &help->dthps_vstate;
15519 * We're now going to lose the help from this process.
15521 p->p_dtrace_helpers = NULL;
15522 if (p == curproc) {
15523 dtrace_sync();
15524 } else {
15526 * It is sometimes necessary to clean up dtrace helpers from a
15527 * an incomplete child process as part of a failed fork
15528 * operation. In such situations, a dtrace_sync() call should
15529 * be unnecessary as the process should be devoid of threads,
15530 * much less any in probe context.
15532 VERIFY(p->p_stat == SIDL);
15536 * Destroy the helper actions.
15538 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
15539 dtrace_helper_action_t *h, *next;
15541 for (h = help->dthps_actions[i]; h != NULL; h = next) {
15542 next = h->dtha_next;
15543 dtrace_helper_action_destroy(h, vstate);
15544 h = next;
15548 mutex_exit(&dtrace_lock);
15551 * Destroy the helper providers.
15553 if (help->dthps_maxprovs > 0) {
15554 mutex_enter(&dtrace_meta_lock);
15555 if (dtrace_meta_pid != NULL) {
15556 ASSERT(dtrace_deferred_pid == NULL);
15558 for (i = 0; i < help->dthps_nprovs; i++) {
15559 dtrace_helper_provider_remove(
15560 &help->dthps_provs[i]->dthp_prov, p->p_pid);
15562 } else {
15563 mutex_enter(&dtrace_lock);
15564 ASSERT(help->dthps_deferred == 0 ||
15565 help->dthps_next != NULL ||
15566 help->dthps_prev != NULL ||
15567 help == dtrace_deferred_pid);
15570 * Remove the helper from the deferred list.
15572 if (help->dthps_next != NULL)
15573 help->dthps_next->dthps_prev = help->dthps_prev;
15574 if (help->dthps_prev != NULL)
15575 help->dthps_prev->dthps_next = help->dthps_next;
15576 if (dtrace_deferred_pid == help) {
15577 dtrace_deferred_pid = help->dthps_next;
15578 ASSERT(help->dthps_prev == NULL);
15581 mutex_exit(&dtrace_lock);
15584 mutex_exit(&dtrace_meta_lock);
15586 for (i = 0; i < help->dthps_nprovs; i++) {
15587 dtrace_helper_provider_destroy(help->dthps_provs[i]);
15590 kmem_free(help->dthps_provs, help->dthps_maxprovs *
15591 sizeof (dtrace_helper_provider_t *));
15594 mutex_enter(&dtrace_lock);
15596 dtrace_vstate_fini(&help->dthps_vstate);
15597 kmem_free(help->dthps_actions,
15598 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
15599 kmem_free(help, sizeof (dtrace_helpers_t));
15601 --dtrace_helpers;
15602 mutex_exit(&dtrace_lock);
15605 static void
15606 dtrace_helpers_duplicate(proc_t *from, proc_t *to)
15608 dtrace_helpers_t *help, *newhelp;
15609 dtrace_helper_action_t *helper, *new, *last;
15610 dtrace_difo_t *dp;
15611 dtrace_vstate_t *vstate;
15612 int i, j, sz, hasprovs = 0;
15614 mutex_enter(&dtrace_lock);
15615 ASSERT(from->p_dtrace_helpers != NULL);
15616 ASSERT(dtrace_helpers > 0);
15618 help = from->p_dtrace_helpers;
15619 newhelp = dtrace_helpers_create(to);
15620 ASSERT(to->p_dtrace_helpers != NULL);
15622 newhelp->dthps_generation = help->dthps_generation;
15623 vstate = &newhelp->dthps_vstate;
15626 * Duplicate the helper actions.
15628 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
15629 if ((helper = help->dthps_actions[i]) == NULL)
15630 continue;
15632 for (last = NULL; helper != NULL; helper = helper->dtha_next) {
15633 new = kmem_zalloc(sizeof (dtrace_helper_action_t),
15634 KM_SLEEP);
15635 new->dtha_generation = helper->dtha_generation;
15637 if ((dp = helper->dtha_predicate) != NULL) {
15638 dp = dtrace_difo_duplicate(dp, vstate);
15639 new->dtha_predicate = dp;
15642 new->dtha_nactions = helper->dtha_nactions;
15643 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
15644 new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
15646 for (j = 0; j < new->dtha_nactions; j++) {
15647 dtrace_difo_t *dp = helper->dtha_actions[j];
15649 ASSERT(dp != NULL);
15650 dp = dtrace_difo_duplicate(dp, vstate);
15651 new->dtha_actions[j] = dp;
15654 if (last != NULL) {
15655 last->dtha_next = new;
15656 } else {
15657 newhelp->dthps_actions[i] = new;
15660 last = new;
15665 * Duplicate the helper providers and register them with the
15666 * DTrace framework.
15668 if (help->dthps_nprovs > 0) {
15669 newhelp->dthps_nprovs = help->dthps_nprovs;
15670 newhelp->dthps_maxprovs = help->dthps_nprovs;
15671 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
15672 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
15673 for (i = 0; i < newhelp->dthps_nprovs; i++) {
15674 newhelp->dthps_provs[i] = help->dthps_provs[i];
15675 newhelp->dthps_provs[i]->dthp_ref++;
15678 hasprovs = 1;
15681 mutex_exit(&dtrace_lock);
15683 if (hasprovs)
15684 dtrace_helper_provider_register(to, newhelp, NULL);
15688 * DTrace Hook Functions
15690 static void
15691 dtrace_module_loaded(struct modctl *ctl)
15693 dtrace_provider_t *prv;
15695 mutex_enter(&dtrace_provider_lock);
15696 mutex_enter(&mod_lock);
15698 ASSERT(ctl->mod_busy);
15701 * We're going to call each providers per-module provide operation
15702 * specifying only this module.
15704 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
15705 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
15707 mutex_exit(&mod_lock);
15708 mutex_exit(&dtrace_provider_lock);
15711 * If we have any retained enablings, we need to match against them.
15712 * Enabling probes requires that cpu_lock be held, and we cannot hold
15713 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
15714 * module. (In particular, this happens when loading scheduling
15715 * classes.) So if we have any retained enablings, we need to dispatch
15716 * our task queue to do the match for us.
15718 mutex_enter(&dtrace_lock);
15720 if (dtrace_retained == NULL) {
15721 mutex_exit(&dtrace_lock);
15722 return;
15725 (void) taskq_dispatch(dtrace_taskq,
15726 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
15728 mutex_exit(&dtrace_lock);
15731 * And now, for a little heuristic sleaze: in general, we want to
15732 * match modules as soon as they load. However, we cannot guarantee
15733 * this, because it would lead us to the lock ordering violation
15734 * outlined above. The common case, of course, is that cpu_lock is
15735 * _not_ held -- so we delay here for a clock tick, hoping that that's
15736 * long enough for the task queue to do its work. If it's not, it's
15737 * not a serious problem -- it just means that the module that we
15738 * just loaded may not be immediately instrumentable.
15740 delay(1);
15743 static void
15744 dtrace_module_unloaded(struct modctl *ctl)
15746 dtrace_probe_t template, *probe, *first, *next;
15747 dtrace_provider_t *prov;
15749 template.dtpr_mod = ctl->mod_modname;
15751 mutex_enter(&dtrace_provider_lock);
15752 mutex_enter(&mod_lock);
15753 mutex_enter(&dtrace_lock);
15755 if (dtrace_bymod == NULL) {
15757 * The DTrace module is loaded (obviously) but not attached;
15758 * we don't have any work to do.
15760 mutex_exit(&dtrace_provider_lock);
15761 mutex_exit(&mod_lock);
15762 mutex_exit(&dtrace_lock);
15763 return;
15766 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
15767 probe != NULL; probe = probe->dtpr_nextmod) {
15768 if (probe->dtpr_ecb != NULL) {
15769 mutex_exit(&dtrace_provider_lock);
15770 mutex_exit(&mod_lock);
15771 mutex_exit(&dtrace_lock);
15774 * This shouldn't _actually_ be possible -- we're
15775 * unloading a module that has an enabled probe in it.
15776 * (It's normally up to the provider to make sure that
15777 * this can't happen.) However, because dtps_enable()
15778 * doesn't have a failure mode, there can be an
15779 * enable/unload race. Upshot: we don't want to
15780 * assert, but we're not going to disable the
15781 * probe, either.
15783 if (dtrace_err_verbose) {
15784 cmn_err(CE_WARN, "unloaded module '%s' had "
15785 "enabled probes", ctl->mod_modname);
15788 return;
15792 probe = first;
15794 for (first = NULL; probe != NULL; probe = next) {
15795 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
15797 dtrace_probes[probe->dtpr_id - 1] = NULL;
15799 next = probe->dtpr_nextmod;
15800 dtrace_hash_remove(dtrace_bymod, probe);
15801 dtrace_hash_remove(dtrace_byfunc, probe);
15802 dtrace_hash_remove(dtrace_byname, probe);
15804 if (first == NULL) {
15805 first = probe;
15806 probe->dtpr_nextmod = NULL;
15807 } else {
15808 probe->dtpr_nextmod = first;
15809 first = probe;
15814 * We've removed all of the module's probes from the hash chains and
15815 * from the probe array. Now issue a dtrace_sync() to be sure that
15816 * everyone has cleared out from any probe array processing.
15818 dtrace_sync();
15820 for (probe = first; probe != NULL; probe = first) {
15821 first = probe->dtpr_nextmod;
15822 prov = probe->dtpr_provider;
15823 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
15824 probe->dtpr_arg);
15825 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
15826 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
15827 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
15828 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
15829 kmem_free(probe, sizeof (dtrace_probe_t));
15832 mutex_exit(&dtrace_lock);
15833 mutex_exit(&mod_lock);
15834 mutex_exit(&dtrace_provider_lock);
15837 void
15838 dtrace_suspend(void)
15840 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
15843 void
15844 dtrace_resume(void)
15846 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
15849 static int
15850 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
15852 ASSERT(MUTEX_HELD(&cpu_lock));
15853 mutex_enter(&dtrace_lock);
15855 switch (what) {
15856 case CPU_CONFIG: {
15857 dtrace_state_t *state;
15858 dtrace_optval_t *opt, rs, c;
15861 * For now, we only allocate a new buffer for anonymous state.
15863 if ((state = dtrace_anon.dta_state) == NULL)
15864 break;
15866 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
15867 break;
15869 opt = state->dts_options;
15870 c = opt[DTRACEOPT_CPU];
15872 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
15873 break;
15876 * Regardless of what the actual policy is, we're going to
15877 * temporarily set our resize policy to be manual. We're
15878 * also going to temporarily set our CPU option to denote
15879 * the newly configured CPU.
15881 rs = opt[DTRACEOPT_BUFRESIZE];
15882 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
15883 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
15885 (void) dtrace_state_buffers(state);
15887 opt[DTRACEOPT_BUFRESIZE] = rs;
15888 opt[DTRACEOPT_CPU] = c;
15890 break;
15893 case CPU_UNCONFIG:
15895 * We don't free the buffer in the CPU_UNCONFIG case. (The
15896 * buffer will be freed when the consumer exits.)
15898 break;
15900 default:
15901 break;
15904 mutex_exit(&dtrace_lock);
15905 return (0);
15908 static void
15909 dtrace_cpu_setup_initial(processorid_t cpu)
15911 (void) dtrace_cpu_setup(CPU_CONFIG, cpu);
15914 static void
15915 dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
15917 if (dtrace_toxranges >= dtrace_toxranges_max) {
15918 int osize, nsize;
15919 dtrace_toxrange_t *range;
15921 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15923 if (osize == 0) {
15924 ASSERT(dtrace_toxrange == NULL);
15925 ASSERT(dtrace_toxranges_max == 0);
15926 dtrace_toxranges_max = 1;
15927 } else {
15928 dtrace_toxranges_max <<= 1;
15931 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15932 range = kmem_zalloc(nsize, KM_SLEEP);
15934 if (dtrace_toxrange != NULL) {
15935 ASSERT(osize != 0);
15936 bcopy(dtrace_toxrange, range, osize);
15937 kmem_free(dtrace_toxrange, osize);
15940 dtrace_toxrange = range;
15943 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == (uintptr_t)NULL);
15944 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == (uintptr_t)NULL);
15946 dtrace_toxrange[dtrace_toxranges].dtt_base = base;
15947 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
15948 dtrace_toxranges++;
15951 static void
15952 dtrace_getf_barrier()
15955 * When we have unprivileged (that is, non-DTRACE_CRV_KERNEL) enablings
15956 * that contain calls to getf(), this routine will be called on every
15957 * closef() before either the underlying vnode is released or the
15958 * file_t itself is freed. By the time we are here, it is essential
15959 * that the file_t can no longer be accessed from a call to getf()
15960 * in probe context -- that assures that a dtrace_sync() can be used
15961 * to clear out any enablings referring to the old structures.
15963 if (curthread->t_procp->p_zone->zone_dtrace_getf != 0 ||
15964 kcred->cr_zone->zone_dtrace_getf != 0)
15965 dtrace_sync();
15969 * DTrace Driver Cookbook Functions
15971 /*ARGSUSED*/
15972 static int
15973 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
15975 dtrace_provider_id_t id;
15976 dtrace_state_t *state = NULL;
15977 dtrace_enabling_t *enab;
15979 mutex_enter(&cpu_lock);
15980 mutex_enter(&dtrace_provider_lock);
15981 mutex_enter(&dtrace_lock);
15983 if (ddi_soft_state_init(&dtrace_softstate,
15984 sizeof (dtrace_state_t), 0) != 0) {
15985 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
15986 mutex_exit(&cpu_lock);
15987 mutex_exit(&dtrace_provider_lock);
15988 mutex_exit(&dtrace_lock);
15989 return (DDI_FAILURE);
15992 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
15993 DTRACEMNRN_DTRACE, DDI_PSEUDO, 0) == DDI_FAILURE ||
15994 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
15995 DTRACEMNRN_HELPER, DDI_PSEUDO, 0) == DDI_FAILURE) {
15996 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
15997 ddi_remove_minor_node(devi, NULL);
15998 ddi_soft_state_fini(&dtrace_softstate);
15999 mutex_exit(&cpu_lock);
16000 mutex_exit(&dtrace_provider_lock);
16001 mutex_exit(&dtrace_lock);
16002 return (DDI_FAILURE);
16005 ddi_report_dev(devi);
16006 dtrace_devi = devi;
16008 dtrace_modload = dtrace_module_loaded;
16009 dtrace_modunload = dtrace_module_unloaded;
16010 dtrace_cpu_init = dtrace_cpu_setup_initial;
16011 dtrace_helpers_cleanup = dtrace_helpers_destroy;
16012 dtrace_helpers_fork = dtrace_helpers_duplicate;
16013 dtrace_cpustart_init = dtrace_suspend;
16014 dtrace_cpustart_fini = dtrace_resume;
16015 dtrace_debugger_init = dtrace_suspend;
16016 dtrace_debugger_fini = dtrace_resume;
16018 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
16020 ASSERT(MUTEX_HELD(&cpu_lock));
16022 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
16023 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
16024 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
16025 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
16026 VM_SLEEP | VMC_IDENTIFIER);
16027 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
16028 1, INT_MAX, 0);
16030 dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
16031 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
16032 NULL, NULL, NULL, NULL, NULL, 0);
16034 ASSERT(MUTEX_HELD(&cpu_lock));
16035 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
16036 offsetof(dtrace_probe_t, dtpr_nextmod),
16037 offsetof(dtrace_probe_t, dtpr_prevmod));
16039 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
16040 offsetof(dtrace_probe_t, dtpr_nextfunc),
16041 offsetof(dtrace_probe_t, dtpr_prevfunc));
16043 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
16044 offsetof(dtrace_probe_t, dtpr_nextname),
16045 offsetof(dtrace_probe_t, dtpr_prevname));
16047 if (dtrace_retain_max < 1) {
16048 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
16049 "setting to 1", dtrace_retain_max);
16050 dtrace_retain_max = 1;
16054 * Now discover our toxic ranges.
16056 dtrace_toxic_ranges(dtrace_toxrange_add);
16059 * Before we register ourselves as a provider to our own framework,
16060 * we would like to assert that dtrace_provider is NULL -- but that's
16061 * not true if we were loaded as a dependency of a DTrace provider.
16062 * Once we've registered, we can assert that dtrace_provider is our
16063 * pseudo provider.
16065 (void) dtrace_register("dtrace", &dtrace_provider_attr,
16066 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
16068 ASSERT(dtrace_provider != NULL);
16069 ASSERT((dtrace_provider_id_t)dtrace_provider == id);
16071 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
16072 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
16073 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
16074 dtrace_provider, NULL, NULL, "END", 0, NULL);
16075 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
16076 dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
16078 dtrace_anon_property();
16079 mutex_exit(&cpu_lock);
16082 * If there are already providers, we must ask them to provide their
16083 * probes, and then match any anonymous enabling against them. Note
16084 * that there should be no other retained enablings at this time:
16085 * the only retained enablings at this time should be the anonymous
16086 * enabling.
16088 if (dtrace_anon.dta_enabling != NULL) {
16089 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
16091 dtrace_enabling_provide(NULL);
16092 state = dtrace_anon.dta_state;
16095 * We couldn't hold cpu_lock across the above call to
16096 * dtrace_enabling_provide(), but we must hold it to actually
16097 * enable the probes. We have to drop all of our locks, pick
16098 * up cpu_lock, and regain our locks before matching the
16099 * retained anonymous enabling.
16101 mutex_exit(&dtrace_lock);
16102 mutex_exit(&dtrace_provider_lock);
16104 mutex_enter(&cpu_lock);
16105 mutex_enter(&dtrace_provider_lock);
16106 mutex_enter(&dtrace_lock);
16108 if ((enab = dtrace_anon.dta_enabling) != NULL)
16109 (void) dtrace_enabling_match(enab, NULL);
16111 mutex_exit(&cpu_lock);
16114 mutex_exit(&dtrace_lock);
16115 mutex_exit(&dtrace_provider_lock);
16117 if (state != NULL) {
16119 * If we created any anonymous state, set it going now.
16121 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
16124 return (DDI_SUCCESS);
16127 /*ARGSUSED*/
16128 static int
16129 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
16131 dtrace_state_t *state;
16132 uint32_t priv;
16133 uid_t uid;
16134 zoneid_t zoneid;
16136 if (getminor(*devp) == DTRACEMNRN_HELPER)
16137 return (0);
16140 * If this wasn't an open with the "helper" minor, then it must be
16141 * the "dtrace" minor.
16143 if (getminor(*devp) != DTRACEMNRN_DTRACE)
16144 return (ENXIO);
16147 * If no DTRACE_PRIV_* bits are set in the credential, then the
16148 * caller lacks sufficient permission to do anything with DTrace.
16150 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
16151 if (priv == DTRACE_PRIV_NONE)
16152 return (EACCES);
16155 * Ask all providers to provide all their probes.
16157 mutex_enter(&dtrace_provider_lock);
16158 dtrace_probe_provide(NULL, NULL);
16159 mutex_exit(&dtrace_provider_lock);
16161 mutex_enter(&cpu_lock);
16162 mutex_enter(&dtrace_lock);
16163 dtrace_opens++;
16164 dtrace_membar_producer();
16167 * If the kernel debugger is active (that is, if the kernel debugger
16168 * modified text in some way), we won't allow the open.
16170 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
16171 dtrace_opens--;
16172 mutex_exit(&cpu_lock);
16173 mutex_exit(&dtrace_lock);
16174 return (EBUSY);
16177 if (dtrace_helptrace_enable && dtrace_helptrace_buffer == NULL) {
16179 * If DTrace helper tracing is enabled, we need to allocate the
16180 * trace buffer and initialize the values.
16182 dtrace_helptrace_buffer =
16183 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
16184 dtrace_helptrace_next = 0;
16185 dtrace_helptrace_wrapped = 0;
16186 dtrace_helptrace_enable = 0;
16189 state = dtrace_state_create(devp, cred_p);
16190 mutex_exit(&cpu_lock);
16192 if (state == NULL) {
16193 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
16194 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
16195 mutex_exit(&dtrace_lock);
16196 return (EAGAIN);
16199 mutex_exit(&dtrace_lock);
16201 return (0);
16204 /*ARGSUSED*/
16205 static int
16206 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
16208 minor_t minor = getminor(dev);
16209 dtrace_state_t *state;
16210 dtrace_helptrace_t *buf = NULL;
16212 if (minor == DTRACEMNRN_HELPER)
16213 return (0);
16215 state = ddi_get_soft_state(dtrace_softstate, minor);
16217 mutex_enter(&cpu_lock);
16218 mutex_enter(&dtrace_lock);
16220 if (state->dts_anon) {
16222 * There is anonymous state. Destroy that first.
16224 ASSERT(dtrace_anon.dta_state == NULL);
16225 dtrace_state_destroy(state->dts_anon);
16228 if (dtrace_helptrace_disable) {
16230 * If we have been told to disable helper tracing, set the
16231 * buffer to NULL before calling into dtrace_state_destroy();
16232 * we take advantage of its dtrace_sync() to know that no
16233 * CPU is in probe context with enabled helper tracing
16234 * after it returns.
16236 buf = dtrace_helptrace_buffer;
16237 dtrace_helptrace_buffer = NULL;
16240 dtrace_state_destroy(state);
16241 ASSERT(dtrace_opens > 0);
16244 * Only relinquish control of the kernel debugger interface when there
16245 * are no consumers and no anonymous enablings.
16247 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
16248 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
16250 if (buf != NULL) {
16251 kmem_free(buf, dtrace_helptrace_bufsize);
16252 dtrace_helptrace_disable = 0;
16255 mutex_exit(&dtrace_lock);
16256 mutex_exit(&cpu_lock);
16258 return (0);
16261 /*ARGSUSED*/
16262 static int
16263 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
16265 int rval;
16266 dof_helper_t help, *dhp = NULL;
16268 switch (cmd) {
16269 case DTRACEHIOC_ADDDOF:
16270 if (copyin((void *)arg, &help, sizeof (help)) != 0) {
16271 dtrace_dof_error(NULL, "failed to copyin DOF helper");
16272 return (EFAULT);
16275 dhp = &help;
16276 arg = (intptr_t)help.dofhp_dof;
16277 /*FALLTHROUGH*/
16279 case DTRACEHIOC_ADD: {
16280 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval);
16282 if (dof == NULL)
16283 return (rval);
16285 mutex_enter(&dtrace_lock);
16288 * dtrace_helper_slurp() takes responsibility for the dof --
16289 * it may free it now or it may save it and free it later.
16291 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) {
16292 *rv = rval;
16293 rval = 0;
16294 } else {
16295 rval = EINVAL;
16298 mutex_exit(&dtrace_lock);
16299 return (rval);
16302 case DTRACEHIOC_REMOVE: {
16303 mutex_enter(&dtrace_lock);
16304 rval = dtrace_helper_destroygen(arg);
16305 mutex_exit(&dtrace_lock);
16307 return (rval);
16310 default:
16311 break;
16314 return (ENOTTY);
16317 /*ARGSUSED*/
16318 static int
16319 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
16321 minor_t minor = getminor(dev);
16322 dtrace_state_t *state;
16323 int rval;
16325 if (minor == DTRACEMNRN_HELPER)
16326 return (dtrace_ioctl_helper(cmd, arg, rv));
16328 state = ddi_get_soft_state(dtrace_softstate, minor);
16330 if (state->dts_anon) {
16331 ASSERT(dtrace_anon.dta_state == NULL);
16332 state = state->dts_anon;
16335 switch (cmd) {
16336 case DTRACEIOC_PROVIDER: {
16337 dtrace_providerdesc_t pvd;
16338 dtrace_provider_t *pvp;
16340 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0)
16341 return (EFAULT);
16343 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
16344 mutex_enter(&dtrace_provider_lock);
16346 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
16347 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0)
16348 break;
16351 mutex_exit(&dtrace_provider_lock);
16353 if (pvp == NULL)
16354 return (ESRCH);
16356 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
16357 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
16358 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0)
16359 return (EFAULT);
16361 return (0);
16364 case DTRACEIOC_EPROBE: {
16365 dtrace_eprobedesc_t epdesc;
16366 dtrace_ecb_t *ecb;
16367 dtrace_action_t *act;
16368 void *buf;
16369 size_t size;
16370 uintptr_t dest;
16371 int nrecs;
16373 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0)
16374 return (EFAULT);
16376 mutex_enter(&dtrace_lock);
16378 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
16379 mutex_exit(&dtrace_lock);
16380 return (EINVAL);
16383 if (ecb->dte_probe == NULL) {
16384 mutex_exit(&dtrace_lock);
16385 return (EINVAL);
16388 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
16389 epdesc.dtepd_uarg = ecb->dte_uarg;
16390 epdesc.dtepd_size = ecb->dte_size;
16392 nrecs = epdesc.dtepd_nrecs;
16393 epdesc.dtepd_nrecs = 0;
16394 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
16395 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
16396 continue;
16398 epdesc.dtepd_nrecs++;
16402 * Now that we have the size, we need to allocate a temporary
16403 * buffer in which to store the complete description. We need
16404 * the temporary buffer to be able to drop dtrace_lock()
16405 * across the copyout(), below.
16407 size = sizeof (dtrace_eprobedesc_t) +
16408 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
16410 buf = kmem_alloc(size, KM_SLEEP);
16411 dest = (uintptr_t)buf;
16413 bcopy(&epdesc, (void *)dest, sizeof (epdesc));
16414 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
16416 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
16417 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
16418 continue;
16420 if (nrecs-- == 0)
16421 break;
16423 bcopy(&act->dta_rec, (void *)dest,
16424 sizeof (dtrace_recdesc_t));
16425 dest += sizeof (dtrace_recdesc_t);
16428 mutex_exit(&dtrace_lock);
16430 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
16431 kmem_free(buf, size);
16432 return (EFAULT);
16435 kmem_free(buf, size);
16436 return (0);
16439 case DTRACEIOC_AGGDESC: {
16440 dtrace_aggdesc_t aggdesc;
16441 dtrace_action_t *act;
16442 dtrace_aggregation_t *agg;
16443 int nrecs;
16444 uint32_t offs;
16445 dtrace_recdesc_t *lrec;
16446 void *buf;
16447 size_t size;
16448 uintptr_t dest;
16450 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
16451 return (EFAULT);
16453 mutex_enter(&dtrace_lock);
16455 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
16456 mutex_exit(&dtrace_lock);
16457 return (EINVAL);
16460 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
16462 nrecs = aggdesc.dtagd_nrecs;
16463 aggdesc.dtagd_nrecs = 0;
16465 offs = agg->dtag_base;
16466 lrec = &agg->dtag_action.dta_rec;
16467 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
16469 for (act = agg->dtag_first; ; act = act->dta_next) {
16470 ASSERT(act->dta_intuple ||
16471 DTRACEACT_ISAGG(act->dta_kind));
16474 * If this action has a record size of zero, it
16475 * denotes an argument to the aggregating action.
16476 * Because the presence of this record doesn't (or
16477 * shouldn't) affect the way the data is interpreted,
16478 * we don't copy it out to save user-level the
16479 * confusion of dealing with a zero-length record.
16481 if (act->dta_rec.dtrd_size == 0) {
16482 ASSERT(agg->dtag_hasarg);
16483 continue;
16486 aggdesc.dtagd_nrecs++;
16488 if (act == &agg->dtag_action)
16489 break;
16493 * Now that we have the size, we need to allocate a temporary
16494 * buffer in which to store the complete description. We need
16495 * the temporary buffer to be able to drop dtrace_lock()
16496 * across the copyout(), below.
16498 size = sizeof (dtrace_aggdesc_t) +
16499 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
16501 buf = kmem_alloc(size, KM_SLEEP);
16502 dest = (uintptr_t)buf;
16504 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
16505 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
16507 for (act = agg->dtag_first; ; act = act->dta_next) {
16508 dtrace_recdesc_t rec = act->dta_rec;
16511 * See the comment in the above loop for why we pass
16512 * over zero-length records.
16514 if (rec.dtrd_size == 0) {
16515 ASSERT(agg->dtag_hasarg);
16516 continue;
16519 if (nrecs-- == 0)
16520 break;
16522 rec.dtrd_offset -= offs;
16523 bcopy(&rec, (void *)dest, sizeof (rec));
16524 dest += sizeof (dtrace_recdesc_t);
16526 if (act == &agg->dtag_action)
16527 break;
16530 mutex_exit(&dtrace_lock);
16532 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
16533 kmem_free(buf, size);
16534 return (EFAULT);
16537 kmem_free(buf, size);
16538 return (0);
16541 case DTRACEIOC_ENABLE: {
16542 dof_hdr_t *dof;
16543 dtrace_enabling_t *enab = NULL;
16544 dtrace_vstate_t *vstate;
16545 int err = 0;
16547 *rv = 0;
16550 * If a NULL argument has been passed, we take this as our
16551 * cue to reevaluate our enablings.
16553 if (arg == (intptr_t)NULL) {
16554 dtrace_enabling_matchall();
16556 return (0);
16559 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
16560 return (rval);
16562 mutex_enter(&cpu_lock);
16563 mutex_enter(&dtrace_lock);
16564 vstate = &state->dts_vstate;
16566 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
16567 mutex_exit(&dtrace_lock);
16568 mutex_exit(&cpu_lock);
16569 dtrace_dof_destroy(dof);
16570 return (EBUSY);
16573 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
16574 mutex_exit(&dtrace_lock);
16575 mutex_exit(&cpu_lock);
16576 dtrace_dof_destroy(dof);
16577 return (EINVAL);
16580 if ((rval = dtrace_dof_options(dof, state)) != 0) {
16581 dtrace_enabling_destroy(enab);
16582 mutex_exit(&dtrace_lock);
16583 mutex_exit(&cpu_lock);
16584 dtrace_dof_destroy(dof);
16585 return (rval);
16588 if ((err = dtrace_enabling_match(enab, rv)) == 0) {
16589 err = dtrace_enabling_retain(enab);
16590 } else {
16591 dtrace_enabling_destroy(enab);
16594 mutex_exit(&cpu_lock);
16595 mutex_exit(&dtrace_lock);
16596 dtrace_dof_destroy(dof);
16598 return (err);
16601 case DTRACEIOC_REPLICATE: {
16602 dtrace_repldesc_t desc;
16603 dtrace_probedesc_t *match = &desc.dtrpd_match;
16604 dtrace_probedesc_t *create = &desc.dtrpd_create;
16605 int err;
16607 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16608 return (EFAULT);
16610 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
16611 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
16612 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
16613 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
16615 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
16616 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
16617 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
16618 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
16620 mutex_enter(&dtrace_lock);
16621 err = dtrace_enabling_replicate(state, match, create);
16622 mutex_exit(&dtrace_lock);
16624 return (err);
16627 case DTRACEIOC_PROBEMATCH:
16628 case DTRACEIOC_PROBES: {
16629 dtrace_probe_t *probe = NULL;
16630 dtrace_probedesc_t desc;
16631 dtrace_probekey_t pkey;
16632 dtrace_id_t i;
16633 int m = 0;
16634 uint32_t priv;
16635 uid_t uid;
16636 zoneid_t zoneid;
16638 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16639 return (EFAULT);
16641 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
16642 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
16643 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
16644 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
16647 * Before we attempt to match this probe, we want to give
16648 * all providers the opportunity to provide it.
16650 if (desc.dtpd_id == DTRACE_IDNONE) {
16651 mutex_enter(&dtrace_provider_lock);
16652 dtrace_probe_provide(&desc, NULL);
16653 mutex_exit(&dtrace_provider_lock);
16654 desc.dtpd_id++;
16657 if (cmd == DTRACEIOC_PROBEMATCH) {
16658 dtrace_probekey(&desc, &pkey);
16659 pkey.dtpk_id = DTRACE_IDNONE;
16662 dtrace_cred2priv(cr, &priv, &uid, &zoneid);
16664 mutex_enter(&dtrace_lock);
16666 if (cmd == DTRACEIOC_PROBEMATCH) {
16667 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
16668 if ((probe = dtrace_probes[i - 1]) != NULL &&
16669 (m = dtrace_match_probe(probe, &pkey,
16670 priv, uid, zoneid)) != 0)
16671 break;
16674 if (m < 0) {
16675 mutex_exit(&dtrace_lock);
16676 return (EINVAL);
16679 } else {
16680 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
16681 if ((probe = dtrace_probes[i - 1]) != NULL &&
16682 dtrace_match_priv(probe, priv, uid, zoneid))
16683 break;
16687 if (probe == NULL) {
16688 mutex_exit(&dtrace_lock);
16689 return (ESRCH);
16692 dtrace_probe_description(probe, &desc);
16693 mutex_exit(&dtrace_lock);
16695 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16696 return (EFAULT);
16698 return (0);
16701 case DTRACEIOC_PROBEARG: {
16702 dtrace_argdesc_t desc;
16703 dtrace_probe_t *probe;
16704 dtrace_provider_t *prov;
16706 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16707 return (EFAULT);
16709 if (desc.dtargd_id == DTRACE_IDNONE)
16710 return (EINVAL);
16712 if (desc.dtargd_ndx == DTRACE_ARGNONE)
16713 return (EINVAL);
16715 mutex_enter(&dtrace_provider_lock);
16716 mutex_enter(&mod_lock);
16717 mutex_enter(&dtrace_lock);
16719 if (desc.dtargd_id > dtrace_nprobes) {
16720 mutex_exit(&dtrace_lock);
16721 mutex_exit(&mod_lock);
16722 mutex_exit(&dtrace_provider_lock);
16723 return (EINVAL);
16726 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
16727 mutex_exit(&dtrace_lock);
16728 mutex_exit(&mod_lock);
16729 mutex_exit(&dtrace_provider_lock);
16730 return (EINVAL);
16733 mutex_exit(&dtrace_lock);
16735 prov = probe->dtpr_provider;
16737 if (prov->dtpv_pops.dtps_getargdesc == NULL) {
16739 * There isn't any typed information for this probe.
16740 * Set the argument number to DTRACE_ARGNONE.
16742 desc.dtargd_ndx = DTRACE_ARGNONE;
16743 } else {
16744 desc.dtargd_native[0] = '\0';
16745 desc.dtargd_xlate[0] = '\0';
16746 desc.dtargd_mapping = desc.dtargd_ndx;
16748 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
16749 probe->dtpr_id, probe->dtpr_arg, &desc);
16752 mutex_exit(&mod_lock);
16753 mutex_exit(&dtrace_provider_lock);
16755 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16756 return (EFAULT);
16758 return (0);
16761 case DTRACEIOC_GO: {
16762 processorid_t cpuid;
16763 rval = dtrace_state_go(state, &cpuid);
16765 if (rval != 0)
16766 return (rval);
16768 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
16769 return (EFAULT);
16771 return (0);
16774 case DTRACEIOC_STOP: {
16775 processorid_t cpuid;
16777 mutex_enter(&dtrace_lock);
16778 rval = dtrace_state_stop(state, &cpuid);
16779 mutex_exit(&dtrace_lock);
16781 if (rval != 0)
16782 return (rval);
16784 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
16785 return (EFAULT);
16787 return (0);
16790 case DTRACEIOC_DOFGET: {
16791 dof_hdr_t hdr, *dof;
16792 uint64_t len;
16794 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
16795 return (EFAULT);
16797 mutex_enter(&dtrace_lock);
16798 dof = dtrace_dof_create(state);
16799 mutex_exit(&dtrace_lock);
16801 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
16802 rval = copyout(dof, (void *)arg, len);
16803 dtrace_dof_destroy(dof);
16805 return (rval == 0 ? 0 : EFAULT);
16808 case DTRACEIOC_AGGSNAP:
16809 case DTRACEIOC_BUFSNAP: {
16810 dtrace_bufdesc_t desc;
16811 caddr_t cached;
16812 dtrace_buffer_t *buf;
16814 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16815 return (EFAULT);
16817 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
16818 return (EINVAL);
16820 mutex_enter(&dtrace_lock);
16822 if (cmd == DTRACEIOC_BUFSNAP) {
16823 buf = &state->dts_buffer[desc.dtbd_cpu];
16824 } else {
16825 buf = &state->dts_aggbuffer[desc.dtbd_cpu];
16828 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
16829 size_t sz = buf->dtb_offset;
16831 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
16832 mutex_exit(&dtrace_lock);
16833 return (EBUSY);
16837 * If this buffer has already been consumed, we're
16838 * going to indicate that there's nothing left here
16839 * to consume.
16841 if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
16842 mutex_exit(&dtrace_lock);
16844 desc.dtbd_size = 0;
16845 desc.dtbd_drops = 0;
16846 desc.dtbd_errors = 0;
16847 desc.dtbd_oldest = 0;
16848 sz = sizeof (desc);
16850 if (copyout(&desc, (void *)arg, sz) != 0)
16851 return (EFAULT);
16853 return (0);
16857 * If this is a ring buffer that has wrapped, we want
16858 * to copy the whole thing out.
16860 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
16861 dtrace_buffer_polish(buf);
16862 sz = buf->dtb_size;
16865 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
16866 mutex_exit(&dtrace_lock);
16867 return (EFAULT);
16870 desc.dtbd_size = sz;
16871 desc.dtbd_drops = buf->dtb_drops;
16872 desc.dtbd_errors = buf->dtb_errors;
16873 desc.dtbd_oldest = buf->dtb_xamot_offset;
16874 desc.dtbd_timestamp = dtrace_gethrtime();
16876 mutex_exit(&dtrace_lock);
16878 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16879 return (EFAULT);
16881 buf->dtb_flags |= DTRACEBUF_CONSUMED;
16883 return (0);
16886 if (buf->dtb_tomax == NULL) {
16887 ASSERT(buf->dtb_xamot == NULL);
16888 mutex_exit(&dtrace_lock);
16889 return (ENOENT);
16892 cached = buf->dtb_tomax;
16893 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
16895 dtrace_xcall(desc.dtbd_cpu,
16896 (dtrace_xcall_t)dtrace_buffer_switch, buf);
16898 state->dts_errors += buf->dtb_xamot_errors;
16901 * If the buffers did not actually switch, then the cross call
16902 * did not take place -- presumably because the given CPU is
16903 * not in the ready set. If this is the case, we'll return
16904 * ENOENT.
16906 if (buf->dtb_tomax == cached) {
16907 ASSERT(buf->dtb_xamot != cached);
16908 mutex_exit(&dtrace_lock);
16909 return (ENOENT);
16912 ASSERT(cached == buf->dtb_xamot);
16915 * We have our snapshot; now copy it out.
16917 if (copyout(buf->dtb_xamot, desc.dtbd_data,
16918 buf->dtb_xamot_offset) != 0) {
16919 mutex_exit(&dtrace_lock);
16920 return (EFAULT);
16923 desc.dtbd_size = buf->dtb_xamot_offset;
16924 desc.dtbd_drops = buf->dtb_xamot_drops;
16925 desc.dtbd_errors = buf->dtb_xamot_errors;
16926 desc.dtbd_oldest = 0;
16927 desc.dtbd_timestamp = buf->dtb_switched;
16929 mutex_exit(&dtrace_lock);
16932 * Finally, copy out the buffer description.
16934 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16935 return (EFAULT);
16937 return (0);
16940 case DTRACEIOC_CONF: {
16941 dtrace_conf_t conf;
16943 bzero(&conf, sizeof (conf));
16944 conf.dtc_difversion = DIF_VERSION;
16945 conf.dtc_difintregs = DIF_DIR_NREGS;
16946 conf.dtc_diftupregs = DIF_DTR_NREGS;
16947 conf.dtc_ctfmodel = CTF_MODEL_NATIVE;
16949 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
16950 return (EFAULT);
16952 return (0);
16955 case DTRACEIOC_STATUS: {
16956 dtrace_status_t stat;
16957 dtrace_dstate_t *dstate;
16958 int i, j;
16959 uint64_t nerrs;
16962 * See the comment in dtrace_state_deadman() for the reason
16963 * for setting dts_laststatus to INT64_MAX before setting
16964 * it to the correct value.
16966 state->dts_laststatus = INT64_MAX;
16967 dtrace_membar_producer();
16968 state->dts_laststatus = dtrace_gethrtime();
16970 bzero(&stat, sizeof (stat));
16972 mutex_enter(&dtrace_lock);
16974 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
16975 mutex_exit(&dtrace_lock);
16976 return (ENOENT);
16979 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
16980 stat.dtst_exiting = 1;
16982 nerrs = state->dts_errors;
16983 dstate = &state->dts_vstate.dtvs_dynvars;
16985 for (i = 0; i < NCPU; i++) {
16986 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];
16988 stat.dtst_dyndrops += dcpu->dtdsc_drops;
16989 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
16990 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;
16992 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
16993 stat.dtst_filled++;
16995 nerrs += state->dts_buffer[i].dtb_errors;
16997 for (j = 0; j < state->dts_nspeculations; j++) {
16998 dtrace_speculation_t *spec;
16999 dtrace_buffer_t *buf;
17001 spec = &state->dts_speculations[j];
17002 buf = &spec->dtsp_buffer[i];
17003 stat.dtst_specdrops += buf->dtb_xamot_drops;
17007 stat.dtst_specdrops_busy = state->dts_speculations_busy;
17008 stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
17009 stat.dtst_stkstroverflows = state->dts_stkstroverflows;
17010 stat.dtst_dblerrors = state->dts_dblerrors;
17011 stat.dtst_killed =
17012 (state->dts_activity == DTRACE_ACTIVITY_KILLED);
17013 stat.dtst_errors = nerrs;
17015 mutex_exit(&dtrace_lock);
17017 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
17018 return (EFAULT);
17020 return (0);
17023 case DTRACEIOC_FORMAT: {
17024 dtrace_fmtdesc_t fmt;
17025 char *str;
17026 int len;
17028 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
17029 return (EFAULT);
17031 mutex_enter(&dtrace_lock);
17033 if (fmt.dtfd_format == 0 ||
17034 fmt.dtfd_format > state->dts_nformats) {
17035 mutex_exit(&dtrace_lock);
17036 return (EINVAL);
17040 * Format strings are allocated contiguously and they are
17041 * never freed; if a format index is less than the number
17042 * of formats, we can assert that the format map is non-NULL
17043 * and that the format for the specified index is non-NULL.
17045 ASSERT(state->dts_formats != NULL);
17046 str = state->dts_formats[fmt.dtfd_format - 1];
17047 ASSERT(str != NULL);
17049 len = strlen(str) + 1;
17051 if (len > fmt.dtfd_length) {
17052 fmt.dtfd_length = len;
17054 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
17055 mutex_exit(&dtrace_lock);
17056 return (EINVAL);
17058 } else {
17059 if (copyout(str, fmt.dtfd_string, len) != 0) {
17060 mutex_exit(&dtrace_lock);
17061 return (EINVAL);
17065 mutex_exit(&dtrace_lock);
17066 return (0);
17069 default:
17070 break;
17073 return (ENOTTY);
17076 /*ARGSUSED*/
17077 static int
17078 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
17080 dtrace_state_t *state;
17082 switch (cmd) {
17083 case DDI_DETACH:
17084 break;
17086 case DDI_SUSPEND:
17087 return (DDI_SUCCESS);
17089 default:
17090 return (DDI_FAILURE);
17093 mutex_enter(&cpu_lock);
17094 mutex_enter(&dtrace_provider_lock);
17095 mutex_enter(&dtrace_lock);
17097 ASSERT(dtrace_opens == 0);
17099 if (dtrace_helpers > 0) {
17100 mutex_exit(&dtrace_provider_lock);
17101 mutex_exit(&dtrace_lock);
17102 mutex_exit(&cpu_lock);
17103 return (DDI_FAILURE);
17106 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
17107 mutex_exit(&dtrace_provider_lock);
17108 mutex_exit(&dtrace_lock);
17109 mutex_exit(&cpu_lock);
17110 return (DDI_FAILURE);
17113 dtrace_provider = NULL;
17115 if ((state = dtrace_anon_grab()) != NULL) {
17117 * If there were ECBs on this state, the provider should
17118 * have not been allowed to detach; assert that there is
17119 * none.
17121 ASSERT(state->dts_necbs == 0);
17122 dtrace_state_destroy(state);
17125 * If we're being detached with anonymous state, we need to
17126 * indicate to the kernel debugger that DTrace is now inactive.
17128 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
17131 bzero(&dtrace_anon, sizeof (dtrace_anon_t));
17132 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
17133 dtrace_cpu_init = NULL;
17134 dtrace_helpers_cleanup = NULL;
17135 dtrace_helpers_fork = NULL;
17136 dtrace_cpustart_init = NULL;
17137 dtrace_cpustart_fini = NULL;
17138 dtrace_debugger_init = NULL;
17139 dtrace_debugger_fini = NULL;
17140 dtrace_modload = NULL;
17141 dtrace_modunload = NULL;
17143 ASSERT(dtrace_getf == 0);
17144 ASSERT(dtrace_closef == NULL);
17146 mutex_exit(&cpu_lock);
17148 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
17149 dtrace_probes = NULL;
17150 dtrace_nprobes = 0;
17152 dtrace_hash_destroy(dtrace_bymod);
17153 dtrace_hash_destroy(dtrace_byfunc);
17154 dtrace_hash_destroy(dtrace_byname);
17155 dtrace_bymod = NULL;
17156 dtrace_byfunc = NULL;
17157 dtrace_byname = NULL;
17159 kmem_cache_destroy(dtrace_state_cache);
17160 vmem_destroy(dtrace_minor);
17161 vmem_destroy(dtrace_arena);
17163 if (dtrace_toxrange != NULL) {
17164 kmem_free(dtrace_toxrange,
17165 dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
17166 dtrace_toxrange = NULL;
17167 dtrace_toxranges = 0;
17168 dtrace_toxranges_max = 0;
17171 ddi_remove_minor_node(dtrace_devi, NULL);
17172 dtrace_devi = NULL;
17174 ddi_soft_state_fini(&dtrace_softstate);
17176 ASSERT(dtrace_vtime_references == 0);
17177 ASSERT(dtrace_opens == 0);
17178 ASSERT(dtrace_retained == NULL);
17180 mutex_exit(&dtrace_lock);
17181 mutex_exit(&dtrace_provider_lock);
17184 * We don't destroy the task queue until after we have dropped our
17185 * locks (taskq_destroy() may block on running tasks). To prevent
17186 * attempting to do work after we have effectively detached but before
17187 * the task queue has been destroyed, all tasks dispatched via the
17188 * task queue must check that DTrace is still attached before
17189 * performing any operation.
17191 taskq_destroy(dtrace_taskq);
17192 dtrace_taskq = NULL;
17194 return (DDI_SUCCESS);
17197 /*ARGSUSED*/
17198 static int
17199 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
17201 int error;
17203 switch (infocmd) {
17204 case DDI_INFO_DEVT2DEVINFO:
17205 *result = (void *)dtrace_devi;
17206 error = DDI_SUCCESS;
17207 break;
17208 case DDI_INFO_DEVT2INSTANCE:
17209 *result = NULL;
17210 error = DDI_SUCCESS;
17211 break;
17212 default:
17213 error = DDI_FAILURE;
17215 return (error);
17218 static struct cb_ops dtrace_cb_ops = {
17219 dtrace_open, /* open */
17220 dtrace_close, /* close */
17221 nulldev, /* strategy */
17222 nulldev, /* print */
17223 nodev, /* dump */
17224 nodev, /* read */
17225 nodev, /* write */
17226 dtrace_ioctl, /* ioctl */
17227 nodev, /* devmap */
17228 nodev, /* mmap */
17229 nodev, /* segmap */
17230 nochpoll, /* poll */
17231 ddi_prop_op, /* cb_prop_op */
17232 0, /* streamtab */
17233 D_NEW | D_MP /* Driver compatibility flag */
17236 static struct dev_ops dtrace_ops = {
17237 DEVO_REV, /* devo_rev */
17238 0, /* refcnt */
17239 dtrace_info, /* get_dev_info */
17240 nulldev, /* identify */
17241 nulldev, /* probe */
17242 dtrace_attach, /* attach */
17243 dtrace_detach, /* detach */
17244 nodev, /* reset */
17245 &dtrace_cb_ops, /* driver operations */
17246 NULL, /* bus operations */
17247 nodev, /* dev power */
17248 ddi_quiesce_not_needed, /* quiesce */
17251 static struct modldrv modldrv = {
17252 &mod_driverops, /* module type (this is a pseudo driver) */
17253 "Dynamic Tracing", /* name of module */
17254 &dtrace_ops, /* driver ops */
17257 static struct modlinkage modlinkage = {
17258 MODREV_1,
17259 (void *)&modldrv,
17260 NULL
17264 _init(void)
17266 return (mod_install(&modlinkage));
17270 _info(struct modinfo *modinfop)
17272 return (mod_info(&modlinkage, modinfop));
17276 _fini(void)
17278 return (mod_remove(&modlinkage));