/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <xfs.h>
static kmem_zone_t      *ktrace_hdr_zone;
static kmem_zone_t      *ktrace_ent_zone;
static int              ktrace_zentries;
void __init
ktrace_init(int zentries)
{
        ktrace_zentries = roundup_pow_of_two(zentries);

        ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
                                         "ktrace_hdr");
        ASSERT(ktrace_hdr_zone);

        ktrace_ent_zone = kmem_zone_init(ktrace_zentries
                                         * sizeof(ktrace_entry_t),
                                         "ktrace_ent");
        ASSERT(ktrace_ent_zone);
}
void __exit
ktrace_uninit(void)
{
        kmem_zone_destroy(ktrace_hdr_zone);
        kmem_zone_destroy(ktrace_ent_zone);
}
/*
 * ktrace_alloc()
 *
 * Allocate a ktrace header and enough buffering for the given
 * number of entries.  Round the number of entries up to a
 * power of 2 so we can do fast masking to get the index from
 * the atomic index counter.
 */
ktrace_t *
ktrace_alloc(int nentries, unsigned int __nocast sleep)
{
        ktrace_t        *ktp;
        ktrace_entry_t  *ktep;
        int             entries;

        ktp = (ktrace_t *)kmem_zone_alloc(ktrace_hdr_zone, sleep);

        if (ktp == (ktrace_t *)NULL) {
                /*
                 * KM_SLEEP callers don't expect failure.
                 */
                if (sleep & KM_SLEEP)
                        panic("ktrace_alloc: NULL memory on KM_SLEEP request!");

                return NULL;
        }

        /*
         * Special treatment for buffers with the ktrace_zentries entries,
         * which come from their own zone.
         */
        entries = roundup_pow_of_two(nentries);
        if (entries == ktrace_zentries) {
                ktep = (ktrace_entry_t *)kmem_zone_zalloc(ktrace_ent_zone,
                                                          sleep);
        } else {
                ktep = (ktrace_entry_t *)kmem_zalloc((entries * sizeof(*ktep)),
                                                     sleep);
        }

        if (ktep == NULL) {
                /*
                 * KM_SLEEP callers don't expect failure.
                 */
                if (sleep & KM_SLEEP)
                        panic("ktrace_alloc: NULL memory on KM_SLEEP request!");

                kmem_free(ktp);

                return NULL;
        }

        ktp->kt_entries = ktep;
        ktp->kt_nentries = entries;
        ASSERT(is_power_of_2(entries));
        ktp->kt_index_mask = entries - 1;
        atomic_set(&ktp->kt_index, 0);
        ktp->kt_rollover = 0;
        return ktp;
}
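
/*
 * A minimal sketch of the fast masking that the power-of-2 rounding
 * above enables (the request size of 50 is a hypothetical value for
 * illustration):
 *
 *      entries = roundup_pow_of_two(50);       yields 64
 *      ktp->kt_index_mask = entries - 1;       yields 0x3f
 *
 * For any raw counter value i, (i & kt_index_mask) equals
 * (i % entries), so ktrace_enter() can turn the ever-increasing
 * atomic counter into a slot index without a divide.
 */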
/*
 * ktrace_free()
 *
 * Free up the ktrace header and buffer.  It is up to the caller
 * to ensure that no-one is referencing it.
 */
void
ktrace_free(ktrace_t *ktp)
{
        if (ktp == (ktrace_t *)NULL)
                return;

        /*
         * Special treatment for the Vnode trace buffer.
         */
        if (ktp->kt_nentries == ktrace_zentries)
                kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
        else
                kmem_free(ktp->kt_entries);

        kmem_zone_free(ktrace_hdr_zone, ktp);
}
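
/*
 * A sketch of the intended allocate/use/free lifecycle (the size of
 * 64 and the KM_SLEEP flag are illustrative assumptions, not taken
 * from a real caller):
 *
 *      ktrace_t *ktp = ktrace_alloc(64, KM_SLEEP);
 *      ...
 *      ktrace_free(ktp);
 *
 * Note that KM_SLEEP callers never see NULL, since ktrace_alloc()
 * panics instead of failing, so no NULL check is needed here.
 */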
/*
 * ktrace_enter()
 *
 * Enter the given values into the "next" entry in the trace buffer.
 * kt_index is always the index of the next entry to be filled.
 */
void
ktrace_enter(
        ktrace_t        *ktp,
        void            *val0,
        void            *val1,
        void            *val2,
        void            *val3,
        void            *val4,
        void            *val5,
        void            *val6,
        void            *val7,
        void            *val8,
        void            *val9,
        void            *val10,
        void            *val11,
        void            *val12,
        void            *val13,
        void            *val14,
        void            *val15)
{
        int             index;
        ktrace_entry_t  *ktep;

        ASSERT(ktp != NULL);

        /*
         * Grab an entry by pushing the index up to the next one.
         */
        index = atomic_add_return(1, &ktp->kt_index);
        index = (index - 1) & ktp->kt_index_mask;
        if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
                ktp->kt_rollover = 1;

        ASSERT((index >= 0) && (index < ktp->kt_nentries));

        ktep = &(ktp->kt_entries[index]);

        ktep->val[0]  = val0;
        ktep->val[1]  = val1;
        ktep->val[2]  = val2;
        ktep->val[3]  = val3;
        ktep->val[4]  = val4;
        ktep->val[5]  = val5;
        ktep->val[6]  = val6;
        ktep->val[7]  = val7;
        ktep->val[8]  = val8;
        ktep->val[9]  = val9;
        ktep->val[10] = val10;
        ktep->val[11] = val11;
        ktep->val[12] = val12;
        ktep->val[13] = val13;
        ktep->val[14] = val14;
        ktep->val[15] = val15;
}
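
/*
 * A sketch of a typical call, assuming a caller with only three
 * interesting values that pads the remaining slots with NULL (the
 * names event_id, ip and len are hypothetical):
 *
 *      ktrace_enter(ktp,
 *                   (void *)(__psint_t)event_id,
 *                   (void *)ip,
 *                   (void *)(__psint_t)len,
 *                   NULL, NULL, NULL, NULL, NULL,
 *                   NULL, NULL, NULL, NULL, NULL,
 *                   NULL, NULL, NULL);
 */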
/*
 * ktrace_nentries()
 *
 * Return the number of entries in the trace buffer.
 */
int
ktrace_nentries(ktrace_t *ktp)
{
        int     index;

        if (ktp == NULL)
                return 0;

        index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
        return (ktp->kt_rollover ? ktp->kt_nentries : index);
}
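
/*
 * For example (hypothetical counts): with kt_nentries == 64, after
 * 10 calls to ktrace_enter() this returns 10 (kt_rollover is still
 * 0); after 100 calls the buffer has wrapped, kt_rollover is 1, and
 * the full 64 is returned.
 */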
/*
 * ktrace_first()
 *
 * This is used to find the start of the trace buffer.
 * In conjunction with ktrace_next() it can be used to
 * iterate through the entire trace buffer.  This code does
 * not do any locking because it is assumed that it is called
 * either from the debugger or the error reporting code
 * while the system is essentially single-threaded.
 *
 * The caller must pass in a pointer to a ktrace_snap
 * structure in which we will keep some state used to
 * iterate through the buffer.  This state must not be touched
 * by any code outside of this module.
 */
ktrace_entry_t *
ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
{
        ktrace_entry_t  *ktep;
        int             index;
        int             nentries;

        if (ktp->kt_rollover)
                index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
        else
                index = 0;

        ktsp->ks_start = index;
        ktep = &(ktp->kt_entries[index]);

        nentries = ktrace_nentries(ktp);
        index++;
        if (index < nentries) {
                ktsp->ks_index = index;
        } else {
                ktsp->ks_index = 0;
                if (index > nentries)
                        ktep = NULL;
        }
        return ktep;
}
/*
 * ktrace_next()
 *
 * This is used to iterate through the entries of the given
 * trace buffer.  The caller must pass in the ktrace_snap_t
 * structure initialized by ktrace_first().  The return value
 * will be either a pointer to the next ktrace_entry or NULL
 * if all of the entries have been traversed.
 */
ktrace_entry_t *
ktrace_next(ktrace_t *ktp, ktrace_snap_t *ktsp)
{
        int             index;
        ktrace_entry_t  *ktep;

        index = ktsp->ks_index;
        if (index == ktsp->ks_start) {
                ktep = NULL;
        } else {
                ktep = &ktp->kt_entries[index];
        }

        index++;
        if (index == ktrace_nentries(ktp)) {
                ktsp->ks_index = 0;
        } else {
                ktsp->ks_index = index;
        }

        return ktep;
}
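
/*
 * A minimal iteration sketch using ktrace_first()/ktrace_next()
 * (printing only val[0] is an illustrative choice, not XFS code):
 *
 *      ktrace_snap_t   snap;
 *      ktrace_entry_t  *ktep;
 *
 *      for (ktep = ktrace_first(ktp, &snap);
 *           ktep != NULL;
 *           ktep = ktrace_next(ktp, &snap))
 *              printk("%p\n", ktep->val[0]);
 */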
/*
 * ktrace_skip()
 *
 * Skip the next "count" entries and return the entry after that.
 * Return NULL if this causes us to iterate past the beginning again.
 */
ktrace_entry_t *
ktrace_skip(
        ktrace_t        *ktp,
        int             count,
        ktrace_snap_t   *ktsp)
{
        int             index;
        int             new_index;
        ktrace_entry_t  *ktep;
        int             nentries = ktrace_nentries(ktp);

        index = ktsp->ks_index;
        new_index = index + count;
        while (new_index >= nentries) {
                new_index -= nentries;
        }
        if (index == ktsp->ks_start) {
                /*
                 * We've iterated around to the start, so we're done.
                 */
                ktep = NULL;
        } else if ((new_index < index) && (index < ktsp->ks_index)) {
                /*
                 * We've skipped past the start again, so we're done.
                 */
                ktep = NULL;
                ktsp->ks_index = ktsp->ks_start;
        } else {
                ktep = &(ktp->kt_entries[new_index]);
                new_index++;
                if (new_index == nentries) {
                        ktsp->ks_index = 0;
                } else {
                        ktsp->ks_index = new_index;
                }
        }
        return ktep;
}
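
/*
 * A rough sketch of sub-sampling with ktrace_skip(), visiting every
 * fourth entry (the stride of 3 is an illustrative assumption):
 *
 *      ktrace_snap_t   snap;
 *      ktrace_entry_t  *ktep;
 *
 *      for (ktep = ktrace_first(ktp, &snap);
 *           ktep != NULL;
 *           ktep = ktrace_skip(ktp, 3, &snap))
 *              printk("%p\n", ktep->val[0]);
 */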