/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */
35 static kmem_zone_t
*ktrace_hdr_zone
;
36 static kmem_zone_t
*ktrace_ent_zone
;
37 static int ktrace_zentries
;
40 ktrace_init(int zentries
)
42 ktrace_zentries
= zentries
;
44 ktrace_hdr_zone
= kmem_zone_init(sizeof(ktrace_t
),
46 ASSERT(ktrace_hdr_zone
);
48 ktrace_ent_zone
= kmem_zone_init(ktrace_zentries
49 * sizeof(ktrace_entry_t
),
51 ASSERT(ktrace_ent_zone
);
57 kmem_cache_destroy(ktrace_hdr_zone
);
58 kmem_cache_destroy(ktrace_ent_zone
);
64 * Allocate a ktrace header and enough buffering for the given
68 ktrace_alloc(int nentries
, int sleep
)
73 ktp
= (ktrace_t
*)kmem_zone_alloc(ktrace_hdr_zone
, sleep
);
75 if (ktp
== (ktrace_t
*)NULL
) {
77 * KM_SLEEP callers don't expect failure.
80 panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
86 * Special treatment for buffers with the ktrace_zentries entries
88 if (nentries
== ktrace_zentries
) {
89 ktep
= (ktrace_entry_t
*)kmem_zone_zalloc(ktrace_ent_zone
,
92 ktep
= (ktrace_entry_t
*)kmem_zalloc((nentries
* sizeof(*ktep
)),
98 * KM_SLEEP callers don't expect failure.
100 if (sleep
& KM_SLEEP
)
101 panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
103 kmem_free(ktp
, sizeof(*ktp
));
108 spinlock_init(&(ktp
->kt_lock
), "kt_lock");
110 ktp
->kt_entries
= ktep
;
111 ktp
->kt_nentries
= nentries
;
113 ktp
->kt_rollover
= 0;
121 * Free up the ktrace header and buffer. It is up to the caller
122 * to ensure that no-one is referencing it.
125 ktrace_free(ktrace_t
*ktp
)
129 if (ktp
== (ktrace_t
*)NULL
)
132 spinlock_destroy(&ktp
->kt_lock
);
135 * Special treatment for the Vnode trace buffer.
137 if (ktp
->kt_nentries
== ktrace_zentries
) {
138 kmem_zone_free(ktrace_ent_zone
, ktp
->kt_entries
);
140 entries_size
= (int)(ktp
->kt_nentries
* sizeof(ktrace_entry_t
));
142 kmem_free(ktp
->kt_entries
, entries_size
);
145 kmem_zone_free(ktrace_hdr_zone
, ktp
);
150 * Enter the given values into the "next" entry in the trace buffer.
151 * kt_index is always the index of the next entry to be filled.
173 static lock_t wrap_lock
= SPIN_LOCK_UNLOCKED
;
176 ktrace_entry_t
*ktep
;
181 * Grab an entry by pushing the index up to the next one.
183 spin_lock_irqsave(&wrap_lock
, flags
);
184 index
= ktp
->kt_index
;
185 if (++ktp
->kt_index
== ktp
->kt_nentries
)
187 spin_unlock_irqrestore(&wrap_lock
, flags
);
189 if (!ktp
->kt_rollover
&& index
== ktp
->kt_nentries
- 1)
190 ktp
->kt_rollover
= 1;
192 ASSERT((index
>= 0) && (index
< ktp
->kt_nentries
));
194 ktep
= &(ktp
->kt_entries
[index
]);
206 ktep
->val
[10] = val10
;
207 ktep
->val
[11] = val11
;
208 ktep
->val
[12] = val12
;
209 ktep
->val
[13] = val13
;
210 ktep
->val
[14] = val14
;
211 ktep
->val
[15] = val15
;
215 * Return the number of entries in the trace buffer.
225 return (ktp
->kt_rollover
? ktp
->kt_nentries
: ktp
->kt_index
);
231 * This is used to find the start of the trace buffer.
232 * In conjunction with ktrace_next() it can be used to
233 * iterate through the entire trace buffer. This code does
234 * not do any locking because it is assumed that it is called
237 * The caller must pass in a pointer to a ktrace_snap
238 * structure in which we will keep some state used to
239 * iterate through the buffer. This state must not touched
240 * by any code outside of this module.
243 ktrace_first(ktrace_t
*ktp
, ktrace_snap_t
*ktsp
)
245 ktrace_entry_t
*ktep
;
249 if (ktp
->kt_rollover
)
250 index
= ktp
->kt_index
;
254 ktsp
->ks_start
= index
;
255 ktep
= &(ktp
->kt_entries
[index
]);
257 nentries
= ktrace_nentries(ktp
);
259 if (index
< nentries
) {
260 ktsp
->ks_index
= index
;
263 if (index
> nentries
)
272 * This is used to iterate through the entries of the given
273 * trace buffer. The caller must pass in the ktrace_snap_t
274 * structure initialized by ktrace_first(). The return value
275 * will be either a pointer to the next ktrace_entry or NULL
276 * if all of the entries have been traversed.
284 ktrace_entry_t
*ktep
;
286 index
= ktsp
->ks_index
;
287 if (index
== ktsp
->ks_start
) {
290 ktep
= &ktp
->kt_entries
[index
];
294 if (index
== ktrace_nentries(ktp
)) {
297 ktsp
->ks_index
= index
;
306 * Skip the next "count" entries and return the entry after that.
307 * Return NULL if this causes us to iterate past the beginning again.
317 ktrace_entry_t
*ktep
;
318 int nentries
= ktrace_nentries(ktp
);
320 index
= ktsp
->ks_index
;
321 new_index
= index
+ count
;
322 while (new_index
>= nentries
) {
323 new_index
-= nentries
;
325 if (index
== ktsp
->ks_start
) {
327 * We've iterated around to the start, so we're done.
330 } else if ((new_index
< index
) && (index
< ktsp
->ks_index
)) {
332 * We've skipped past the start again, so we're done.
335 ktsp
->ks_index
= ktsp
->ks_start
;
337 ktep
= &(ktp
->kt_entries
[new_index
]);
339 if (new_index
== nentries
) {
342 ktsp
->ks_index
= new_index
;