fs/xfs/support/ktrace.c
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <xfs.h>
static kmem_zone_t *ktrace_hdr_zone;
static kmem_zone_t *ktrace_ent_zone;
static int ktrace_zentries;

void __init
ktrace_init(int zentries)
{
	ktrace_zentries = zentries;

	ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
					"ktrace_hdr");
	ASSERT(ktrace_hdr_zone);

	ktrace_ent_zone = kmem_zone_init(ktrace_zentries
					* sizeof(ktrace_entry_t),
					"ktrace_ent");
	ASSERT(ktrace_ent_zone);
}
void __exit
ktrace_uninit(void)
{
	kmem_zone_destroy(ktrace_hdr_zone);
	kmem_zone_destroy(ktrace_ent_zone);
}
/*
 * ktrace_alloc()
 *
 * Allocate a ktrace header and enough buffering for the given
 * number of entries.
 */
ktrace_t *
ktrace_alloc(int nentries, unsigned int __nocast sleep)
{
	ktrace_t	*ktp;
	ktrace_entry_t	*ktep;

	ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep);

	if (ktp == (ktrace_t*)NULL) {
		/*
		 * KM_SLEEP callers don't expect failure.
		 */
		if (sleep & KM_SLEEP)
			panic("ktrace_alloc: NULL memory on KM_SLEEP request!");

		return NULL;
	}

	/*
	 * Special treatment for buffers with the ktrace_zentries entries
	 */
	if (nentries == ktrace_zentries) {
		ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone,
							    sleep);
	} else {
		ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)),
							    sleep | KM_LARGE);
	}

	if (ktep == NULL) {
		/*
		 * KM_SLEEP callers don't expect failure.
		 */
		if (sleep & KM_SLEEP)
			panic("ktrace_alloc: NULL memory on KM_SLEEP request!");

		kmem_free(ktp, sizeof(*ktp));

		return NULL;
	}

	ktp->kt_entries  = ktep;
	ktp->kt_nentries = nentries;
	ktp->kt_index    = 0;
	ktp->kt_rollover = 0;
	return ktp;
}
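
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller allocating a private trace buffer.  The helper name and the
 * entry count (64) are invented for this example; ktrace_alloc() and
 * the KM_NOSLEEP flag are the real interfaces used above.
 */
static inline ktrace_t *
ktrace_example_alloc(void)
{
	ktrace_t	*ktp;

	/*
	 * With KM_NOSLEEP the allocation may fail and NULL is returned;
	 * with KM_SLEEP a failure panics instead, as seen above.
	 */
	ktp = ktrace_alloc(64, KM_NOSLEEP);
	if (ktp == NULL)
		return NULL;

	/* The buffer is released later with ktrace_free(ktp). */
	return ktp;
}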
/*
 * ktrace_free()
 *
 * Free up the ktrace header and buffer.  It is up to the caller
 * to ensure that no-one is referencing it.
 */
void
ktrace_free(ktrace_t *ktp)
{
	int	entries_size;

	if (ktp == (ktrace_t *)NULL)
		return;

	/*
	 * Special treatment for the Vnode trace buffer.
	 */
	if (ktp->kt_nentries == ktrace_zentries) {
		kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
	} else {
		entries_size = (int)(ktp->kt_nentries * sizeof(ktrace_entry_t));

		kmem_free(ktp->kt_entries, entries_size);
	}

	kmem_zone_free(ktrace_hdr_zone, ktp);
}
/*
 * Enter the given values into the "next" entry in the trace buffer.
 * kt_index is always the index of the next entry to be filled.
 */
void
ktrace_enter(
	ktrace_t	*ktp,
	void		*val0,
	void		*val1,
	void		*val2,
	void		*val3,
	void		*val4,
	void		*val5,
	void		*val6,
	void		*val7,
	void		*val8,
	void		*val9,
	void		*val10,
	void		*val11,
	void		*val12,
	void		*val13,
	void		*val14,
	void		*val15)
{
	static DEFINE_SPINLOCK(wrap_lock);
	unsigned long	flags;
	int		index;
	ktrace_entry_t	*ktep;

	ASSERT(ktp != NULL);

	/*
	 * Grab an entry by pushing the index up to the next one.
	 */
	spin_lock_irqsave(&wrap_lock, flags);
	index = ktp->kt_index;
	if (++ktp->kt_index == ktp->kt_nentries)
		ktp->kt_index = 0;
	spin_unlock_irqrestore(&wrap_lock, flags);

	if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
		ktp->kt_rollover = 1;

	ASSERT((index >= 0) && (index < ktp->kt_nentries));

	ktep = &(ktp->kt_entries[index]);

	ktep->val[0]  = val0;
	ktep->val[1]  = val1;
	ktep->val[2]  = val2;
	ktep->val[3]  = val3;
	ktep->val[4]  = val4;
	ktep->val[5]  = val5;
	ktep->val[6]  = val6;
	ktep->val[7]  = val7;
	ktep->val[8]  = val8;
	ktep->val[9]  = val9;
	ktep->val[10] = val10;
	ktep->val[11] = val11;
	ktep->val[12] = val12;
	ktep->val[13] = val13;
	ktep->val[14] = val14;
	ktep->val[15] = val15;
}
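
/*
 * Illustrative sketch, not part of the original file: recording an
 * event.  XFS normally wraps ktrace_enter() in per-subsystem trace
 * macros; the helper below and its arguments are invented here.
 * Unused value slots are simply passed as NULL.
 */
static inline void
ktrace_example_enter(ktrace_t *ktp, void *object, int line)
{
	ktrace_enter(ktp,
		     (void *)(unsigned long)line,	/* where the event came from */
		     object,				/* the object being traced */
		     NULL, NULL, NULL, NULL, NULL, NULL,
		     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
}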
/*
 * Return the number of entries in the trace buffer.
 */
int
ktrace_nentries(
	ktrace_t	*ktp)
{
	if (ktp == NULL) {
		return 0;
	}

	return (ktp->kt_rollover ? ktp->kt_nentries : ktp->kt_index);
}

/*
 * ktrace_first()
 *
 * This is used to find the start of the trace buffer.
 * In conjunction with ktrace_next() it can be used to
 * iterate through the entire trace buffer.  This code does
 * not do any locking because it is assumed that it is called
 * from the debugger.
 *
 * The caller must pass in a pointer to a ktrace_snap
 * structure in which we will keep some state used to
 * iterate through the buffer.  This state must not be touched
 * by any code outside of this module.
 */
ktrace_entry_t *
ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
{
	ktrace_entry_t	*ktep;
	int		index;
	int		nentries;

	if (ktp->kt_rollover)
		index = ktp->kt_index;
	else
		index = 0;

	ktsp->ks_start = index;
	ktep = &(ktp->kt_entries[index]);

	nentries = ktrace_nentries(ktp);
	index++;
	if (index < nentries) {
		ktsp->ks_index = index;
	} else {
		ktsp->ks_index = 0;
		if (index > nentries)
			ktep = NULL;
	}
	return ktep;
}
/*
 * ktrace_next()
 *
 * This is used to iterate through the entries of the given
 * trace buffer.  The caller must pass in the ktrace_snap_t
 * structure initialized by ktrace_first().  The return value
 * will be either a pointer to the next ktrace_entry or NULL
 * if all of the entries have been traversed.
 */
ktrace_entry_t *
ktrace_next(
	ktrace_t	*ktp,
	ktrace_snap_t	*ktsp)
{
	int		index;
	ktrace_entry_t	*ktep;

	index = ktsp->ks_index;
	if (index == ktsp->ks_start) {
		ktep = NULL;
	} else {
		ktep = &ktp->kt_entries[index];
	}

	index++;
	if (index == ktrace_nentries(ktp)) {
		ktsp->ks_index = 0;
	} else {
		ktsp->ks_index = index;
	}

	return ktep;
}
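
/*
 * Illustrative sketch, not part of the original file: walking every
 * entry in a trace buffer from a debugger-style dump routine.  The
 * helper name is invented; ktrace_first()/ktrace_next() and the
 * ktrace_snap_t cursor are the interfaces defined in this file.
 */
static inline void
ktrace_example_dump(ktrace_t *ktp)
{
	ktrace_snap_t	snap;
	ktrace_entry_t	*ktep;

	for (ktep = ktrace_first(ktp, &snap);
	     ktep != NULL;
	     ktep = ktrace_next(ktp, &snap)) {
		printk("ktrace entry: %p %p\n", ktep->val[0], ktep->val[1]);
	}
}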
/*
 * ktrace_skip()
 *
 * Skip the next "count" entries and return the entry after that.
 * Return NULL if this causes us to iterate past the beginning again.
 */
ktrace_entry_t *
ktrace_skip(
	ktrace_t	*ktp,
	int		count,
	ktrace_snap_t	*ktsp)
{
	int		index;
	int		new_index;
	ktrace_entry_t	*ktep;
	int		nentries = ktrace_nentries(ktp);

	index = ktsp->ks_index;
	new_index = index + count;
	while (new_index >= nentries) {
		new_index -= nentries;
	}
	if (index == ktsp->ks_start) {
		/*
		 * We've iterated around to the start, so we're done.
		 */
		ktep = NULL;
	} else if ((new_index < index) && (index < ktsp->ks_index)) {
		/*
		 * We've skipped past the start again, so we're done.
		 */
		ktep = NULL;
		ktsp->ks_index = ktsp->ks_start;
	} else {
		ktep = &(ktp->kt_entries[new_index]);
		new_index++;
		if (new_index == nentries) {
			ktsp->ks_index = 0;
		} else {
			ktsp->ks_index = new_index;
		}
	}
	return ktep;
}
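
/*
 * Illustrative sketch, not part of the original file: sampling a
 * large trace buffer by looking at every 8th entry.  The helper name
 * and the stride are invented; ktrace_first() and ktrace_skip() are
 * the interfaces defined above.
 */
static inline void
ktrace_example_sample(ktrace_t *ktp)
{
	ktrace_snap_t	snap;
	ktrace_entry_t	*ktep;

	ktep = ktrace_first(ktp, &snap);
	while (ktep != NULL) {
		printk("sampled entry: %p\n", ktep->val[0]);
		/* skip 7 entries and return the one after, until we wrap */
		ktep = ktrace_skip(ktp, 7, &snap);
	}
}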