Linux 3.12.39
include/trace/events/bcache.h

#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

struct search;

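/*
 * Each DECLARE_EVENT_CLASS() below defines a reusable event template, and
 * DEFINE_EVENT() stamps out named instances of it; TRACE_EVENT() is the
 * one-shot combination of the two.  For every instance the tracepoint
 * machinery generates a trace_<name>() helper that the bcache code calls
 * at the matching point.  A sketch of such a call site (illustrative only;
 * the real callers live under drivers/md/bcache/):
 *
 *      trace_bcache_request_start(s, bio);
 */
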
DECLARE_EVENT_CLASS(bcache_request,
        TP_PROTO(struct search *s, struct bio *bio),
        TP_ARGS(s, bio),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(unsigned int,   orig_major              )
                __field(unsigned int,   orig_minor              )
                __field(sector_t,       sector                  )
                __field(sector_t,       orig_sector             )
                __field(unsigned int,   nr_sector               )
                __array(char,           rwbs,   6               )
        ),

        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->orig_major     = s->d->disk->major;
                __entry->orig_minor     = s->d->disk->first_minor;
                __entry->sector         = bio->bi_sector;
                /* backing device data starts 16 sectors in, after the superblock */
                __entry->orig_sector    = bio->bi_sector - 16;
                __entry->nr_sector      = bio->bi_size >> 9;
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
        ),

        TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->rwbs, (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
                  (unsigned long long)__entry->orig_sector)
);

DECLARE_EVENT_CLASS(bkey,
        TP_PROTO(struct bkey *k),
        TP_ARGS(k),

        TP_STRUCT__entry(
                __field(u32,    size                            )
                __field(u32,    inode                           )
                __field(u64,    offset                          )
                __field(bool,   dirty                           )
        ),

        TP_fast_assign(
                __entry->inode  = KEY_INODE(k);
                __entry->offset = KEY_OFFSET(k);
                __entry->size   = KEY_SIZE(k);
                __entry->dirty  = KEY_DIRTY(k);
        ),

        TP_printk("%u:%llu len %u dirty %u", __entry->inode,
                  __entry->offset, __entry->size, __entry->dirty)
);

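/*
 * Rendered as "<inode>:<offset> len <size> dirty <d>"; e.g. a clean 16
 * sector extent at offset 2048 of inode 1 prints (values illustrative):
 *
 *      1:2048 len 16 dirty 0
 */
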
DECLARE_EVENT_CLASS(btree_node,
        TP_PROTO(struct btree *b),
        TP_ARGS(b),

        TP_STRUCT__entry(
                __field(size_t,         bucket                  )
        ),

        TP_fast_assign(
                __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
        ),

        TP_printk("bucket %zu", __entry->bucket)
);

/* request.c */

DEFINE_EVENT(bcache_request, bcache_request_start,
        TP_PROTO(struct search *s, struct bio *bio),
        TP_ARGS(s, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
        TP_PROTO(struct search *s, struct bio *bio),
        TP_ARGS(s, bio)
);

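/*
 * These fire once per request entering and leaving the cache layer.  To
 * watch them from userspace (a sketch, assuming tracefs is mounted at
 * /sys/kernel/debug/tracing):
 *
 *      # echo 1 > /sys/kernel/debug/tracing/events/bcache/bcache_request_start/enable
 *      # cat /sys/kernel/debug/tracing/trace_pipe
 */
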
DECLARE_EVENT_CLASS(bcache_bio,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(sector_t,       sector                  )
                __field(unsigned int,   nr_sector               )
                __array(char,           rwbs,   6               )
        ),

        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_sector;
                __entry->nr_sector      = bio->bi_size >> 9;
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
        ),

        TP_printk("%d,%d %s %llu + %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

TRACE_EVENT(bcache_read,
        TP_PROTO(struct bio *bio, bool hit, bool bypass),
        TP_ARGS(bio, hit, bypass),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(sector_t,       sector                  )
                __field(unsigned int,   nr_sector               )
                __array(char,           rwbs,   6               )
                __field(bool,           cache_hit               )
                __field(bool,           bypass                  )
        ),

        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_sector;
                __entry->nr_sector      = bio->bi_size >> 9;
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
                __entry->cache_hit      = hit;
                __entry->bypass         = bypass;
        ),

        TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->rwbs, (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);

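/*
 * Sketch of the generated helper's use in the lookup path (illustrative;
 * the real caller is drivers/md/bcache/request.c, which passes its own
 * hit/bypass state):
 *
 *      trace_bcache_read(bio, hit, bypass);
 */
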
TRACE_EVENT(bcache_write,
        TP_PROTO(struct bio *bio, bool writeback, bool bypass),
        TP_ARGS(bio, writeback, bypass),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(sector_t,       sector                  )
                __field(unsigned int,   nr_sector               )
                __array(char,           rwbs,   6               )
                __field(bool,           writeback               )
                __field(bool,           bypass                  )
        ),

        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_sector;
                __entry->nr_sector      = bio->bi_size >> 9;
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
                __entry->writeback      = writeback;
                __entry->bypass         = bypass;
        ),

        TP_printk("%d,%d %s %llu + %u writeback %u bypass %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->rwbs, (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
        TP_PROTO(struct bkey *k),
        TP_ARGS(k)
);

/* Journal */

DECLARE_EVENT_CLASS(cache_set,
        TP_PROTO(struct cache_set *c),
        TP_ARGS(c),

        TP_STRUCT__entry(
                __array(char,           uuid,   16              )
        ),

        TP_fast_assign(
                memcpy(__entry->uuid, c->sb.set_uuid, 16);
        ),

        TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
        TP_PROTO(struct bkey *k),
        TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
        TP_PROTO(struct cache_set *c),
        TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
        TP_PROTO(struct cache_set *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bcache_bio, bcache_journal_write,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

/* Btree */

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
        TP_PROTO(struct cache_set *c),
        TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
        TP_PROTO(struct btree *b),
        TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_write,
        TP_PROTO(struct btree *b),
        TP_ARGS(b),

        TP_STRUCT__entry(
                __field(size_t,         bucket                  )
                __field(unsigned,       block                   )
                __field(unsigned,       keys                    )
        ),

        TP_fast_assign(
                __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
                __entry->block  = b->written;
                __entry->keys   = b->sets[b->nsets].data->keys;
        ),

        TP_printk("bucket %zu", __entry->bucket)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
        TP_PROTO(struct btree *b),
        TP_ARGS(b)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc_fail,
        TP_PROTO(struct btree *b),
        TP_ARGS(b)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
        TP_PROTO(struct btree *b),
        TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_gc_coalesce,
        TP_PROTO(unsigned nodes),
        TP_ARGS(nodes),

        TP_STRUCT__entry(
                __field(unsigned,       nodes                   )
        ),

        TP_fast_assign(
                __entry->nodes  = nodes;
        ),

        TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
        TP_PROTO(struct cache_set *c),
        TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
        TP_PROTO(struct cache_set *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
        TP_PROTO(struct bkey *k),
        TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
        TP_PROTO(struct bkey *k),
        TP_ARGS(k)
);

TRACE_EVENT(bcache_btree_insert_key,
        TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
        TP_ARGS(b, k, op, status),

        TP_STRUCT__entry(
                __field(u64,    btree_node                      )
                __field(u32,    btree_level                     )
                __field(u32,    inode                           )
                __field(u64,    offset                          )
                __field(u32,    size                            )
                __field(u8,     dirty                           )
                __field(u8,     op                              )
                __field(u8,     status                          )
        ),

        TP_fast_assign(
                __entry->btree_node     = PTR_BUCKET_NR(b->c, &b->key, 0);
                __entry->btree_level    = b->level;
                __entry->inode          = KEY_INODE(k);
                __entry->offset         = KEY_OFFSET(k);
                __entry->size           = KEY_SIZE(k);
                __entry->dirty          = KEY_DIRTY(k);
                __entry->op             = op;
                __entry->status         = status;
        ),

        TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
                  __entry->status, __entry->op,
                  __entry->btree_node, __entry->btree_level,
                  __entry->inode, __entry->offset,
                  __entry->size, __entry->dirty)
);

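/*
 * Example rendered line, status 0 for op 1 inserting a dirty 16 sector key
 * at 5:4096 into the level 0 node in bucket 1234 (values illustrative):
 *
 *      0 for 1 at 1234(0): 5:4096 len 16 dirty 1
 */
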
DECLARE_EVENT_CLASS(btree_split,
        TP_PROTO(struct btree *b, unsigned keys),
        TP_ARGS(b, keys),

        TP_STRUCT__entry(
                __field(size_t,         bucket                  )
                __field(unsigned,       keys                    )
        ),

        TP_fast_assign(
                __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
                __entry->keys   = keys;
        ),

        TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
        TP_PROTO(struct btree *b, unsigned keys),
        TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
        TP_PROTO(struct btree *b, unsigned keys),
        TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
        TP_PROTO(struct btree *b),
        TP_ARGS(b)
);

/* Allocator */

TRACE_EVENT(bcache_alloc_invalidate,
        TP_PROTO(struct cache *ca),
        TP_ARGS(ca),

        TP_STRUCT__entry(
                __field(unsigned,       free                    )
                __field(unsigned,       free_inc                )
                __field(unsigned,       free_inc_size           )
                __field(unsigned,       unused                  )
        ),

        TP_fast_assign(
                __entry->free           = fifo_used(&ca->free);
                __entry->free_inc       = fifo_used(&ca->free_inc);
                __entry->free_inc_size  = ca->free_inc.size;
                __entry->unused         = fifo_used(&ca->unused);
        ),

        TP_printk("free %u free_inc %u/%u unused %u", __entry->free,
                  __entry->free_inc, __entry->free_inc_size, __entry->unused)
);

TRACE_EVENT(bcache_alloc_fail,
        TP_PROTO(struct cache *ca),
        TP_ARGS(ca),

        TP_STRUCT__entry(
                __field(unsigned,       free                    )
                __field(unsigned,       free_inc                )
                __field(unsigned,       unused                  )
                __field(unsigned,       blocked                 )
        ),

        TP_fast_assign(
                __entry->free           = fifo_used(&ca->free);
                __entry->free_inc       = fifo_used(&ca->free_inc);
                __entry->unused         = fifo_used(&ca->unused);
                __entry->blocked        = atomic_read(&ca->set->prio_blocked);
        ),

        TP_printk("free %u free_inc %u unused %u blocked %u", __entry->free,
                  __entry->free_inc, __entry->unused, __entry->blocked)
);

/* Background writeback */

DEFINE_EVENT(bkey, bcache_writeback,
        TP_PROTO(struct bkey *k),
        TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
        TP_PROTO(struct bkey *k),
        TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
