#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

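/*
 * DECLARE_EVENT_CLASS() below defines a template (fields, assignment,
 * format) shared by several DEFINE_EVENT() instantiations; TRACE_EVENT()
 * defines a standalone event.  Each instantiation generates a
 * trace_<name>() helper that the driver calls, e.g.:
 *
 *	trace_bcache_request_start(d, bio);
 *
 * Events can be enabled at runtime through tracefs, typically:
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/bcache/enable
 */
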
DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__field(unsigned int,	orig_major	)
		__field(unsigned int,	orig_minor	)
		__field(sector_t,	sector		)
		__field(sector_t,	orig_sector	)
		__field(unsigned int,	nr_sector	)
		__array(char,		rwbs,	6	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->orig_major	= d->disk->major;
		__entry->orig_minor	= d->disk->first_minor;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);

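/* Key events: a bkey logged as inode:offset, with its length and dirty bit. */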
DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32,	inode	)
		__field(u64,	offset	)
		__field(u32,	size	)
		__field(bool,	dirty	)
	),

	TP_fast_assign(
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);

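/* Btree node events: nodes are identified by the bucket that holds them. */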
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,	bucket	)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);

DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

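/* Plain bio events: device, start sector, size in sectors, and rwbs flags. */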
DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__field(sector_t,	sector		)
		__field(unsigned int,	nr_sector	)
		__array(char,		rwbs,	6	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

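/*
 * Read lookup result: hit is true when the data was found in the cache,
 * bypass when the request skipped the cache entirely.
 */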
TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__field(sector_t,	sector		)
		__field(unsigned int,	nr_sector	)
		__array(char,		rwbs,	6	)
		__field(bool,		cache_hit	)
		__field(bool,		bypass		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		__entry->cache_hit	= hit;
		__entry->bypass		= bypass;
	),

	TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);

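/*
 * Write path result: writeback is true when the write was cached and will
 * reach the backing device later; bypass when the cache was skipped.
 */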
TRACE_EVENT(bcache_write,
	TP_PROTO(struct bio *bio, bool writeback, bool bypass),
	TP_ARGS(bio, writeback, bypass),

	TP_STRUCT__entry(
		__field(dev_t,		dev		)
		__field(sector_t,	sector		)
		__field(unsigned int,	nr_sector	)
		__array(char,		rwbs,	6	)
		__field(bool,		writeback	)
		__field(bool,		bypass		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		__entry->writeback	= writeback;
		__entry->bypass		= bypass;
	),

	TP_printk("%d,%d %s %llu + %u writeback %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

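/* Cache set events carry only the set UUID, printed via %pU. */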
DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,	uuid,	16	)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bcache_bio, bcache_journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

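/*
 * keys is taken from the last in-memory bset, i.e. the one still being
 * appended to and not yet written out.
 */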
TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket	)
		__field(unsigned,	block	)
		__field(unsigned,	keys	)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block	= b->written;
		__entry->keys	= b->keys.set[b->keys.nsets].data->keys;
	),

	TP_printk("bucket %zu", __entry->bucket)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc_fail,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned,	nodes	)
	),

	TP_fast_assign(
		__entry->nodes	= nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

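/*
 * op identifies the insert operation and status its outcome; the printk
 * reads "<status> for <op> at <node>(<level>): <key>".
 */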
TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64,	btree_node	)
		__field(u32,	btree_level	)
		__field(u32,	inode		)
		__field(u64,	offset		)
		__field(u32,	size		)
		__field(bool,	dirty		)
		__field(u8,	op		)
		__field(u8,	status		)
	),

	TP_fast_assign(
		__entry->btree_node	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level	= b->level;
		__entry->inode		= KEY_INODE(k);
		__entry->offset		= KEY_OFFSET(k);
		__entry->size		= KEY_SIZE(k);
		__entry->dirty		= KEY_DIRTY(k);
		__entry->op		= op;
		__entry->status		= status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);

DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t,		bucket	)
		__field(unsigned,	keys	)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys	= keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

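/* Reports how many keys a scan found between two inode:offset endpoints. */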
TRACE_EVENT(bcache_keyscan,
	TP_PROTO(unsigned nr_found,
		 unsigned start_inode, uint64_t start_offset,
		 unsigned end_inode, uint64_t end_offset),
	TP_ARGS(nr_found,
		start_inode, start_offset,
		end_inode, end_offset),

	TP_STRUCT__entry(
		__field(__u32,	nr_found	)
		__field(__u32,	start_inode	)
		__field(__u64,	start_offset	)
		__field(__u32,	end_inode	)
		__field(__u64,	end_offset	)
	),

	TP_fast_assign(
		__entry->nr_found	= nr_found;
		__entry->start_inode	= start_inode;
		__entry->start_offset	= start_offset;
		__entry->end_inode	= end_inode;
		__entry->end_offset	= end_offset;
	),

	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
		  __entry->start_inode, __entry->start_offset,
		  __entry->end_inode, __entry->end_offset)
);

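/*
 * Allocator events: free[] fifos hold buckets ready for allocation,
 * free_inc buckets queued for invalidation, and prio_blocked counts
 * allocations stalled behind priority writes (roughly; see alloc.c).
 */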
TRACE_EVENT(bcache_alloc_invalidate,
	TP_PROTO(struct cache *ca),
	TP_ARGS(ca),

	TP_STRUCT__entry(
		__field(unsigned,	free		)
		__field(unsigned,	free_inc	)
		__field(unsigned,	free_inc_size	)
		__field(unsigned,	unused		)
	),

	TP_fast_assign(
		__entry->free		= fifo_used(&ca->free[RESERVE_NONE]);
		__entry->free_inc	= fifo_used(&ca->free_inc);
		__entry->free_inc_size	= ca->free_inc.size;
		__entry->unused		= fifo_used(&ca->unused);
	),

	TP_printk("free %u free_inc %u/%u unused %u", __entry->free,
		  __entry->free_inc, __entry->free_inc_size, __entry->unused)
);

TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__field(unsigned,	free		)
		__field(unsigned,	free_inc	)
		__field(unsigned,	unused		)
		__field(unsigned,	blocked		)
	),

	TP_fast_assign(
		__entry->free		= fifo_used(&ca->free[reserve]);
		__entry->free_inc	= fifo_used(&ca->free_inc);
		__entry->unused		= fifo_used(&ca->unused);
		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("free %u free_inc %u unused %u blocked %u", __entry->free,
		  __entry->free_inc, __entry->unused, __entry->blocked)
);

/* Background writeback */

DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>