1 /* SPDX-License-Identifier: GPL-2.0 */
3 #define TRACE_SYSTEM bcache
5 #if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_BCACHE_H
8 #include <linux/tracepoint.h>
10 DECLARE_EVENT_CLASS(bcache_request
,
11 TP_PROTO(struct bcache_device
*d
, struct bio
*bio
),
16 __field(unsigned int, orig_major
)
17 __field(unsigned int, orig_minor
)
18 __field(sector_t
, sector
)
19 __field(dev_t
, orig_sector
)
20 __field(unsigned int, nr_sector
)
21 __array(char, rwbs
, 6 )
25 __entry
->dev
= bio_dev(bio
);
26 __entry
->orig_major
= d
->disk
->major
;
27 __entry
->orig_minor
= d
->disk
->first_minor
;
28 __entry
->sector
= bio
->bi_iter
.bi_sector
;
29 __entry
->orig_sector
= bio
->bi_iter
.bi_sector
- 16;
30 __entry
->nr_sector
= bio
->bi_iter
.bi_size
>> 9;
31 blk_fill_rwbs(__entry
->rwbs
, bio
->bi_opf
, bio
->bi_iter
.bi_size
);
34 TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
35 MAJOR(__entry
->dev
), MINOR(__entry
->dev
),
36 __entry
->rwbs
, (unsigned long long)__entry
->sector
,
37 __entry
->nr_sector
, __entry
->orig_major
, __entry
->orig_minor
,
38 (unsigned long long)__entry
->orig_sector
)
41 DECLARE_EVENT_CLASS(bkey
,
42 TP_PROTO(struct bkey
*k
),
53 __entry
->inode
= KEY_INODE(k
);
54 __entry
->offset
= KEY_OFFSET(k
);
55 __entry
->size
= KEY_SIZE(k
);
56 __entry
->dirty
= KEY_DIRTY(k
);
59 TP_printk("%u:%llu len %u dirty %u", __entry
->inode
,
60 __entry
->offset
, __entry
->size
, __entry
->dirty
)
63 DECLARE_EVENT_CLASS(btree_node
,
64 TP_PROTO(struct btree
*b
),
68 __field(size_t, bucket
)
72 __entry
->bucket
= PTR_BUCKET_NR(b
->c
, &b
->key
, 0);
75 TP_printk("bucket %zu", __entry
->bucket
)
80 DEFINE_EVENT(bcache_request
, bcache_request_start
,
81 TP_PROTO(struct bcache_device
*d
, struct bio
*bio
),
85 DEFINE_EVENT(bcache_request
, bcache_request_end
,
86 TP_PROTO(struct bcache_device
*d
, struct bio
*bio
),
90 DECLARE_EVENT_CLASS(bcache_bio
,
91 TP_PROTO(struct bio
*bio
),
96 __field(sector_t
, sector
)
97 __field(unsigned int, nr_sector
)
98 __array(char, rwbs
, 6 )
102 __entry
->dev
= bio_dev(bio
);
103 __entry
->sector
= bio
->bi_iter
.bi_sector
;
104 __entry
->nr_sector
= bio
->bi_iter
.bi_size
>> 9;
105 blk_fill_rwbs(__entry
->rwbs
, bio
->bi_opf
, bio
->bi_iter
.bi_size
);
108 TP_printk("%d,%d %s %llu + %u",
109 MAJOR(__entry
->dev
), MINOR(__entry
->dev
), __entry
->rwbs
,
110 (unsigned long long)__entry
->sector
, __entry
->nr_sector
)
113 DEFINE_EVENT(bcache_bio
, bcache_bypass_sequential
,
114 TP_PROTO(struct bio
*bio
),
118 DEFINE_EVENT(bcache_bio
, bcache_bypass_congested
,
119 TP_PROTO(struct bio
*bio
),
123 TRACE_EVENT(bcache_read
,
124 TP_PROTO(struct bio
*bio
, bool hit
, bool bypass
),
125 TP_ARGS(bio
, hit
, bypass
),
129 __field(sector_t
, sector
)
130 __field(unsigned int, nr_sector
)
131 __array(char, rwbs
, 6 )
132 __field(bool, cache_hit
)
133 __field(bool, bypass
)
137 __entry
->dev
= bio_dev(bio
);
138 __entry
->sector
= bio
->bi_iter
.bi_sector
;
139 __entry
->nr_sector
= bio
->bi_iter
.bi_size
>> 9;
140 blk_fill_rwbs(__entry
->rwbs
, bio
->bi_opf
, bio
->bi_iter
.bi_size
);
141 __entry
->cache_hit
= hit
;
142 __entry
->bypass
= bypass
;
145 TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
146 MAJOR(__entry
->dev
), MINOR(__entry
->dev
),
147 __entry
->rwbs
, (unsigned long long)__entry
->sector
,
148 __entry
->nr_sector
, __entry
->cache_hit
, __entry
->bypass
)
151 TRACE_EVENT(bcache_write
,
152 TP_PROTO(struct cache_set
*c
, u64 inode
, struct bio
*bio
,
153 bool writeback
, bool bypass
),
154 TP_ARGS(c
, inode
, bio
, writeback
, bypass
),
157 __array(char, uuid
, 16 )
159 __field(sector_t
, sector
)
160 __field(unsigned int, nr_sector
)
161 __array(char, rwbs
, 6 )
162 __field(bool, writeback
)
163 __field(bool, bypass
)
167 memcpy(__entry
->uuid
, c
->sb
.set_uuid
, 16);
168 __entry
->inode
= inode
;
169 __entry
->sector
= bio
->bi_iter
.bi_sector
;
170 __entry
->nr_sector
= bio
->bi_iter
.bi_size
>> 9;
171 blk_fill_rwbs(__entry
->rwbs
, bio
->bi_opf
, bio
->bi_iter
.bi_size
);
172 __entry
->writeback
= writeback
;
173 __entry
->bypass
= bypass
;
176 TP_printk("%pU inode %llu %s %llu + %u hit %u bypass %u",
177 __entry
->uuid
, __entry
->inode
,
178 __entry
->rwbs
, (unsigned long long)__entry
->sector
,
179 __entry
->nr_sector
, __entry
->writeback
, __entry
->bypass
)
182 DEFINE_EVENT(bcache_bio
, bcache_read_retry
,
183 TP_PROTO(struct bio
*bio
),
187 DEFINE_EVENT(bkey
, bcache_cache_insert
,
188 TP_PROTO(struct bkey
*k
),
194 DECLARE_EVENT_CLASS(cache_set
,
195 TP_PROTO(struct cache_set
*c
),
199 __array(char, uuid
, 16 )
203 memcpy(__entry
->uuid
, c
->sb
.set_uuid
, 16);
206 TP_printk("%pU", __entry
->uuid
)
209 DEFINE_EVENT(bkey
, bcache_journal_replay_key
,
210 TP_PROTO(struct bkey
*k
),
214 DEFINE_EVENT(cache_set
, bcache_journal_full
,
215 TP_PROTO(struct cache_set
*c
),
219 DEFINE_EVENT(cache_set
, bcache_journal_entry_full
,
220 TP_PROTO(struct cache_set
*c
),
224 TRACE_EVENT(bcache_journal_write
,
225 TP_PROTO(struct bio
*bio
, u32 keys
),
230 __field(sector_t
, sector
)
231 __field(unsigned int, nr_sector
)
232 __array(char, rwbs
, 6 )
233 __field(u32
, nr_keys
)
237 __entry
->dev
= bio_dev(bio
);
238 __entry
->sector
= bio
->bi_iter
.bi_sector
;
239 __entry
->nr_sector
= bio
->bi_iter
.bi_size
>> 9;
240 __entry
->nr_keys
= keys
;
241 blk_fill_rwbs(__entry
->rwbs
, bio
->bi_opf
, bio
->bi_iter
.bi_size
);
244 TP_printk("%d,%d %s %llu + %u keys %u",
245 MAJOR(__entry
->dev
), MINOR(__entry
->dev
), __entry
->rwbs
,
246 (unsigned long long)__entry
->sector
, __entry
->nr_sector
,
252 DEFINE_EVENT(cache_set
, bcache_btree_cache_cannibalize
,
253 TP_PROTO(struct cache_set
*c
),
257 DEFINE_EVENT(btree_node
, bcache_btree_read
,
258 TP_PROTO(struct btree
*b
),
262 TRACE_EVENT(bcache_btree_write
,
263 TP_PROTO(struct btree
*b
),
267 __field(size_t, bucket
)
268 __field(unsigned, block
)
269 __field(unsigned, keys
)
273 __entry
->bucket
= PTR_BUCKET_NR(b
->c
, &b
->key
, 0);
274 __entry
->block
= b
->written
;
275 __entry
->keys
= b
->keys
.set
[b
->keys
.nsets
].data
->keys
;
278 TP_printk("bucket %zu", __entry
->bucket
)
281 DEFINE_EVENT(btree_node
, bcache_btree_node_alloc
,
282 TP_PROTO(struct btree
*b
),
286 DEFINE_EVENT(cache_set
, bcache_btree_node_alloc_fail
,
287 TP_PROTO(struct cache_set
*c
),
291 DEFINE_EVENT(btree_node
, bcache_btree_node_free
,
292 TP_PROTO(struct btree
*b
),
296 TRACE_EVENT(bcache_btree_gc_coalesce
,
297 TP_PROTO(unsigned nodes
),
301 __field(unsigned, nodes
)
305 __entry
->nodes
= nodes
;
308 TP_printk("coalesced %u nodes", __entry
->nodes
)
311 DEFINE_EVENT(cache_set
, bcache_gc_start
,
312 TP_PROTO(struct cache_set
*c
),
316 DEFINE_EVENT(cache_set
, bcache_gc_end
,
317 TP_PROTO(struct cache_set
*c
),
321 DEFINE_EVENT(bkey
, bcache_gc_copy
,
322 TP_PROTO(struct bkey
*k
),
326 DEFINE_EVENT(bkey
, bcache_gc_copy_collision
,
327 TP_PROTO(struct bkey
*k
),
331 TRACE_EVENT(bcache_btree_insert_key
,
332 TP_PROTO(struct btree
*b
, struct bkey
*k
, unsigned op
, unsigned status
),
333 TP_ARGS(b
, k
, op
, status
),
336 __field(u64
, btree_node
)
337 __field(u32
, btree_level
)
339 __field(u64
, offset
)
347 __entry
->btree_node
= PTR_BUCKET_NR(b
->c
, &b
->key
, 0);
348 __entry
->btree_level
= b
->level
;
349 __entry
->inode
= KEY_INODE(k
);
350 __entry
->offset
= KEY_OFFSET(k
);
351 __entry
->size
= KEY_SIZE(k
);
352 __entry
->dirty
= KEY_DIRTY(k
);
354 __entry
->status
= status
;
357 TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
358 __entry
->status
, __entry
->op
,
359 __entry
->btree_node
, __entry
->btree_level
,
360 __entry
->inode
, __entry
->offset
,
361 __entry
->size
, __entry
->dirty
)
364 DECLARE_EVENT_CLASS(btree_split
,
365 TP_PROTO(struct btree
*b
, unsigned keys
),
369 __field(size_t, bucket
)
370 __field(unsigned, keys
)
374 __entry
->bucket
= PTR_BUCKET_NR(b
->c
, &b
->key
, 0);
375 __entry
->keys
= keys
;
378 TP_printk("bucket %zu keys %u", __entry
->bucket
, __entry
->keys
)
381 DEFINE_EVENT(btree_split
, bcache_btree_node_split
,
382 TP_PROTO(struct btree
*b
, unsigned keys
),
386 DEFINE_EVENT(btree_split
, bcache_btree_node_compact
,
387 TP_PROTO(struct btree
*b
, unsigned keys
),
391 DEFINE_EVENT(btree_node
, bcache_btree_set_root
,
392 TP_PROTO(struct btree
*b
),
396 TRACE_EVENT(bcache_keyscan
,
397 TP_PROTO(unsigned nr_found
,
398 unsigned start_inode
, uint64_t start_offset
,
399 unsigned end_inode
, uint64_t end_offset
),
401 start_inode
, start_offset
,
402 end_inode
, end_offset
),
405 __field(__u32
, nr_found
)
406 __field(__u32
, start_inode
)
407 __field(__u64
, start_offset
)
408 __field(__u32
, end_inode
)
409 __field(__u64
, end_offset
)
413 __entry
->nr_found
= nr_found
;
414 __entry
->start_inode
= start_inode
;
415 __entry
->start_offset
= start_offset
;
416 __entry
->end_inode
= end_inode
;
417 __entry
->end_offset
= end_offset
;
420 TP_printk("found %u keys from %u:%llu to %u:%llu", __entry
->nr_found
,
421 __entry
->start_inode
, __entry
->start_offset
,
422 __entry
->end_inode
, __entry
->end_offset
)
427 TRACE_EVENT(bcache_invalidate
,
428 TP_PROTO(struct cache
*ca
, size_t bucket
),
432 __field(unsigned, sectors
)
434 __field(__u64
, offset
)
438 __entry
->dev
= ca
->bdev
->bd_dev
;
439 __entry
->offset
= bucket
<< ca
->set
->bucket_bits
;
440 __entry
->sectors
= GC_SECTORS_USED(&ca
->buckets
[bucket
]);
443 TP_printk("invalidated %u sectors at %d,%d sector=%llu",
444 __entry
->sectors
, MAJOR(__entry
->dev
),
445 MINOR(__entry
->dev
), __entry
->offset
)
448 TRACE_EVENT(bcache_alloc
,
449 TP_PROTO(struct cache
*ca
, size_t bucket
),
454 __field(__u64
, offset
)
458 __entry
->dev
= ca
->bdev
->bd_dev
;
459 __entry
->offset
= bucket
<< ca
->set
->bucket_bits
;
462 TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry
->dev
),
463 MINOR(__entry
->dev
), __entry
->offset
)
466 TRACE_EVENT(bcache_alloc_fail
,
467 TP_PROTO(struct cache
*ca
, unsigned reserve
),
468 TP_ARGS(ca
, reserve
),
472 __field(unsigned, free
)
473 __field(unsigned, free_inc
)
474 __field(unsigned, blocked
)
478 __entry
->dev
= ca
->bdev
->bd_dev
;
479 __entry
->free
= fifo_used(&ca
->free
[reserve
]);
480 __entry
->free_inc
= fifo_used(&ca
->free_inc
);
481 __entry
->blocked
= atomic_read(&ca
->set
->prio_blocked
);
484 TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
485 MAJOR(__entry
->dev
), MINOR(__entry
->dev
), __entry
->free
,
486 __entry
->free_inc
, __entry
->blocked
)
/* Background writeback */
491 DEFINE_EVENT(bkey
, bcache_writeback
,
492 TP_PROTO(struct bkey
*k
),
496 DEFINE_EVENT(bkey
, bcache_writeback_collision
,
497 TP_PROTO(struct bkey
*k
),
501 #endif /* _TRACE_BCACHE_H */
503 /* This part must be outside protection */
504 #include <trace/define_trace.h>