/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>
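/*
 * Event class shared by the request start/end tracepoints below: it records
 * the bio as seen by bcache (device, sector, size, rwbs flags) together with
 * the originating bcache device's major/minor and sector. The "- 16" on
 * orig_sector appears to undo the default 8KiB (16-sector) data offset in
 * front of the backing device's data.
 */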
DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned int, orig_major)
		__field(unsigned int, orig_minor)
		__field(sector_t, sector)
		__field(dev_t, orig_sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->orig_major	= d->disk->major;
		__entry->orig_minor	= d->disk->first_minor;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);
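/*
 * Event class for tracepoints that log a single bcache key: inode, offset,
 * length, and dirty bit, decoded via the KEY_*() accessors.
 */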
DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32, size)
		__field(u32, inode)
		__field(u64, offset)
		__field(bool, dirty)
	),

	TP_fast_assign(
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);
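/*
 * Event class for btree node tracepoints: identifies the node only by the
 * cache bucket backing its key.
 */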
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t, bucket)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);
/* request.c */

DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);
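/*
 * Generic per-bio event class: device, start sector, size in 512-byte
 * sectors, and the decoded rwbs flag string.
 */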
DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);
DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
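/*
 * Read completion: in addition to the bio fields, records whether the read
 * hit the cache and whether it bypassed the cache entirely.
 */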
TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
		__field(bool, cache_hit)
		__field(bool, bypass)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		__entry->cache_hit = hit;
		__entry->bypass = bypass;
	),

	TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);
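/*
 * Write submission: tagged with the cache set UUID and the backing-device
 * inode, plus the writeback/bypass decisions made for this bio.
 */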
TRACE_EVENT(bcache_write,
	TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
		bool writeback, bool bypass),
	TP_ARGS(c, inode, bio, writeback, bypass),

	TP_STRUCT__entry(
		__array(char, uuid, 16)
		__field(u64, inode)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
		__field(bool, writeback)
		__field(bool, bypass)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
		__entry->inode		= inode;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
		__entry->writeback = writeback;
		__entry->bypass = bypass;
	),

	TP_printk("%pU inode %llu %s %llu + %u hit %u bypass %u",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);
DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);
/* Journal */

DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char, uuid, 16)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);
DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);
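/*
 * Journal write: a bio record extended with the number of keys in the
 * journal entry being written.
 */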
TRACE_EVENT(bcache_journal_write,
	TP_PROTO(struct bio *bio, u32 keys),
	TP_ARGS(bio, keys),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
		__field(u32, nr_keys)
	),

	TP_fast_assign(
		__entry->dev		= bio_dev(bio);
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		__entry->nr_keys	= keys;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u keys %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector,
		  __entry->nr_keys)
);
/* Btree */

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);
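/*
 * Btree node write: logs the node's bucket, how many blocks of the node
 * were already written, and the key count of the node's last in-memory bset.
 */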
TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t, bucket)
		__field(unsigned, block)
		__field(unsigned, keys)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block	= b->written;
		__entry->keys	= b->keys.set[b->keys.nsets].data->keys;
	),

	TP_printk("bucket %zu written block %u + %u",
		__entry->bucket, __entry->block, __entry->keys)
);
DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);
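/* Garbage collection: node coalescing, start/end markers, and key moves. */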
TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned, nodes)
	),

	TP_fast_assign(
		__entry->nodes	= nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);
DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);
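/*
 * Key insertion into a btree node: combines the bkey fields with the node's
 * bucket and level, and the caller-supplied op and status codes.
 */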
TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64, btree_node)
		__field(u32, btree_level)
		__field(u32, inode)
		__field(u64, offset)
		__field(u32, size)
		__field(u8, dirty)
		__field(u8, op)
		__field(u8, status)
	),

	TP_fast_assign(
		__entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level = b->level;
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
		__entry->op = op;
		__entry->status = status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);
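/*
 * Event class shared by the node split/compact tracepoints: the node's
 * bucket and the number of keys involved.
 */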
DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t, bucket)
		__field(unsigned, keys)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys	= keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);
DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);
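/*
 * Key scan: how many keys were found in the range
 * start_inode:start_offset .. end_inode:end_offset.
 */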
TRACE_EVENT(bcache_keyscan,
	TP_PROTO(unsigned nr_found,
		 unsigned start_inode, uint64_t start_offset,
		 unsigned end_inode, uint64_t end_offset),
	TP_ARGS(nr_found,
		start_inode, start_offset,
		end_inode, end_offset),

	TP_STRUCT__entry(
		__field(__u32, nr_found)
		__field(__u32, start_inode)
		__field(__u64, start_offset)
		__field(__u32, end_inode)
		__field(__u64, end_offset)
	),

	TP_fast_assign(
		__entry->nr_found	= nr_found;
		__entry->start_inode	= start_inode;
		__entry->start_offset	= start_offset;
		__entry->end_inode	= end_inode;
		__entry->end_offset	= end_offset;
	),

	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
		  __entry->start_inode, __entry->start_offset,
		  __entry->end_inode, __entry->end_offset)
);
/* Allocator */

TRACE_EVENT(bcache_invalidate,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(unsigned, sectors)
		__field(dev_t, dev)
		__field(__u64, offset)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
		__entry->sectors	= GC_SECTORS_USED(&ca->buckets[bucket]);
	),

	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
		  __entry->sectors, MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);
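/*
 * Bucket allocation: the bucket's starting sector is the bucket index
 * shifted left by the cache set's bucket_bits.
 */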
TRACE_EVENT(bcache_alloc,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(__u64, offset)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
	),

	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);
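/*
 * Allocation failure: snapshots the depth of the free fifo for the requested
 * reserve, the free_inc fifo, and the cache set's prio_blocked count.
 */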
TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned, free)
		__field(unsigned, free_inc)
		__field(unsigned, blocked)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->free		= fifo_used(&ca->free[reserve]);
		__entry->free_inc	= fifo_used(&ca->free_inc);
		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
		  __entry->free_inc, __entry->blocked)
);
/* Background writeback */

DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);
#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
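/*
 * Usage sketch (not part of the original header; assumes tracefs is mounted
 * at /sys/kernel/tracing): these events can be enabled and read like any
 * other tracepoint group, e.g.
 *
 *	echo 1 > /sys/kernel/tracing/events/bcache/bcache_read/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */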