#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>
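
/*
 * Shared event classes.  Each DECLARE_EVENT_CLASS() below defines the record
 * layout, assignment and print format once; the DEFINE_EVENT() instances
 * further down reuse a class for an individual tracepoint.
 */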

DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned int,	orig_major		)
		__field(unsigned int,	orig_minor		)
		__field(sector_t,	sector			)
		__field(dev_t,		orig_sector		)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->orig_major	= d->disk->major;
		__entry->orig_minor	= d->disk->first_minor;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);
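
/*
 * The bkey class logs a single bcache key: inode, offset, size and whether
 * the key is dirty.
 */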

DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32,	size				)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(bool,	dirty				)
	),

	TP_fast_assign(
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
		__entry->size	= KEY_SIZE(k);
		__entry->dirty	= KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);
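
/*
 * The btree_node class identifies a btree node by the cache bucket that
 * backs it (PTR_BUCKET_NR of the node's key).
 */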

DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);

DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);
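
/*
 * The bcache_bio class captures a bio's device, start sector, size in
 * sectors and the rwbs flag string filled in by blk_fill_rwbs().
 */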

DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
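
/*
 * bcache_read extends the per-bio information with the outcome of the
 * lookup: whether the read hit the cache and whether it bypassed it.
 */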

TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		cache_hit		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		__entry->cache_hit	= hit;
		__entry->bypass		= bypass;
	),

	TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);
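
/*
 * bcache_write identifies the cache set by UUID and the target by inode
 * number, and records the writeback and bypass decisions for the bio.
 */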

TRACE_EVENT(bcache_write,
	TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
		bool writeback, bool bypass),
	TP_ARGS(c, inode, bio, writeback, bypass),

	TP_STRUCT__entry(
		__array(char,		uuid,	16		)
		__field(u64,		inode			)
		__field(sector_t,	sector			)
		__field(unsigned int,	nr_sector		)
		__array(char,		rwbs,	6		)
		__field(bool,		writeback		)
		__field(bool,		bypass			)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
		__entry->inode		= inode;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		__entry->writeback	= writeback;
		__entry->bypass		= bypass;
	),

	TP_printk("%pU inode %llu %s %llu + %u hit %u bypass %u",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);
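
/*
 * The cache_set class carries only the 16-byte set UUID; it is reused for
 * set-wide conditions such as the journal filling up or GC start/end.
 */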

DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char,		uuid,	16		)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bcache_bio, bcache_journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);
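
/*
 * bcache_btree_write also records b->written and the key count of the
 * node's last bset, although only the bucket number appears in the printed
 * format.
 */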

TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	block			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block	= b->written;
		__entry->keys	= b->keys.set[b->keys.nsets].data->keys;
	),

	TP_printk("bucket %zu", __entry->bucket)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned,	nodes			)
	),

	TP_fast_assign(
		__entry->nodes	= nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);
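
/*
 * Garbage collection: gc_start/gc_end bracket a pass over the cache set,
 * while gc_copy and gc_copy_collision log individual keys as data is moved
 * (or a move collides with a concurrent update).
 */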

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64,	btree_node			)
		__field(u32,	btree_level			)
		__field(u32,	inode				)
		__field(u64,	offset				)
		__field(u32,	size				)
		__field(u8,	dirty				)
		__field(u8,	op				)
		__field(u8,	status				)
	),

	TP_fast_assign(
		__entry->btree_node	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level	= b->level;
		__entry->inode		= KEY_INODE(k);
		__entry->offset		= KEY_OFFSET(k);
		__entry->size		= KEY_SIZE(k);
		__entry->dirty		= KEY_DIRTY(k);
		__entry->op		= op;
		__entry->status		= status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);
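
/*
 * The btree_split class records the node's bucket and the number of keys
 * involved; it backs both the node_split and node_compact events.
 */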

DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t,		bucket			)
		__field(unsigned,	keys			)
	),

	TP_fast_assign(
		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys	= keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);
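
/*
 * bcache_keyscan reports how many keys were found in the scanned
 * start_inode:start_offset to end_inode:end_offset range.
 */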

TRACE_EVENT(bcache_keyscan,
	TP_PROTO(unsigned nr_found,
		 unsigned start_inode, uint64_t start_offset,
		 unsigned end_inode, uint64_t end_offset),
	TP_ARGS(nr_found,
		start_inode, start_offset,
		end_inode, end_offset),

	TP_STRUCT__entry(
		__field(__u32,	nr_found			)
		__field(__u32,	start_inode			)
		__field(__u64,	start_offset			)
		__field(__u32,	end_inode			)
		__field(__u64,	end_offset			)
	),

	TP_fast_assign(
		__entry->nr_found	= nr_found;
		__entry->start_inode	= start_inode;
		__entry->start_offset	= start_offset;
		__entry->end_inode	= end_inode;
		__entry->end_offset	= end_offset;
	),

	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
		  __entry->start_inode, __entry->start_offset,
		  __entry->end_inode, __entry->end_offset)
);
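
/*
 * Allocator events.  Bucket numbers are converted to a sector offset with
 * the cache's bucket_bits shift, so the printed "sector=" is a position on
 * the cache device.
 */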

TRACE_EVENT(bcache_invalidate,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(unsigned,	sectors			)
		__field(dev_t,		dev			)
		__field(__u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
		__entry->sectors	= GC_SECTORS_USED(&ca->buckets[bucket]);
	),

	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
		  __entry->sectors, MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(__u64,		offset			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->offset		= bucket << ca->set->bucket_bits;
	),

	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__field(dev_t,		dev			)
		__field(unsigned,	free			)
		__field(unsigned,	free_inc		)
		__field(unsigned,	blocked			)
	),

	TP_fast_assign(
		__entry->dev		= ca->bdev->bd_dev;
		__entry->free		= fifo_used(&ca->free[reserve]);
		__entry->free_inc	= fifo_used(&ca->free_inc);
		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
		  __entry->free_inc, __entry->blocked)
);

/* Background writeback */

DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>