#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

struct search;

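/*
 * Tracepoints for bcache, the block device caching layer. Shared event
 * classes (bcache_request, bkey, btree_node, ...) are declared first;
 * the individual events are then stamped out with DEFINE_EVENT().
 */

/*
 * bcache_request: events that log a bio together with the bcache device
 * it originated from (orig_major/orig_minor) and its original sector
 * before remapping.
 */
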
DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct search *s, struct bio *bio),
	TP_ARGS(s, bio),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned int, orig_major)
		__field(unsigned int, orig_minor)
		__field(sector_t, sector)
		__field(sector_t, orig_sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
	),

	TP_fast_assign(
		__entry->dev = bio->bi_bdev->bd_dev;
		__entry->orig_major = s->d->disk->major;
		__entry->orig_minor = s->d->disk->first_minor;
		__entry->sector = bio->bi_sector;
		__entry->orig_sector = bio->bi_sector - 16;
		__entry->nr_sector = bio->bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);

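/*
 * bkey: events that log a single key - cache inode, offset, length and
 * whether the key points at dirty data.
 */
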
DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32, size)
		__field(u32, inode)
		__field(u64, offset)
		__field(bool, dirty)
	),

	TP_fast_assign(
		__entry->inode = KEY_INODE(k);
		__entry->offset = KEY_OFFSET(k);
		__entry->size = KEY_SIZE(k);
		__entry->dirty = KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);

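/* btree_node: events identified only by the bucket backing the node. */
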
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t, bucket)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);

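/* request.c */
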
DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct search *s, struct bio *bio),
	TP_ARGS(s, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct search *s, struct bio *bio),
	TP_ARGS(s, bio)
);

DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
	),

	TP_fast_assign(
		__entry->dev = bio->bi_bdev->bd_dev;
		__entry->sector = bio->bi_sector;
		__entry->nr_sector = bio->bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

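/*
 * bcache_read: read-path event; cache_hit records whether the lookup
 * hit the cache, bypass whether the cache was skipped entirely.
 */
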
TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
		__field(bool, cache_hit)
		__field(bool, bypass)
	),

	TP_fast_assign(
		__entry->dev = bio->bi_bdev->bd_dev;
		__entry->sector = bio->bi_sector;
		__entry->nr_sector = bio->bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
		__entry->cache_hit = hit;
		__entry->bypass = bypass;
	),

	TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);

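/*
 * bcache_write: write-path event; writeback records whether the write
 * went to the cache as dirty data, bypass whether it skipped the cache.
 */
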
TRACE_EVENT(bcache_write,
	TP_PROTO(struct bio *bio, bool writeback, bool bypass),
	TP_ARGS(bio, writeback, bypass),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
		__field(bool, writeback)
		__field(bool, bypass)
	),

	TP_fast_assign(
		__entry->dev = bio->bi_bdev->bd_dev;
		__entry->sector = bio->bi_sector;
		__entry->nr_sector = bio->bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
		__entry->writeback = writeback;
		__entry->bypass = bypass;
	),

	TP_printk("%d,%d %s %llu + %u writeback %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

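/* Journal */
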
DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char, uuid, 16)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bcache_bio, bcache_journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

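/* Btree */
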
DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t, bucket)
		__field(unsigned, block)
		__field(unsigned, keys)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block = b->written;
		__entry->keys = b->sets[b->nsets].data->keys;
	),

	TP_printk("bucket %zu", __entry->bucket)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc_fail,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned, nodes)
	),

	TP_fast_assign(
		__entry->nodes = nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

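/*
 * bcache_btree_insert_key: logs the insert status and op, the btree
 * node (bucket number) and level it landed in, and the key itself.
 */
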
TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64, btree_node)
		__field(u32, btree_level)
		__field(u32, inode)
		__field(u64, offset)
		__field(u32, size)
		__field(u8, dirty)
		__field(u8, op)
		__field(u8, status)
	),

	TP_fast_assign(
		__entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level = b->level;
		__entry->inode = KEY_INODE(k);
		__entry->offset = KEY_OFFSET(k);
		__entry->size = KEY_SIZE(k);
		__entry->dirty = KEY_DIRTY(k);
		__entry->op = op;
		__entry->status = status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);

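/* btree_split: shared layout for node split/compaction events. */
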
DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t, bucket)
		__field(unsigned, keys)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys = keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

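/* Allocator */
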
TRACE_EVENT(bcache_alloc_invalidate,
	TP_PROTO(struct cache *ca),
	TP_ARGS(ca),

	TP_STRUCT__entry(
		__field(unsigned, free)
		__field(unsigned, free_inc)
		__field(unsigned, free_inc_size)
		__field(unsigned, unused)
	),

	TP_fast_assign(
		__entry->free = fifo_used(&ca->free);
		__entry->free_inc = fifo_used(&ca->free_inc);
		__entry->free_inc_size = ca->free_inc.size;
		__entry->unused = fifo_used(&ca->unused);
	),

	TP_printk("free %u free_inc %u/%u unused %u", __entry->free,
		  __entry->free_inc, __entry->free_inc_size, __entry->unused)
);

TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca),
	TP_ARGS(ca),

	TP_STRUCT__entry(
		__field(unsigned, free)
		__field(unsigned, free_inc)
		__field(unsigned, unused)
		__field(unsigned, blocked)
	),

	TP_fast_assign(
		__entry->free = fifo_used(&ca->free);
		__entry->free_inc = fifo_used(&ca->free_inc);
		__entry->unused = fifo_used(&ca->unused);
		__entry->blocked = atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("free %u free_inc %u unused %u blocked %u", __entry->free,
		  __entry->free_inc, __entry->unused, __entry->blocked)
);

/* Background writeback */

DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>