/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcachefs

#if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)

#include <linux/tracepoint.h>

#define TRACE_BPOS_entries(name)			\
	__field(u64, name##_inode)			\
	__field(u64, name##_offset)			\
	__field(u32, name##_snapshot)

#define TRACE_BPOS_assign(dst, src)			\
	__entry->dst##_inode	= (src).inode;		\
	__entry->dst##_offset	= (src).offset;		\
	__entry->dst##_snapshot	= (src).snapshot
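
/*
 * Illustrative expansion (comment only, not compiled): for a field named
 * "pos", TRACE_BPOS_entries(pos) declares
 *
 *	__field(u64, pos_inode)
 *	__field(u64, pos_offset)
 *	__field(u32, pos_snapshot)
 *
 * and TRACE_BPOS_assign(pos, path->pos) fills them:
 *
 *	__entry->pos_inode	= (path->pos).inode;
 *	__entry->pos_offset	= (path->pos).offset;
 *	__entry->pos_snapshot	= (path->pos).snapshot;
 *
 * i.e. a struct bpos is stored as three flat ring-buffer fields that
 * TP_printk() can later format as "%llu:%llu:%u".
 */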
DECLARE_EVENT_CLASS(bpos,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p),

	TP_STRUCT__entry(
		TRACE_BPOS_entries(p)
	),

	TP_fast_assign(
		TRACE_BPOS_assign(p, *p);
	),

	TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
);
DECLARE_EVENT_CLASS(fs_str,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__string(str, str)
	),

	TP_fast_assign(
		__entry->dev = c->dev;
		__assign_str(str);
	),

	TP_printk("%d,%d\n%s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
);
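
/*
 * Usage sketch (illustrative only): fs_str events take a preformatted
 * string, which callers typically build with a printbuf, e.g. for the
 * move_extent event defined later in this file:
 *
 *	struct printbuf buf = PRINTBUF;
 *
 *	bch2_bkey_val_to_text(&buf, c, k);
 *	trace_move_extent(c, buf.buf);
 *	printbuf_exit(&buf);
 *
 * and the result can be watched via tracefs:
 *
 *	echo 1 > /sys/kernel/tracing/events/bcachefs/move_extent/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */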
DECLARE_EVENT_CLASS(trans_str,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__string(str, str)
	),

	TP_fast_assign(
		__entry->dev = trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip = caller_ip;
		__assign_str(str);
	),

	TP_printk("%d,%d %s %pS %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, (void *) __entry->caller_ip, __get_str(str))
);
DECLARE_EVENT_CLASS(trans_str_nocaller,
	TP_PROTO(struct btree_trans *trans, const char *str),
	TP_ARGS(trans, str),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__array(char, trans_fn, 32)
		__string(str, str)
	),

	TP_fast_assign(
		__entry->dev = trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__assign_str(str);
	),

	TP_printk("%d,%d %s %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, __get_str(str))
);
DECLARE_EVENT_CLASS(btree_node_nofs,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u8, level)
		__field(u8, btree_id)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev = c->dev;
		__entry->level = b->c.level;
		__entry->btree_id = b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__array(char, trans_fn, 32)
		__field(u8, level)
		__field(u8, btree_id)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev = trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->level = b->c.level;
		__entry->btree_id = b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %s %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn,
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);
DECLARE_EVENT_CLASS(bch_fs,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__field(dev_t, dev)
	),

	TP_fast_assign(
		__entry->dev = c->dev;
	),

	TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
);
DECLARE_EVENT_CLASS(btree_trans,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__array(char, trans_fn, 32)
	),

	TP_fast_assign(
		__entry->dev = trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn)
);
DECLARE_EVENT_CLASS(bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
	),

	TP_fast_assign(
		__entry->dev = bio->bi_bdev ? bio_dev(bio) : 0;
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long) __entry->sector, __entry->nr_sector)
);
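
/*
 * Informal note: blk_fill_rwbs() renders bio->bi_opf as the short blktrace
 * flag string ("R" read, "W" write, "D" discard, plus modifiers such as
 * "S" sync and "M" metadata), so a journal_write event might look like:
 *
 *	254,0 WS 8192 + 16
 *
 * i.e. a sync write of 16 sectors at sector 8192 on device 254:0 (numbers
 * made up for illustration).
 */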
/* fs.c: */
TRACE_EVENT(bch2_sync_fs,
	TP_PROTO(struct super_block *sb, int wait),

	TP_ARGS(sb, wait),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(int, wait)
	),

	TP_fast_assign(
		__entry->dev = sb->s_dev;
		__entry->wait = wait;
	),

	TP_printk("dev %d,%d wait %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->wait)
);
/* fs-io.c: */
TRACE_EVENT(bch2_fsync,
	TP_PROTO(struct file *file, int datasync),

	TP_ARGS(file, datasync),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(ino_t, ino)
		__field(ino_t, parent)
		__field(int, datasync)
	),

	TP_fast_assign(
		struct dentry *dentry = file->f_path.dentry;

		__entry->dev = dentry->d_sb->s_dev;
		__entry->ino = d_inode(dentry)->i_ino;
		__entry->parent = d_inode(dentry->d_parent)->i_ino;
		__entry->datasync = datasync;
	),

	TP_printk("dev %d,%d ino %lu parent %lu datasync %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long) __entry->ino,
		  (unsigned long) __entry->parent, __entry->datasync)
);
/* super-io.c: */
TRACE_EVENT(write_super,
	TP_PROTO(struct bch_fs *c, unsigned long ip),
	TP_ARGS(c, ip),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned long, ip)
	),

	TP_fast_assign(
		__entry->dev = c->dev;
		__entry->ip = ip;
	),

	TP_printk("%d,%d for %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (void *) __entry->ip)
);
/* io.c: */

DEFINE_EVENT(bio, read_promote,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

TRACE_EVENT(read_nopromote,
	TP_PROTO(struct bch_fs *c, int ret),
	TP_ARGS(c, ret),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__array(char, ret, 32)
	),

	TP_fast_assign(
		__entry->dev = c->dev;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%d,%d ret %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ret)
);

DEFINE_EVENT(bio, read_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_split,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_reuse_race,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
/* Journal */

DEFINE_EVENT(bch_fs, journal_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(fs_str, journal_entry_full,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, journal_entry_close,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(bio, journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
TRACE_EVENT(journal_reclaim_start,
	TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
		 u64 min_nr, u64 min_key_cache,
		 u64 btree_cache_dirty, u64 btree_cache_total,
		 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
	TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
		btree_cache_dirty, btree_cache_total,
		btree_key_cache_dirty, btree_key_cache_total),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(bool, direct)
		__field(bool, kicked)
		__field(u64, min_nr)
		__field(u64, min_key_cache)
		__field(u64, btree_cache_dirty)
		__field(u64, btree_cache_total)
		__field(u64, btree_key_cache_dirty)
		__field(u64, btree_key_cache_total)
	),

	TP_fast_assign(
		__entry->dev = c->dev;
		__entry->direct = direct;
		__entry->kicked = kicked;
		__entry->min_nr = min_nr;
		__entry->min_key_cache = min_key_cache;
		__entry->btree_cache_dirty = btree_cache_dirty;
		__entry->btree_cache_total = btree_cache_total;
		__entry->btree_key_cache_dirty = btree_key_cache_dirty;
		__entry->btree_key_cache_total = btree_key_cache_total;
	),

	TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu btree key cache %llu/%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->direct,
		  __entry->kicked,
		  __entry->min_nr,
		  __entry->min_key_cache,
		  __entry->btree_cache_dirty,
		  __entry->btree_cache_total,
		  __entry->btree_key_cache_dirty,
		  __entry->btree_key_cache_total)
);
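
/*
 * Informal reading: "direct" distinguishes reclaim run synchronously by a
 * thread that needs journal space from work done by the background
 * journal-reclaim thread, and "kicked" records whether that thread was
 * explicitly woken rather than waking on its own schedule (see
 * journal_reclaim.c for the exact conditions).
 */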
TRACE_EVENT(journal_reclaim_finish,
	TP_PROTO(struct bch_fs *c, u64 nr_flushed),
	TP_ARGS(c, nr_flushed),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u64, nr_flushed)
	),

	TP_fast_assign(
		__entry->dev = c->dev;
		__entry->nr_flushed = nr_flushed;
	),

	TP_printk("%d,%d flushed %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->nr_flushed)
);
/* bset.c: */

DEFINE_EVENT(bpos, bkey_pack_pos_fail,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p)
);
/* Btree cache: */

TRACE_EVENT(btree_cache_scan,
	TP_PROTO(long nr_to_scan, long can_free, long ret),
	TP_ARGS(nr_to_scan, can_free, ret),

	TP_STRUCT__entry(
		__field(long, nr_to_scan)
		__field(long, can_free)
		__field(long, ret)
	),

	TP_fast_assign(
		__entry->nr_to_scan = nr_to_scan;
		__entry->can_free = can_free;
		__entry->ret = ret;
	),

	TP_printk("scanned for %li nodes, can free %li, ret %li",
		  __entry->nr_to_scan, __entry->can_free, __entry->ret)
);

DEFINE_EVENT(btree_node_nofs, btree_cache_reap,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock_fail,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_unlock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);
/* Btree */

DEFINE_EVENT(btree_node, btree_node_read,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

TRACE_EVENT(btree_node_write,
	TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
	TP_ARGS(b, bytes, sectors),

	TP_STRUCT__entry(
		__field(enum btree_node_type, type)
		__field(unsigned, bytes)
		__field(unsigned, sectors)
	),

	TP_fast_assign(
		__entry->type = btree_node_type(b);
		__entry->bytes = bytes;
		__entry->sectors = sectors;
	),

	TP_printk("bkey type %u bytes %u sectors %u",
		  __entry->type, __entry->bytes, __entry->sectors)
);

DEFINE_EVENT(btree_node, btree_node_alloc,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_free,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
TRACE_EVENT(btree_reserve_get_fail,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 size_t required,
		 int ret),
	TP_ARGS(trans_fn, caller_ip, required, ret),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(size_t, required)
		__array(char, ret, 32)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
		__entry->caller_ip = caller_ip;
		__entry->required = required;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%s %pS required %zu ret %s",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->required,
		  __entry->ret)
);
DEFINE_EVENT(btree_node, btree_node_compact,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_merge,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_split,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_rewrite,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_set_root,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
TRACE_EVENT(btree_path_relock_fail,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned level),
	TP_ARGS(trans, caller_ip, path, level),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(u8, btree_id)
		__field(u8, level)
		__field(u8, path_idx)
		TRACE_BPOS_entries(pos)
		__array(char, node, 24)
		__field(u8, self_read_count)
		__field(u8, self_intent_count)
		__field(u8, read_count)
		__field(u8, intent_count)
		__field(u32, iter_lock_seq)
		__field(u32, node_lock_seq)
	),

	TP_fast_assign(
		struct btree *b = btree_path_node(path, level);
		struct six_lock_count c;

		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip = caller_ip;
		__entry->btree_id = path->btree_id;
		__entry->level = level;
		__entry->path_idx = path - trans->paths;
		TRACE_BPOS_assign(pos, path->pos);

		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
		__entry->self_read_count = c.n[SIX_LOCK_read];
		__entry->self_intent_count = c.n[SIX_LOCK_intent];

		if (IS_ERR(b)) {
			strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
		} else {
			c = six_lock_counts(&path->l[level].b->c.lock);
			__entry->read_count = c.n[SIX_LOCK_read];
			__entry->intent_count = c.n[SIX_LOCK_intent];
			scnprintf(__entry->node, sizeof(__entry->node), "%px", &b->c);
		}

		__entry->iter_lock_seq = path->l[level].lock_seq;
		__entry->node_lock_seq = is_btree_node(path, level)
			? six_lock_seq(&path->l[level].b->c.lock)
			: 0;
	),

	TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->path_idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->node,
		  __entry->self_read_count,
		  __entry->self_intent_count,
		  __entry->read_count,
		  __entry->intent_count,
		  __entry->iter_lock_seq,
		  __entry->node_lock_seq)
);
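
/*
 * Reading the output (informal): "held r:i" is this transaction's own
 * read:intent counts on the node, from bch2_btree_node_lock_counts(),
 * while "lock count r:i" is the total across all holders, from
 * six_lock_counts(); comparing "iter seq" with "lock seq" shows whether
 * the node was modified since this path last held the lock.
 */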
TRACE_EVENT(btree_path_upgrade_fail,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned level),
	TP_ARGS(trans, caller_ip, path, level),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(u8, btree_id)
		__field(u8, level)
		__field(u8, path_idx)
		TRACE_BPOS_entries(pos)
		__field(u8, locked)
		__field(u8, self_read_count)
		__field(u8, self_intent_count)
		__field(u8, read_count)
		__field(u8, intent_count)
		__field(u32, iter_lock_seq)
		__field(u32, node_lock_seq)
	),

	TP_fast_assign(
		struct six_lock_count c;

		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip = caller_ip;
		__entry->btree_id = path->btree_id;
		__entry->level = level;
		__entry->path_idx = path - trans->paths;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->locked = btree_node_locked(path, level);

		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
		__entry->self_read_count = c.n[SIX_LOCK_read];
		__entry->self_intent_count = c.n[SIX_LOCK_intent];
		c = six_lock_counts(&path->l[level].b->c.lock);
		__entry->read_count = c.n[SIX_LOCK_read];
		__entry->intent_count = c.n[SIX_LOCK_intent];
		__entry->iter_lock_seq = path->l[level].lock_seq;
		__entry->node_lock_seq = is_btree_node(path, level)
			? six_lock_seq(&path->l[level].b->c.lock)
			: 0;
	),

	TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->path_idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->locked,
		  __entry->self_read_count,
		  __entry->self_intent_count,
		  __entry->read_count,
		  __entry->intent_count,
		  __entry->iter_lock_seq,
		  __entry->node_lock_seq)
);
/* Garbage collection */

DEFINE_EVENT(bch_fs, gc_gens_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_gens_end,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

/* Allocator */

DEFINE_EVENT(fs_str, bucket_alloc,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, bucket_alloc_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
TRACE_EVENT(discard_buckets,
	TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
		 u64 need_journal_commit, u64 discarded, const char *err),
	TP_ARGS(c, seen, open, need_journal_commit, discarded, err),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u64, seen)
		__field(u64, open)
		__field(u64, need_journal_commit)
		__field(u64, discarded)
		__array(char, err, 16)
	),

	TP_fast_assign(
		__entry->dev = c->dev;
		__entry->seen = seen;
		__entry->open = open;
		__entry->need_journal_commit = need_journal_commit;
		__entry->discarded = discarded;
		strscpy(__entry->err, err, sizeof(__entry->err));
	),

	TP_printk("%d,%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->seen,
		  __entry->open,
		  __entry->need_journal_commit,
		  __entry->discarded,
		  __entry->err)
);
TRACE_EVENT(bucket_invalidate,
	TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
	TP_ARGS(c, dev, bucket, sectors),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u32, dev_idx)
		__field(u32, sectors)
		__field(u64, bucket)
	),

	TP_fast_assign(
		__entry->dev = c->dev;
		__entry->dev_idx = dev;
		__entry->sectors = sectors;
		__entry->bucket = bucket;
	),

	TP_printk("%d,%d invalidated %u:%llu cached sectors %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket,
		  __entry->sectors)
);
/* Moving IO */

TRACE_EVENT(bucket_evacuate,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket),
	TP_ARGS(c, bucket),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u32, dev_idx)
		__field(u64, bucket)
	),

	TP_fast_assign(
		__entry->dev = c->dev;
		__entry->dev_idx = bucket->inode;
		__entry->bucket = bucket->offset;
	),

	TP_printk("%d,%d %u:%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dev_idx, __entry->bucket)
);
DEFINE_EVENT(fs_str, move_extent,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_read,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_write,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_finish,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, move_extent_start_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
TRACE_EVENT(move_data,
	TP_PROTO(struct bch_fs *c,
		 struct bch_move_stats *stats),
	TP_ARGS(c, stats),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u64, keys_moved)
		__field(u64, keys_raced)
		__field(u64, sectors_seen)
		__field(u64, sectors_moved)
		__field(u64, sectors_raced)
	),

	TP_fast_assign(
		__entry->dev = c->dev;
		__entry->keys_moved = atomic64_read(&stats->keys_moved);
		__entry->keys_raced = atomic64_read(&stats->keys_raced);
		__entry->sectors_seen = atomic64_read(&stats->sectors_seen);
		__entry->sectors_moved = atomic64_read(&stats->sectors_moved);
		__entry->sectors_raced = atomic64_read(&stats->sectors_raced);
	),

	TP_printk("%d,%d keys moved %llu raced %llu "
		  "sectors seen %llu moved %llu raced %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->keys_moved,
		  __entry->keys_raced,
		  __entry->sectors_seen,
		  __entry->sectors_moved,
		  __entry->sectors_raced)
);
TRACE_EVENT(evacuate_bucket,
	TP_PROTO(struct bch_fs *c, struct bpos *bucket,
		 unsigned sectors, unsigned bucket_size,
		 u64 fragmentation, int ret),
	TP_ARGS(c, bucket, sectors, bucket_size, fragmentation, ret),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u64, member)
		__field(u64, bucket)
		__field(u32, sectors)
		__field(u32, bucket_size)
		__field(u64, fragmentation)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->dev = c->dev;
		__entry->member = bucket->inode;
		__entry->bucket = bucket->offset;
		__entry->sectors = sectors;
		__entry->bucket_size = bucket_size;
		__entry->fragmentation = fragmentation;
		__entry->ret = ret;
	),

	TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->member, __entry->bucket,
		  __entry->sectors, __entry->bucket_size,
		  __entry->fragmentation, __entry->ret)
);
TRACE_EVENT(copygc,
	TP_PROTO(struct bch_fs *c,
		 u64 sectors_moved, u64 sectors_not_moved,
		 u64 buckets_moved, u64 buckets_not_moved),
	TP_ARGS(c,
		sectors_moved, sectors_not_moved,
		buckets_moved, buckets_not_moved),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u64, sectors_moved)
		__field(u64, sectors_not_moved)
		__field(u64, buckets_moved)
		__field(u64, buckets_not_moved)
	),

	TP_fast_assign(
		__entry->dev = c->dev;
		__entry->sectors_moved = sectors_moved;
		__entry->sectors_not_moved = sectors_not_moved;
		__entry->buckets_moved = buckets_moved;
		__entry->buckets_not_moved = buckets_not_moved;
	),

	TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->sectors_moved, __entry->sectors_not_moved,
		  __entry->buckets_moved, __entry->buckets_not_moved)
);
TRACE_EVENT(copygc_wait,
	TP_PROTO(struct bch_fs *c,
		 u64 wait_amount, u64 until),
	TP_ARGS(c, wait_amount, until),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(u64, wait_amount)
		__field(u64, until)
	),

	TP_fast_assign(
		__entry->dev = c->dev;
		__entry->wait_amount = wait_amount;
		__entry->until = until;
	),

	TP_printk("%d,%d waiting for %llu sectors until %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->wait_amount, __entry->until)
);
/* btree transactions: */

DECLARE_EVENT_CLASS(transaction_event,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip = caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
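
/*
 * Illustrative call pattern: callers pass the address they want the event
 * attributed to, usually _RET_IP_ or _THIS_IP_, e.g.
 *
 *	trace_transaction_commit(trans, _RET_IP_);
 *
 * so the %pS above resolves to the function that triggered the event
 * rather than to the tracepoint call site itself.
 */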
DEFINE_EVENT(transaction_event, transaction_commit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_injected,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
TRACE_EVENT(trans_restart_split_race,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(u8, level)
		__field(u16, written)
		__field(u16, blocks)
		__field(u16, u64s_remaining)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip = caller_ip;
		__entry->level = b->c.level;
		__entry->written = b->written;
		__entry->blocks = btree_blocks(trans->c);
		__entry->u64s_remaining = bch2_btree_keys_u64s_remaining(b);
	),

	TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->level,
		  __entry->written, __entry->blocks,
		  __entry->u64s_remaining)
);
TRACE_EVENT(trans_blocked_journal_reclaim,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)

		__field(unsigned long, key_cache_nr_keys)
		__field(unsigned long, key_cache_nr_dirty)
		__field(long, must_wait)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip = caller_ip;
		__entry->key_cache_nr_keys = atomic_long_read(&trans->c->btree_key_cache.nr_keys);
		__entry->key_cache_nr_dirty = atomic_long_read(&trans->c->btree_key_cache.nr_dirty);
		__entry->must_wait = __bch2_btree_key_cache_must_wait(trans->c);
	),

	TP_printk("%s %pS key cache keys %lu dirty %lu must_wait %li",
		  __entry->trans_fn, (void *) __entry->caller_ip,
		  __entry->key_cache_nr_keys,
		  __entry->key_cache_nr_dirty,
		  __entry->must_wait)
);
TRACE_EVENT(trans_restart_journal_preres_get,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned flags),
	TP_ARGS(trans, caller_ip, flags),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(unsigned, flags)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip = caller_ip;
		__entry->flags = flags;
	),

	TP_printk("%s %pS %x", __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->flags)
);
DEFINE_EVENT(transaction_event, trans_restart_fault_inject,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_traverse_all,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_key_cache_raced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(trans_str, trans_restart_too_many_iters,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 const char *paths),
	TP_ARGS(trans, caller_ip, paths)
);
DECLARE_EVENT_CLASS(transaction_restart_iter,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(u8, btree_id)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip = caller_ip;
		__entry->btree_id = path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
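
/*
 * Context sketch (informal): trans_restart_* events fire when an operation
 * returns a transaction-restart error and must be retried from the top.
 * The caller-side shape is a retry loop such as lockrestart_do() from
 * btree_iter.h, roughly (variable names made up for illustration):
 *
 *	ret = lockrestart_do(trans,
 *		bch2_btree_iter_traverse(&iter) ?:
 *		bch2_trans_update(trans, &iter, &new_key, 0));
 *
 * one of these tracepoints records why the restart happened, then the
 * sequence runs again.
 */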
DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_reused,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_split,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
TRACE_EVENT(trans_restart_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want,
		 unsigned new_locks_want,
		 struct get_locks_fail *f),
	TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(u8, btree_id)
		__field(u8, old_locks_want)
		__field(u8, new_locks_want)
		__field(u8, level)
		__field(u32, path_seq)
		__field(u32, node_seq)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip = caller_ip;
		__entry->btree_id = path->btree_id;
		__entry->old_locks_want = old_locks_want;
		__entry->new_locks_want = new_locks_want;
		__entry->level = f->l;
		__entry->path_seq = path->l[f->l].lock_seq;
		__entry->node_seq = IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  __entry->level,
		  __entry->path_seq,
		  __entry->node_seq)
);
DEFINE_EVENT(trans_str, trans_restart_relock,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_next_node,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_parent_for_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_after_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_key_cache_fill,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path_intent,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_traverse,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);

DEFINE_EVENT(transaction_restart_iter, trans_restart_memory_allocation_failure,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
);
DEFINE_EVENT(trans_str_nocaller, trans_restart_would_deadlock,
	TP_PROTO(struct btree_trans *trans,
		 const char *cycle),
	TP_ARGS(trans, cycle)
);

DEFINE_EVENT(transaction_event, trans_restart_would_deadlock_recursion_limit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(trans_restart_would_deadlock_write,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%s", __entry->trans_fn)
);
TRACE_EVENT(trans_restart_mem_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 unsigned long bytes),
	TP_ARGS(trans, caller_ip, bytes),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(unsigned long, bytes)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip = caller_ip;
		__entry->bytes = bytes;
	),

	TP_printk("%s %pS bytes %lu",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->bytes)
);
TRACE_EVENT(trans_restart_key_cache_key_realloced,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_u64s,
		 unsigned new_u64s),
	TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(enum btree_id, btree_id)
		TRACE_BPOS_entries(pos)
		__field(u32, old_u64s)
		__field(u32, new_u64s)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip = caller_ip;

		__entry->btree_id = path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->old_u64s = old_u64s;
		__entry->new_u64s = new_u64s;
	),

	TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->old_u64s,
		  __entry->new_u64s)
);
TRACE_EVENT(path_downgrade,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_path *path,
		 unsigned old_locks_want),
	TP_ARGS(trans, caller_ip, path, old_locks_want),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(unsigned, old_locks_want)
		__field(unsigned, new_locks_want)
		__field(unsigned, btree)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip = caller_ip;
		__entry->old_locks_want = old_locks_want;
		__entry->new_locks_want = path->locks_want;
		__entry->btree = path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("%s %pS locks_want %u -> %u %s %llu:%llu:%u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->old_locks_want,
		  __entry->new_locks_want,
		  bch2_btree_id_str(__entry->btree),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
DEFINE_EVENT(transaction_event, trans_restart_write_buffer_flush,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

TRACE_EVENT(write_buffer_flush,
	TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
	TP_ARGS(trans, nr, skipped, fast, size),

	TP_STRUCT__entry(
		__field(size_t, nr)
		__field(size_t, skipped)
		__field(size_t, fast)
		__field(size_t, size)
	),

	TP_fast_assign(
		__entry->nr = nr;
		__entry->skipped = skipped;
		__entry->fast = fast;
		__entry->size = size;
	),

	TP_printk("%zu/%zu skipped %zu fast %zu",
		  __entry->nr, __entry->size, __entry->skipped, __entry->fast)
);

TRACE_EVENT(write_buffer_flush_sync,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip = caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);

TRACE_EVENT(write_buffer_flush_slowpath,
	TP_PROTO(struct btree_trans *trans, size_t slowpath, size_t total),
	TP_ARGS(trans, slowpath, total),

	TP_STRUCT__entry(
		__field(size_t, slowpath)
		__field(size_t, total)
	),

	TP_fast_assign(
		__entry->slowpath = slowpath;
		__entry->total = total;
	),

	TP_printk("%zu/%zu", __entry->slowpath, __entry->total)
);
DEFINE_EVENT(fs_str, rebalance_extent,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, data_update,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
TRACE_EVENT(error_downcast,
	TP_PROTO(int bch_err, int std_err, unsigned long ip),
	TP_ARGS(bch_err, std_err, ip),

	TP_STRUCT__entry(
		__array(char, bch_err, 32)
		__array(char, std_err, 32)
		__array(char, ip, 32)
	),

	TP_fast_assign(
		strscpy(__entry->bch_err, bch2_err_str(bch_err), sizeof(__entry->bch_err));
		strscpy(__entry->std_err, bch2_err_str(std_err), sizeof(__entry->std_err));
		snprintf(__entry->ip, sizeof(__entry->ip), "%ps", (void *) ip);
	),

	TP_printk("%s -> %s %s", __entry->bch_err, __entry->std_err, __entry->ip)
);
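
/*
 * Background note: bcachefs uses private error codes (BCH_ERR_*) that carry
 * more context than plain errnos; "downcast" is the point where such a code
 * is converted to its standard errno class before escaping to generic
 * kernel code, and this event records both codes plus the call site.
 */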
#ifdef CONFIG_BCACHEFS_PATH_TRACEPOINTS

TRACE_EVENT(update_by_path,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path,
		 struct btree_insert_entry *i, bool overwrite),
	TP_ARGS(trans, path, i, overwrite),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(btree_path_idx_t, path_idx)
		__field(u8, btree_id)
		TRACE_BPOS_entries(pos)
		__field(u8, overwrite)
		__field(btree_path_idx_t, update_idx)
		__field(btree_path_idx_t, nr_updates)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->path_idx = path - trans->paths;
		__entry->btree_id = path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
		__entry->overwrite = overwrite;
		__entry->update_idx = i - trans->updates;
		__entry->nr_updates = trans->nr_updates;
	),

	TP_printk("%s path %3u btree %s pos %llu:%llu:%u overwrite %u update %u/%u",
		  __entry->trans_fn,
		  __entry->path_idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->overwrite,
		  __entry->update_idx,
		  __entry->nr_updates)
);
TRACE_EVENT(btree_path_lock,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip,
		 struct btree_bkey_cached_common *b),
	TP_ARGS(trans, caller_ip, b),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(unsigned long, caller_ip)
		__field(u8, btree_id)
		__field(u8, level)
		__array(char, node, 24)
		__field(u32, lock_seq)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip = caller_ip;
		__entry->btree_id = b->btree_id;
		__entry->level = b->level;

		scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
		__entry->lock_seq = six_lock_seq(&b->lock);
	),

	TP_printk("%s %pS\nbtree %s level %u node %s lock seq %u",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->level,
		  __entry->node,
		  __entry->lock_seq)
);
DECLARE_EVENT_CLASS(btree_path_ev,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path),

	TP_STRUCT__entry(
		__field(u16, idx)
		__field(u8, ref)
		__field(u8, btree_id)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx = path - trans->paths;
		__entry->ref = path->ref;
		__entry->btree_id = path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("path %3u ref %u btree %s pos %llu:%llu:%u",
		  __entry->idx, __entry->ref,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);

DEFINE_EVENT(btree_path_ev, btree_path_get_ll,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);

DEFINE_EVENT(btree_path_ev, btree_path_put_ll,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);

DEFINE_EVENT(btree_path_ev, btree_path_should_be_locked,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);
TRACE_EVENT(btree_path_alloc,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path),

	TP_STRUCT__entry(
		__field(btree_path_idx_t, idx)
		__field(u8, locks_want)
		__field(u8, btree_id)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx = path - trans->paths;
		__entry->locks_want = path->locks_want;
		__entry->btree_id = path->btree_id;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk("path %3u btree %s locks_want %u pos %llu:%llu:%u",
		  __entry->idx,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->locks_want,
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot)
);
TRACE_EVENT(btree_path_get,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos),
	TP_ARGS(trans, path, new_pos),

	TP_STRUCT__entry(
		__field(btree_path_idx_t, idx)
		__field(u8, ref)
		__field(u8, preserve)
		__field(u8, locks_want)
		__field(u8, btree_id)
		TRACE_BPOS_entries(old_pos)
		TRACE_BPOS_entries(new_pos)
	),

	TP_fast_assign(
		__entry->idx = path - trans->paths;
		__entry->ref = path->ref;
		__entry->preserve = path->preserve;
		__entry->locks_want = path->locks_want;
		__entry->btree_id = path->btree_id;
		TRACE_BPOS_assign(old_pos, path->pos);
		TRACE_BPOS_assign(new_pos, *new_pos);
	),

	TP_printk(" path %3u ref %u preserve %u btree %s locks_want %u pos %llu:%llu:%u -> %llu:%llu:%u",
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->locks_want,
		  __entry->old_pos_inode,
		  __entry->old_pos_offset,
		  __entry->old_pos_snapshot,
		  __entry->new_pos_inode,
		  __entry->new_pos_offset,
		  __entry->new_pos_snapshot)
);
DECLARE_EVENT_CLASS(btree_path_clone,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new),

	TP_STRUCT__entry(
		__field(btree_path_idx_t, idx)
		__field(u8, new_idx)
		__field(u8, btree_id)
		__field(u8, ref)
		__field(u8, preserve)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->idx = path - trans->paths;
		__entry->new_idx = new - trans->paths;
		__entry->btree_id = path->btree_id;
		__entry->ref = path->ref;
		__entry->preserve = path->preserve;
		TRACE_BPOS_assign(pos, path->pos);
	),

	TP_printk(" path %3u ref %u preserve %u btree %s %llu:%llu:%u -> %u",
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->new_idx)
);

DEFINE_EVENT(btree_path_clone, btree_path_clone,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new)
);

DEFINE_EVENT(btree_path_clone, btree_path_save_pos,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
	TP_ARGS(trans, path, new)
);
DECLARE_EVENT_CLASS(btree_path_traverse,
	TP_PROTO(struct btree_trans *trans,
		 struct btree_path *path),
	TP_ARGS(trans, path),

	TP_STRUCT__entry(
		__array(char, trans_fn, 32)
		__field(btree_path_idx_t, idx)
		__field(u8, ref)
		__field(u8, preserve)
		__field(u8, should_be_locked)
		__field(u8, btree_id)
		__field(u8, level)
		TRACE_BPOS_entries(pos)
		__field(u8, locks_want)
		__field(u8, nodes_locked)
		__array(char, node0, 24)
		__array(char, node1, 24)
		__array(char, node2, 24)
		__array(char, node3, 24)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));

		__entry->idx = path - trans->paths;
		__entry->ref = path->ref;
		__entry->preserve = path->preserve;
		__entry->should_be_locked = path->should_be_locked;
		__entry->btree_id = path->btree_id;
		__entry->level = path->level;
		TRACE_BPOS_assign(pos, path->pos);

		__entry->locks_want = path->locks_want;
		__entry->nodes_locked = path->nodes_locked;
		struct btree *b = path->l[0].b;
		if (IS_ERR(b))
			strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
		else
			scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
		b = path->l[1].b;
		if (IS_ERR(b))
			strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node1));
		else
			scnprintf(__entry->node1, sizeof(__entry->node1), "%px", &b->c);
		b = path->l[2].b;
		if (IS_ERR(b))
			strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node2));
		else
			scnprintf(__entry->node2, sizeof(__entry->node2), "%px", &b->c);
		b = path->l[3].b;
		if (IS_ERR(b))
			strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node3));
		else
			scnprintf(__entry->node3, sizeof(__entry->node3), "%px", &b->c);
	),

	TP_printk("%s\npath %3u ref %u preserve %u btree %s %llu:%llu:%u level %u locks_want %u\n"
		  "locks %u %u %u %u node %s %s %s %s",
		  __entry->trans_fn,
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode,
		  __entry->pos_offset,
		  __entry->pos_snapshot,
		  __entry->level,
		  __entry->locks_want,
		  (__entry->nodes_locked >> 6) & 3,
		  (__entry->nodes_locked >> 4) & 3,
		  (__entry->nodes_locked >> 2) & 3,
		  (__entry->nodes_locked >> 0) & 3,
		  __entry->node3,
		  __entry->node2,
		  __entry->node1,
		  __entry->node0)
);
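
/*
 * Decoding note (informal): nodes_locked packs a 2-bit lock state per btree
 * level, so ((nodes_locked >> (level * 2)) & 3) extracts one level; the
 * printk above lists levels 3..0 followed by the matching node3..node0
 * pointers, root-most first.
 */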
DEFINE_EVENT(btree_path_traverse, btree_path_traverse_start,
	TP_PROTO(struct btree_trans *trans,
		 struct btree_path *path),
	TP_ARGS(trans, path)
);

DEFINE_EVENT(btree_path_traverse, btree_path_traverse_end,
	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
	TP_ARGS(trans, path)
);
TRACE_EVENT(btree_path_set_pos,
	TP_PROTO(struct btree_trans *trans,
		 struct btree_path *path,
		 struct bpos *new_pos),
	TP_ARGS(trans, path, new_pos),

	TP_STRUCT__entry(
		__field(btree_path_idx_t, idx)
		__field(u8, ref)
		__field(u8, preserve)
		__field(u8, btree_id)
		TRACE_BPOS_entries(old_pos)
		TRACE_BPOS_entries(new_pos)
		__field(u8, locks_want)
		__field(u8, nodes_locked)
		__array(char, node0, 24)
		__array(char, node1, 24)
		__array(char, node2, 24)
		__array(char, node3, 24)
	),

	TP_fast_assign(
		__entry->idx = path - trans->paths;
		__entry->ref = path->ref;
		__entry->preserve = path->preserve;
		__entry->btree_id = path->btree_id;
		TRACE_BPOS_assign(old_pos, path->pos);
		TRACE_BPOS_assign(new_pos, *new_pos);

		__entry->locks_want = path->locks_want;
		__entry->nodes_locked = path->nodes_locked;
		struct btree *b = path->l[0].b;
		if (IS_ERR(b))
			strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
		else
			scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
		b = path->l[1].b;
		if (IS_ERR(b))
			strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node1));
		else
			scnprintf(__entry->node1, sizeof(__entry->node1), "%px", &b->c);
		b = path->l[2].b;
		if (IS_ERR(b))
			strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node2));
		else
			scnprintf(__entry->node2, sizeof(__entry->node2), "%px", &b->c);
		b = path->l[3].b;
		if (IS_ERR(b))
			strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node3));
		else
			scnprintf(__entry->node3, sizeof(__entry->node3), "%px", &b->c);
	),

	TP_printk("\npath %3u ref %u preserve %u btree %s %llu:%llu:%u -> %llu:%llu:%u\n"
		  "locks %u %u %u %u node %s %s %s %s",
		  __entry->idx,
		  __entry->ref,
		  __entry->preserve,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->old_pos_inode,
		  __entry->old_pos_offset,
		  __entry->old_pos_snapshot,
		  __entry->new_pos_inode,
		  __entry->new_pos_offset,
		  __entry->new_pos_snapshot,
		  (__entry->nodes_locked >> 6) & 3,
		  (__entry->nodes_locked >> 4) & 3,
		  (__entry->nodes_locked >> 2) & 3,
		  (__entry->nodes_locked >> 0) & 3,
		  __entry->node3,
		  __entry->node2,
		  __entry->node1,
		  __entry->node0)
);
TRACE_EVENT(btree_path_free,
	TP_PROTO(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup),
	TP_ARGS(trans, path, dup),

	TP_STRUCT__entry(
		__field(btree_path_idx_t, idx)
		__field(u8, preserve)
		__field(u8, should_be_locked)
		__field(s8, dup)
		__field(u8, dup_locked)
	),

	TP_fast_assign(
		__entry->idx = path;
		__entry->preserve = trans->paths[path].preserve;
		__entry->should_be_locked = trans->paths[path].should_be_locked;
		__entry->dup = dup ? dup - trans->paths : -1;
		__entry->dup_locked = dup ? btree_node_locked(dup, dup->level) : 0;
	),

	TP_printk(" path %3u %c %c dup %2i locked %u", __entry->idx,
		  __entry->preserve ? 'P' : ' ',
		  __entry->should_be_locked ? 'S' : ' ',
		  __entry->dup,
		  __entry->dup_locked)
);
TRACE_EVENT(btree_path_free_trans_begin,
	TP_PROTO(btree_path_idx_t path),
	TP_ARGS(path),

	TP_STRUCT__entry(
		__field(btree_path_idx_t, idx)
	),

	TP_fast_assign(
		__entry->idx = path;
	),

	TP_printk(" path %3u", __entry->idx)
);
#else /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */

#ifndef _TRACE_BCACHEFS_H
static inline void trace_update_by_path(struct btree_trans *trans, struct btree_path *path,
					struct btree_insert_entry *i, bool overwrite) {}
static inline void trace_btree_path_lock(struct btree_trans *trans, unsigned long caller_ip, struct btree_bkey_cached_common *b) {}
static inline void trace_btree_path_get_ll(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_put_ll(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_should_be_locked(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_alloc(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_get(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
static inline void trace_btree_path_clone(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
static inline void trace_btree_path_save_pos(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
static inline void trace_btree_path_traverse_start(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_traverse_end(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_set_pos(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
static inline void trace_btree_path_free(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup) {}
static inline void trace_btree_path_free_trans_begin(btree_path_idx_t path) {}
#endif
#endif /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */

#define _TRACE_BCACHEFS_H
#endif /* _TRACE_BCACHEFS_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../fs/bcachefs

#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

#include <trace/define_trace.h>
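
/*
 * Illustrative note: as with any tracepoint header, exactly one translation
 * unit must instantiate the events by defining CREATE_TRACE_POINTS before
 * including this header; in bcachefs a dedicated trace.c does, in effect:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include "trace.h"
 *
 * TRACE_INCLUDE_PATH is interpreted relative to include/trace/define_trace.h,
 * which is why it points back to ../../fs/bcachefs here.
 */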