/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcachefs

#if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)

#include <linux/tracepoint.h>
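
/*
 * Helper macros shared by many of the events below: declare and fill the
 * inode:offset:snapshot fields of a struct bpos key position.
 */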
#define TRACE_BPOS_entries(name)			\
	__field(u64,	name##_inode)			\
	__field(u64,	name##_offset)			\
	__field(u32,	name##_snapshot)

#define TRACE_BPOS_assign(dst, src)			\
	__entry->dst##_inode	= (src).inode;		\
	__entry->dst##_offset	= (src).offset;		\
	__entry->dst##_snapshot	= (src).snapshot
DECLARE_EVENT_CLASS(bpos,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p),

	TP_STRUCT__entry(
		TRACE_BPOS_entries(p)
	),

	TP_fast_assign(
		TRACE_BPOS_assign(p, *p);
	),

	TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
);
DECLARE_EVENT_CLASS(fs_str,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__string(str,		str)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__assign_str(str);
	),

	TP_printk("%d,%d\n%s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
);
DECLARE_EVENT_CLASS(trans_str,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
	TP_ARGS(trans, caller_ip, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__array(char,		trans_fn, 32)
		__field(unsigned long,	caller_ip)
		__string(str,		str)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__assign_str(str);
	),

	TP_printk("%d,%d %s %pS %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, (void *) __entry->caller_ip, __get_str(str))
);
DECLARE_EVENT_CLASS(trans_str_nocaller,
	TP_PROTO(struct btree_trans *trans, const char *str),
	TP_ARGS(trans, str),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__array(char,		trans_fn, 32)
		__string(str,		str)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__assign_str(str);
	),

	TP_printk("%d,%d %s %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->trans_fn, __get_str(str))
);
DECLARE_EVENT_CLASS(btree_node_nofs,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__field(u8,		level)
		__field(u8,		btree_id)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__array(char,		trans_fn, 32)
		__field(u8,		level)
		__field(u8,		btree_id)
		TRACE_BPOS_entries(pos)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->level		= b->c.level;
		__entry->btree_id	= b->c.btree_id;
		TRACE_BPOS_assign(pos, b->key.k.p);
	),

	TP_printk("%d,%d %s %u %s %llu:%llu:%u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn,
		  __entry->level,
		  bch2_btree_id_str(__entry->btree_id),
		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
);
DECLARE_EVENT_CLASS(bch_fs,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
	),

	TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
);
DECLARE_EVENT_CLASS(btree_trans,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__array(char,		trans_fn, 32)
	),

	TP_fast_assign(
		__entry->dev		= trans->c->dev;
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
	),

	TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->trans_fn)
);
DECLARE_EVENT_CLASS(bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__field(sector_t,	sector)
		__field(unsigned int,	nr_sector)
		__array(char,		rwbs, 6)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev ? bio_dev(bio) : 0;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);
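
/* Filesystem and superblock events */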
TRACE_EVENT(bch2_sync_fs,
	TP_PROTO(struct super_block *sb, int wait),
	TP_ARGS(sb, wait),

	TP_STRUCT__entry(
		__field(dev_t,	dev)
		__field(int,	wait)
	),

	TP_fast_assign(
		__entry->dev	= sb->s_dev;
		__entry->wait	= wait;
	),

	TP_printk("dev %d,%d wait %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->wait)
);
TRACE_EVENT(bch2_fsync,
	TP_PROTO(struct file *file, int datasync),
	TP_ARGS(file, datasync),

	TP_STRUCT__entry(
		__field(dev_t,	dev)
		__field(ino_t,	ino)
		__field(ino_t,	parent)
		__field(int,	datasync)
	),

	TP_fast_assign(
		struct dentry *dentry = file->f_path.dentry;

		__entry->dev		= dentry->d_sb->s_dev;
		__entry->ino		= d_inode(dentry)->i_ino;
		__entry->parent		= d_inode(dentry->d_parent)->i_ino;
		__entry->datasync	= datasync;
	),

	TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long) __entry->ino,
		  (unsigned long) __entry->parent, __entry->datasync)
);
TRACE_EVENT(write_super,
	TP_PROTO(struct bch_fs *c, unsigned long ip),
	TP_ARGS(c, ip),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__field(unsigned long,	ip)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->ip		= ip;
	),

	TP_printk("%d,%d for %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (void *) __entry->ip)
);
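
/* Read path events */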
DEFINE_EVENT(bio, read_promote,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
TRACE_EVENT(read_nopromote,
	TP_PROTO(struct bch_fs *c, int ret),
	TP_ARGS(c, ret),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__array(char,		ret, 32)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%d,%d ret %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ret)
);
DEFINE_EVENT(bio, read_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_split,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bio, read_reuse_race,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
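
/* Journal */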
DEFINE_EVENT(bch_fs, journal_full,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(fs_str, journal_entry_full,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, journal_entry_close,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(bio, journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);
TRACE_EVENT(journal_reclaim_start,
	TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
		 u64 min_nr, u64 min_key_cache,
		 u64 btree_cache_dirty, u64 btree_cache_total,
		 u64 btree_key_cache_dirty, u64 btree_key_cache_total),
	TP_ARGS(c, direct, kicked, min_nr, min_key_cache,
		btree_cache_dirty, btree_cache_total,
		btree_key_cache_dirty, btree_key_cache_total),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__field(bool,		direct)
		__field(bool,		kicked)
		__field(u64,		min_nr)
		__field(u64,		min_key_cache)
		__field(u64,		btree_cache_dirty)
		__field(u64,		btree_cache_total)
		__field(u64,		btree_key_cache_dirty)
		__field(u64,		btree_key_cache_total)
	),

	TP_fast_assign(
		__entry->dev			= c->dev;
		__entry->direct			= direct;
		__entry->kicked			= kicked;
		__entry->min_nr			= min_nr;
		__entry->min_key_cache		= min_key_cache;
		__entry->btree_cache_dirty	= btree_cache_dirty;
		__entry->btree_cache_total	= btree_cache_total;
		__entry->btree_key_cache_dirty	= btree_key_cache_dirty;
		__entry->btree_key_cache_total	= btree_key_cache_total;
	),

	TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu btree cache %llu/%llu key cache %llu/%llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->direct,
		  __entry->kicked,
		  __entry->min_nr,
		  __entry->min_key_cache,
		  __entry->btree_cache_dirty,
		  __entry->btree_cache_total,
		  __entry->btree_key_cache_dirty,
		  __entry->btree_key_cache_total)
);
TRACE_EVENT(journal_reclaim_finish,
	TP_PROTO(struct bch_fs *c, u64 nr_flushed),
	TP_ARGS(c, nr_flushed),

	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__field(u64,		nr_flushed)
	),

	TP_fast_assign(
		__entry->dev		= c->dev;
		__entry->nr_flushed	= nr_flushed;
	),

	TP_printk("%d,%d flushed %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->nr_flushed)
);
DEFINE_EVENT(bpos, bkey_pack_pos_fail,
	TP_PROTO(const struct bpos *p),
	TP_ARGS(p)
);
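
/* Btree cache */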
TRACE_EVENT(btree_cache_scan,
	TP_PROTO(long nr_to_scan, long can_free, long ret),
	TP_ARGS(nr_to_scan, can_free, ret),

	TP_STRUCT__entry(
		__field(long,	nr_to_scan)
		__field(long,	can_free)
		__field(long,	ret)
	),

	TP_fast_assign(
		__entry->nr_to_scan	= nr_to_scan;
		__entry->can_free	= can_free;
		__entry->ret		= ret;
	),

	TP_printk("scanned for %li nodes, can free %li, ret %li",
		  __entry->nr_to_scan, __entry->can_free, __entry->ret)
);
DEFINE_EVENT(btree_node_nofs, btree_cache_reap,
	TP_PROTO(struct bch_fs *c, struct btree *b),
	TP_ARGS(c, b)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock_fail,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_lock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);

DEFINE_EVENT(btree_trans, btree_cache_cannibalize_unlock,
	TP_PROTO(struct btree_trans *trans),
	TP_ARGS(trans)
);
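
/* Btree node IO and updates */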
DEFINE_EVENT(btree_node, btree_node_read,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
TRACE_EVENT(btree_node_write,
	TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
	TP_ARGS(b, bytes, sectors),

	TP_STRUCT__entry(
		__field(enum btree_node_type,	type)
		__field(unsigned,		bytes)
		__field(unsigned,		sectors)
	),

	TP_fast_assign(
		__entry->type		= btree_node_type(b);
		__entry->bytes		= bytes;
		__entry->sectors	= sectors;
	),

	TP_printk("bkey type %u bytes %u sectors %u",
		  __entry->type, __entry->bytes, __entry->sectors)
);
DEFINE_EVENT(btree_node, btree_node_alloc,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_free,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
TRACE_EVENT(btree_reserve_get_fail,
	TP_PROTO(const char *trans_fn,
		 unsigned long caller_ip,
		 size_t required,
		 int ret),
	TP_ARGS(trans_fn, caller_ip, required, ret),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32)
		__field(unsigned long,	caller_ip)
		__field(size_t,		required)
		__array(char,		ret, 32)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
		__entry->required	= required;
		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
	),

	TP_printk("%s %pS required %zu ret %s",
		  __entry->trans_fn,
		  (void *) __entry->caller_ip,
		  __entry->required,
		  __entry->ret)
);
DEFINE_EVENT(btree_node, btree_node_compact,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_merge,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_split,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_rewrite,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);

DEFINE_EVENT(btree_node, btree_node_set_root,
	TP_PROTO(struct btree_trans *trans, struct btree *b),
	TP_ARGS(trans, b)
);
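
/* Btree locking */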
548 TRACE_EVENT(btree_path_relock_fail
,
549 TP_PROTO(struct btree_trans
*trans
,
550 unsigned long caller_ip
,
551 struct btree_path
*path
,
553 TP_ARGS(trans
, caller_ip
, path
, level
),
556 __array(char, trans_fn
, 32 )
557 __field(unsigned long, caller_ip
)
558 __field(u8
, btree_id
)
560 __field(u8
, path_idx
)
561 TRACE_BPOS_entries(pos
)
562 __array(char, node
, 24 )
563 __field(u8
, self_read_count
)
564 __field(u8
, self_intent_count
)
565 __field(u8
, read_count
)
566 __field(u8
, intent_count
)
567 __field(u32
, iter_lock_seq
)
568 __field(u32
, node_lock_seq
)
572 struct btree
*b
= btree_path_node(path
, level
);
573 struct six_lock_count c
;
575 strscpy(__entry
->trans_fn
, trans
->fn
, sizeof(__entry
->trans_fn
));
576 __entry
->caller_ip
= caller_ip
;
577 __entry
->btree_id
= path
->btree_id
;
578 __entry
->level
= level
;
579 __entry
->path_idx
= path
- trans
->paths
;
580 TRACE_BPOS_assign(pos
, path
->pos
);
582 c
= bch2_btree_node_lock_counts(trans
, NULL
, &path
->l
[level
].b
->c
, level
);
583 __entry
->self_read_count
= c
.n
[SIX_LOCK_read
];
584 __entry
->self_intent_count
= c
.n
[SIX_LOCK_intent
];
587 strscpy(__entry
->node
, bch2_err_str(PTR_ERR(b
)), sizeof(__entry
->node
));
589 c
= six_lock_counts(&path
->l
[level
].b
->c
.lock
);
590 __entry
->read_count
= c
.n
[SIX_LOCK_read
];
591 __entry
->intent_count
= c
.n
[SIX_LOCK_intent
];
592 scnprintf(__entry
->node
, sizeof(__entry
->node
), "%px", &b
->c
);
594 __entry
->iter_lock_seq
= path
->l
[level
].lock_seq
;
595 __entry
->node_lock_seq
= is_btree_node(path
, level
)
596 ? six_lock_seq(&path
->l
[level
].b
->c
.lock
)
600 TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
602 (void *) __entry
->caller_ip
,
604 bch2_btree_id_str(__entry
->btree_id
),
607 __entry
->pos_snapshot
,
610 __entry
->self_read_count
,
611 __entry
->self_intent_count
,
613 __entry
->intent_count
,
614 __entry
->iter_lock_seq
,
615 __entry
->node_lock_seq
)
618 TRACE_EVENT(btree_path_upgrade_fail
,
619 TP_PROTO(struct btree_trans
*trans
,
620 unsigned long caller_ip
,
621 struct btree_path
*path
,
623 TP_ARGS(trans
, caller_ip
, path
, level
),
626 __array(char, trans_fn
, 32 )
627 __field(unsigned long, caller_ip
)
628 __field(u8
, btree_id
)
630 __field(u8
, path_idx
)
631 TRACE_BPOS_entries(pos
)
633 __field(u8
, self_read_count
)
634 __field(u8
, self_intent_count
)
635 __field(u8
, read_count
)
636 __field(u8
, intent_count
)
637 __field(u32
, iter_lock_seq
)
638 __field(u32
, node_lock_seq
)
642 struct six_lock_count c
;
644 strscpy(__entry
->trans_fn
, trans
->fn
, sizeof(__entry
->trans_fn
));
645 __entry
->caller_ip
= caller_ip
;
646 __entry
->btree_id
= path
->btree_id
;
647 __entry
->level
= level
;
648 __entry
->path_idx
= path
- trans
->paths
;
649 TRACE_BPOS_assign(pos
, path
->pos
);
650 __entry
->locked
= btree_node_locked(path
, level
);
652 c
= bch2_btree_node_lock_counts(trans
, NULL
, &path
->l
[level
].b
->c
, level
),
653 __entry
->self_read_count
= c
.n
[SIX_LOCK_read
];
654 __entry
->self_intent_count
= c
.n
[SIX_LOCK_intent
];
655 c
= six_lock_counts(&path
->l
[level
].b
->c
.lock
);
656 __entry
->read_count
= c
.n
[SIX_LOCK_read
];
657 __entry
->intent_count
= c
.n
[SIX_LOCK_intent
];
658 __entry
->iter_lock_seq
= path
->l
[level
].lock_seq
;
659 __entry
->node_lock_seq
= is_btree_node(path
, level
)
660 ? six_lock_seq(&path
->l
[level
].b
->c
.lock
)
664 TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
666 (void *) __entry
->caller_ip
,
668 bch2_btree_id_str(__entry
->btree_id
),
671 __entry
->pos_snapshot
,
674 __entry
->self_read_count
,
675 __entry
->self_intent_count
,
677 __entry
->intent_count
,
678 __entry
->iter_lock_seq
,
679 __entry
->node_lock_seq
)
682 /* Garbage collection */
DEFINE_EVENT(bch_fs, gc_gens_start,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bch_fs, gc_gens_end,
	TP_PROTO(struct bch_fs *c),
	TP_ARGS(c)
);
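
/* Allocator */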
DEFINE_EVENT(fs_str, bucket_alloc,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, bucket_alloc_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
706 TRACE_EVENT(discard_buckets
,
707 TP_PROTO(struct bch_fs
*c
, u64 seen
, u64 open
,
708 u64 need_journal_commit
, u64 discarded
, const char *err
),
709 TP_ARGS(c
, seen
, open
, need_journal_commit
, discarded
, err
),
715 __field(u64
, need_journal_commit
)
716 __field(u64
, discarded
)
717 __array(char, err
, 16 )
721 __entry
->dev
= c
->dev
;
722 __entry
->seen
= seen
;
723 __entry
->open
= open
;
724 __entry
->need_journal_commit
= need_journal_commit
;
725 __entry
->discarded
= discarded
;
726 strscpy(__entry
->err
, err
, sizeof(__entry
->err
));
729 TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
730 MAJOR(__entry
->dev
), MINOR(__entry
->dev
),
733 __entry
->need_journal_commit
,
738 TRACE_EVENT(bucket_invalidate
,
739 TP_PROTO(struct bch_fs
*c
, unsigned dev
, u64 bucket
, u32 sectors
),
740 TP_ARGS(c
, dev
, bucket
, sectors
),
744 __field(u32
, dev_idx
)
745 __field(u32
, sectors
)
746 __field(u64
, bucket
)
750 __entry
->dev
= c
->dev
;
751 __entry
->dev_idx
= dev
;
752 __entry
->sectors
= sectors
;
753 __entry
->bucket
= bucket
;
756 TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
757 MAJOR(__entry
->dev
), MINOR(__entry
->dev
),
758 __entry
->dev_idx
, __entry
->bucket
,
764 TRACE_EVENT(bucket_evacuate
,
765 TP_PROTO(struct bch_fs
*c
, struct bpos
*bucket
),
770 __field(u32
, dev_idx
)
771 __field(u64
, bucket
)
775 __entry
->dev
= c
->dev
;
776 __entry
->dev_idx
= bucket
->inode
;
777 __entry
->bucket
= bucket
->offset
;
780 TP_printk("%d:%d %u:%llu",
781 MAJOR(__entry
->dev
), MINOR(__entry
->dev
),
782 __entry
->dev_idx
, __entry
->bucket
)
785 DEFINE_EVENT(fs_str
, move_extent
,
786 TP_PROTO(struct bch_fs
*c
, const char *str
),
790 DEFINE_EVENT(fs_str
, move_extent_read
,
791 TP_PROTO(struct bch_fs
*c
, const char *str
),
795 DEFINE_EVENT(fs_str
, move_extent_write
,
796 TP_PROTO(struct bch_fs
*c
, const char *str
),
800 DEFINE_EVENT(fs_str
, move_extent_finish
,
801 TP_PROTO(struct bch_fs
*c
, const char *str
),
805 DEFINE_EVENT(fs_str
, move_extent_fail
,
806 TP_PROTO(struct bch_fs
*c
, const char *str
),
810 DEFINE_EVENT(fs_str
, move_extent_start_fail
,
811 TP_PROTO(struct bch_fs
*c
, const char *str
),
815 TRACE_EVENT(move_data
,
816 TP_PROTO(struct bch_fs
*c
,
817 struct bch_move_stats
*stats
),
822 __field(u64
, keys_moved
)
823 __field(u64
, keys_raced
)
824 __field(u64
, sectors_seen
)
825 __field(u64
, sectors_moved
)
826 __field(u64
, sectors_raced
)
830 __entry
->dev
= c
->dev
;
831 __entry
->keys_moved
= atomic64_read(&stats
->keys_moved
);
832 __entry
->keys_raced
= atomic64_read(&stats
->keys_raced
);
833 __entry
->sectors_seen
= atomic64_read(&stats
->sectors_seen
);
834 __entry
->sectors_moved
= atomic64_read(&stats
->sectors_moved
);
835 __entry
->sectors_raced
= atomic64_read(&stats
->sectors_raced
);
838 TP_printk("%d,%d keys moved %llu raced %llu"
839 "sectors seen %llu moved %llu raced %llu",
840 MAJOR(__entry
->dev
), MINOR(__entry
->dev
),
843 __entry
->sectors_seen
,
844 __entry
->sectors_moved
,
845 __entry
->sectors_raced
)
848 TRACE_EVENT(evacuate_bucket
,
849 TP_PROTO(struct bch_fs
*c
, struct bpos
*bucket
,
850 unsigned sectors
, unsigned bucket_size
,
851 u64 fragmentation
, int ret
),
852 TP_ARGS(c
, bucket
, sectors
, bucket_size
, fragmentation
, ret
),
856 __field(u64
, member
)
857 __field(u64
, bucket
)
858 __field(u32
, sectors
)
859 __field(u32
, bucket_size
)
860 __field(u64
, fragmentation
)
865 __entry
->dev
= c
->dev
;
866 __entry
->member
= bucket
->inode
;
867 __entry
->bucket
= bucket
->offset
;
868 __entry
->sectors
= sectors
;
869 __entry
->bucket_size
= bucket_size
;
870 __entry
->fragmentation
= fragmentation
;
874 TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i",
875 MAJOR(__entry
->dev
), MINOR(__entry
->dev
),
876 __entry
->member
, __entry
->bucket
,
877 __entry
->sectors
, __entry
->bucket_size
,
878 __entry
->fragmentation
, __entry
->ret
)
882 TP_PROTO(struct bch_fs
*c
,
883 u64 sectors_moved
, u64 sectors_not_moved
,
884 u64 buckets_moved
, u64 buckets_not_moved
),
886 sectors_moved
, sectors_not_moved
,
887 buckets_moved
, buckets_not_moved
),
891 __field(u64
, sectors_moved
)
892 __field(u64
, sectors_not_moved
)
893 __field(u64
, buckets_moved
)
894 __field(u64
, buckets_not_moved
)
898 __entry
->dev
= c
->dev
;
899 __entry
->sectors_moved
= sectors_moved
;
900 __entry
->sectors_not_moved
= sectors_not_moved
;
901 __entry
->buckets_moved
= buckets_moved
;
902 __entry
->buckets_not_moved
= buckets_moved
;
905 TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
906 MAJOR(__entry
->dev
), MINOR(__entry
->dev
),
907 __entry
->sectors_moved
, __entry
->sectors_not_moved
,
908 __entry
->buckets_moved
, __entry
->buckets_not_moved
)
911 TRACE_EVENT(copygc_wait
,
912 TP_PROTO(struct bch_fs
*c
,
913 u64 wait_amount
, u64 until
),
914 TP_ARGS(c
, wait_amount
, until
),
918 __field(u64
, wait_amount
)
923 __entry
->dev
= c
->dev
;
924 __entry
->wait_amount
= wait_amount
;
925 __entry
->until
= until
;
928 TP_printk("%d,%u waiting for %llu sectors until %llu",
929 MAJOR(__entry
->dev
), MINOR(__entry
->dev
),
930 __entry
->wait_amount
, __entry
->until
)
/* btree transactions: */

DECLARE_EVENT_CLASS(transaction_event,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 32)
		__field(unsigned long,	caller_ip)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip	= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);

DEFINE_EVENT(transaction_event, transaction_commit,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);

DEFINE_EVENT(transaction_event, trans_restart_injected,
	TP_PROTO(struct btree_trans *trans,
		 unsigned long caller_ip),
	TP_ARGS(trans, caller_ip)
);
965 TRACE_EVENT(trans_restart_split_race
,
966 TP_PROTO(struct btree_trans
*trans
,
967 unsigned long caller_ip
,
969 TP_ARGS(trans
, caller_ip
, b
),
972 __array(char, trans_fn
, 32 )
973 __field(unsigned long, caller_ip
)
975 __field(u16
, written
)
976 __field(u16
, blocks
)
977 __field(u16
, u64s_remaining
)
981 strscpy(__entry
->trans_fn
, trans
->fn
, sizeof(__entry
->trans_fn
));
982 __entry
->caller_ip
= caller_ip
;
983 __entry
->level
= b
->c
.level
;
984 __entry
->written
= b
->written
;
985 __entry
->blocks
= btree_blocks(trans
->c
);
986 __entry
->u64s_remaining
= bch2_btree_keys_u64s_remaining(b
);
989 TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
990 __entry
->trans_fn
, (void *) __entry
->caller_ip
,
992 __entry
->written
, __entry
->blocks
,
993 __entry
->u64s_remaining
)
996 TRACE_EVENT(trans_blocked_journal_reclaim
,
997 TP_PROTO(struct btree_trans
*trans
,
998 unsigned long caller_ip
),
999 TP_ARGS(trans
, caller_ip
),
1002 __array(char, trans_fn
, 32 )
1003 __field(unsigned long, caller_ip
)
1005 __field(unsigned long, key_cache_nr_keys
)
1006 __field(unsigned long, key_cache_nr_dirty
)
1007 __field(long, must_wait
)
1011 strscpy(__entry
->trans_fn
, trans
->fn
, sizeof(__entry
->trans_fn
));
1012 __entry
->caller_ip
= caller_ip
;
1013 __entry
->key_cache_nr_keys
= atomic_long_read(&trans
->c
->btree_key_cache
.nr_keys
);
1014 __entry
->key_cache_nr_dirty
= atomic_long_read(&trans
->c
->btree_key_cache
.nr_dirty
);
1015 __entry
->must_wait
= __bch2_btree_key_cache_must_wait(trans
->c
);
1018 TP_printk("%s %pS key cache keys %lu dirty %lu must_wait %li",
1019 __entry
->trans_fn
, (void *) __entry
->caller_ip
,
1020 __entry
->key_cache_nr_keys
,
1021 __entry
->key_cache_nr_dirty
,
1025 TRACE_EVENT(trans_restart_journal_preres_get
,
1026 TP_PROTO(struct btree_trans
*trans
,
1027 unsigned long caller_ip
,
1029 TP_ARGS(trans
, caller_ip
, flags
),
1032 __array(char, trans_fn
, 32 )
1033 __field(unsigned long, caller_ip
)
1034 __field(unsigned, flags
)
1038 strscpy(__entry
->trans_fn
, trans
->fn
, sizeof(__entry
->trans_fn
));
1039 __entry
->caller_ip
= caller_ip
;
1040 __entry
->flags
= flags
;
1043 TP_printk("%s %pS %x", __entry
->trans_fn
,
1044 (void *) __entry
->caller_ip
,
1048 DEFINE_EVENT(transaction_event
, trans_restart_fault_inject
,
1049 TP_PROTO(struct btree_trans
*trans
,
1050 unsigned long caller_ip
),
1051 TP_ARGS(trans
, caller_ip
)
1054 DEFINE_EVENT(transaction_event
, trans_traverse_all
,
1055 TP_PROTO(struct btree_trans
*trans
,
1056 unsigned long caller_ip
),
1057 TP_ARGS(trans
, caller_ip
)
1060 DEFINE_EVENT(transaction_event
, trans_restart_key_cache_raced
,
1061 TP_PROTO(struct btree_trans
*trans
,
1062 unsigned long caller_ip
),
1063 TP_ARGS(trans
, caller_ip
)
1066 DEFINE_EVENT(trans_str
, trans_restart_too_many_iters
,
1067 TP_PROTO(struct btree_trans
*trans
,
1068 unsigned long caller_ip
,
1070 TP_ARGS(trans
, caller_ip
, paths
)
1073 DECLARE_EVENT_CLASS(transaction_restart_iter
,
1074 TP_PROTO(struct btree_trans
*trans
,
1075 unsigned long caller_ip
,
1076 struct btree_path
*path
),
1077 TP_ARGS(trans
, caller_ip
, path
),
1080 __array(char, trans_fn
, 32 )
1081 __field(unsigned long, caller_ip
)
1082 __field(u8
, btree_id
)
1083 TRACE_BPOS_entries(pos
)
1087 strscpy(__entry
->trans_fn
, trans
->fn
, sizeof(__entry
->trans_fn
));
1088 __entry
->caller_ip
= caller_ip
;
1089 __entry
->btree_id
= path
->btree_id
;
1090 TRACE_BPOS_assign(pos
, path
->pos
)
1093 TP_printk("%s %pS btree %s pos %llu:%llu:%u",
1095 (void *) __entry
->caller_ip
,
1096 bch2_btree_id_str(__entry
->btree_id
),
1098 __entry
->pos_offset
,
1099 __entry
->pos_snapshot
)
1102 DEFINE_EVENT(transaction_restart_iter
, trans_restart_btree_node_reused
,
1103 TP_PROTO(struct btree_trans
*trans
,
1104 unsigned long caller_ip
,
1105 struct btree_path
*path
),
1106 TP_ARGS(trans
, caller_ip
, path
)
1109 DEFINE_EVENT(transaction_restart_iter
, trans_restart_btree_node_split
,
1110 TP_PROTO(struct btree_trans
*trans
,
1111 unsigned long caller_ip
,
1112 struct btree_path
*path
),
1113 TP_ARGS(trans
, caller_ip
, path
)
1116 TRACE_EVENT(trans_restart_upgrade
,
1117 TP_PROTO(struct btree_trans
*trans
,
1118 unsigned long caller_ip
,
1119 struct btree_path
*path
,
1120 unsigned old_locks_want
,
1121 unsigned new_locks_want
,
1122 struct get_locks_fail
*f
),
1123 TP_ARGS(trans
, caller_ip
, path
, old_locks_want
, new_locks_want
, f
),
1126 __array(char, trans_fn
, 32 )
1127 __field(unsigned long, caller_ip
)
1128 __field(u8
, btree_id
)
1129 __field(u8
, old_locks_want
)
1130 __field(u8
, new_locks_want
)
1132 __field(u32
, path_seq
)
1133 __field(u32
, node_seq
)
1134 TRACE_BPOS_entries(pos
)
1138 strscpy(__entry
->trans_fn
, trans
->fn
, sizeof(__entry
->trans_fn
));
1139 __entry
->caller_ip
= caller_ip
;
1140 __entry
->btree_id
= path
->btree_id
;
1141 __entry
->old_locks_want
= old_locks_want
;
1142 __entry
->new_locks_want
= new_locks_want
;
1143 __entry
->level
= f
->l
;
1144 __entry
->path_seq
= path
->l
[f
->l
].lock_seq
;
1145 __entry
->node_seq
= IS_ERR_OR_NULL(f
->b
) ? 0 : f
->b
->c
.lock
.seq
;
1146 TRACE_BPOS_assign(pos
, path
->pos
)
1149 TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u",
1151 (void *) __entry
->caller_ip
,
1152 bch2_btree_id_str(__entry
->btree_id
),
1154 __entry
->pos_offset
,
1155 __entry
->pos_snapshot
,
1156 __entry
->old_locks_want
,
1157 __entry
->new_locks_want
,
1163 DEFINE_EVENT(trans_str
, trans_restart_relock
,
1164 TP_PROTO(struct btree_trans
*trans
, unsigned long caller_ip
, const char *str
),
1165 TP_ARGS(trans
, caller_ip
, str
)
1168 DEFINE_EVENT(transaction_restart_iter
, trans_restart_relock_next_node
,
1169 TP_PROTO(struct btree_trans
*trans
,
1170 unsigned long caller_ip
,
1171 struct btree_path
*path
),
1172 TP_ARGS(trans
, caller_ip
, path
)
1175 DEFINE_EVENT(transaction_restart_iter
, trans_restart_relock_parent_for_fill
,
1176 TP_PROTO(struct btree_trans
*trans
,
1177 unsigned long caller_ip
,
1178 struct btree_path
*path
),
1179 TP_ARGS(trans
, caller_ip
, path
)
1182 DEFINE_EVENT(transaction_restart_iter
, trans_restart_relock_after_fill
,
1183 TP_PROTO(struct btree_trans
*trans
,
1184 unsigned long caller_ip
,
1185 struct btree_path
*path
),
1186 TP_ARGS(trans
, caller_ip
, path
)
1189 DEFINE_EVENT(transaction_event
, trans_restart_key_cache_upgrade
,
1190 TP_PROTO(struct btree_trans
*trans
,
1191 unsigned long caller_ip
),
1192 TP_ARGS(trans
, caller_ip
)
1195 DEFINE_EVENT(transaction_restart_iter
, trans_restart_relock_key_cache_fill
,
1196 TP_PROTO(struct btree_trans
*trans
,
1197 unsigned long caller_ip
,
1198 struct btree_path
*path
),
1199 TP_ARGS(trans
, caller_ip
, path
)
1202 DEFINE_EVENT(transaction_restart_iter
, trans_restart_relock_path
,
1203 TP_PROTO(struct btree_trans
*trans
,
1204 unsigned long caller_ip
,
1205 struct btree_path
*path
),
1206 TP_ARGS(trans
, caller_ip
, path
)
1209 DEFINE_EVENT(transaction_restart_iter
, trans_restart_relock_path_intent
,
1210 TP_PROTO(struct btree_trans
*trans
,
1211 unsigned long caller_ip
,
1212 struct btree_path
*path
),
1213 TP_ARGS(trans
, caller_ip
, path
)
1216 DEFINE_EVENT(transaction_restart_iter
, trans_restart_traverse
,
1217 TP_PROTO(struct btree_trans
*trans
,
1218 unsigned long caller_ip
,
1219 struct btree_path
*path
),
1220 TP_ARGS(trans
, caller_ip
, path
)
1223 DEFINE_EVENT(transaction_restart_iter
, trans_restart_memory_allocation_failure
,
1224 TP_PROTO(struct btree_trans
*trans
,
1225 unsigned long caller_ip
,
1226 struct btree_path
*path
),
1227 TP_ARGS(trans
, caller_ip
, path
)
1230 DEFINE_EVENT(trans_str_nocaller
, trans_restart_would_deadlock
,
1231 TP_PROTO(struct btree_trans
*trans
,
1233 TP_ARGS(trans
, cycle
)
1236 DEFINE_EVENT(transaction_event
, trans_restart_would_deadlock_recursion_limit
,
1237 TP_PROTO(struct btree_trans
*trans
,
1238 unsigned long caller_ip
),
1239 TP_ARGS(trans
, caller_ip
)
1242 TRACE_EVENT(trans_restart_would_deadlock_write
,
1243 TP_PROTO(struct btree_trans
*trans
),
1247 __array(char, trans_fn
, 32 )
1251 strscpy(__entry
->trans_fn
, trans
->fn
, sizeof(__entry
->trans_fn
));
1254 TP_printk("%s", __entry
->trans_fn
)
1257 TRACE_EVENT(trans_restart_mem_realloced
,
1258 TP_PROTO(struct btree_trans
*trans
,
1259 unsigned long caller_ip
,
1260 unsigned long bytes
),
1261 TP_ARGS(trans
, caller_ip
, bytes
),
1264 __array(char, trans_fn
, 32 )
1265 __field(unsigned long, caller_ip
)
1266 __field(unsigned long, bytes
)
1270 strscpy(__entry
->trans_fn
, trans
->fn
, sizeof(__entry
->trans_fn
));
1271 __entry
->caller_ip
= caller_ip
;
1272 __entry
->bytes
= bytes
;
1275 TP_printk("%s %pS bytes %lu",
1277 (void *) __entry
->caller_ip
,
1281 TRACE_EVENT(trans_restart_key_cache_key_realloced
,
1282 TP_PROTO(struct btree_trans
*trans
,
1283 unsigned long caller_ip
,
1284 struct btree_path
*path
,
1287 TP_ARGS(trans
, caller_ip
, path
, old_u64s
, new_u64s
),
1290 __array(char, trans_fn
, 32 )
1291 __field(unsigned long, caller_ip
)
1292 __field(enum btree_id
, btree_id
)
1293 TRACE_BPOS_entries(pos
)
1294 __field(u32
, old_u64s
)
1295 __field(u32
, new_u64s
)
1299 strscpy(__entry
->trans_fn
, trans
->fn
, sizeof(__entry
->trans_fn
));
1300 __entry
->caller_ip
= caller_ip
;
1302 __entry
->btree_id
= path
->btree_id
;
1303 TRACE_BPOS_assign(pos
, path
->pos
);
1304 __entry
->old_u64s
= old_u64s
;
1305 __entry
->new_u64s
= new_u64s
;
1308 TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
1310 (void *) __entry
->caller_ip
,
1311 bch2_btree_id_str(__entry
->btree_id
),
1313 __entry
->pos_offset
,
1314 __entry
->pos_snapshot
,
1319 TRACE_EVENT(path_downgrade
,
1320 TP_PROTO(struct btree_trans
*trans
,
1321 unsigned long caller_ip
,
1322 struct btree_path
*path
,
1323 unsigned old_locks_want
),
1324 TP_ARGS(trans
, caller_ip
, path
, old_locks_want
),
1327 __array(char, trans_fn
, 32 )
1328 __field(unsigned long, caller_ip
)
1329 __field(unsigned, old_locks_want
)
1330 __field(unsigned, new_locks_want
)
1331 __field(unsigned, btree
)
1332 TRACE_BPOS_entries(pos
)
1336 strscpy(__entry
->trans_fn
, trans
->fn
, sizeof(__entry
->trans_fn
));
1337 __entry
->caller_ip
= caller_ip
;
1338 __entry
->old_locks_want
= old_locks_want
;
1339 __entry
->new_locks_want
= path
->locks_want
;
1340 __entry
->btree
= path
->btree_id
;
1341 TRACE_BPOS_assign(pos
, path
->pos
);
1344 TP_printk("%s %pS locks_want %u -> %u %s %llu:%llu:%u",
1346 (void *) __entry
->caller_ip
,
1347 __entry
->old_locks_want
,
1348 __entry
->new_locks_want
,
1349 bch2_btree_id_str(__entry
->btree
),
1351 __entry
->pos_offset
,
1352 __entry
->pos_snapshot
)
1355 DEFINE_EVENT(transaction_event
, trans_restart_write_buffer_flush
,
1356 TP_PROTO(struct btree_trans
*trans
,
1357 unsigned long caller_ip
),
1358 TP_ARGS(trans
, caller_ip
)
1361 TRACE_EVENT(write_buffer_flush
,
1362 TP_PROTO(struct btree_trans
*trans
, size_t nr
, size_t skipped
, size_t fast
, size_t size
),
1363 TP_ARGS(trans
, nr
, skipped
, fast
, size
),
1366 __field(size_t, nr
)
1367 __field(size_t, skipped
)
1368 __field(size_t, fast
)
1369 __field(size_t, size
)
1374 __entry
->skipped
= skipped
;
1375 __entry
->fast
= fast
;
1376 __entry
->size
= size
;
1379 TP_printk("%zu/%zu skipped %zu fast %zu",
1380 __entry
->nr
, __entry
->size
, __entry
->skipped
, __entry
->fast
)
1383 TRACE_EVENT(write_buffer_flush_sync
,
1384 TP_PROTO(struct btree_trans
*trans
, unsigned long caller_ip
),
1385 TP_ARGS(trans
, caller_ip
),
1388 __array(char, trans_fn
, 32 )
1389 __field(unsigned long, caller_ip
)
1393 strscpy(__entry
->trans_fn
, trans
->fn
, sizeof(__entry
->trans_fn
));
1394 __entry
->caller_ip
= caller_ip
;
1397 TP_printk("%s %pS", __entry
->trans_fn
, (void *) __entry
->caller_ip
)
1400 TRACE_EVENT(write_buffer_flush_slowpath
,
1401 TP_PROTO(struct btree_trans
*trans
, size_t slowpath
, size_t total
),
1402 TP_ARGS(trans
, slowpath
, total
),
1405 __field(size_t, slowpath
)
1406 __field(size_t, total
)
1410 __entry
->slowpath
= slowpath
;
1411 __entry
->total
= total
;
1414 TP_printk("%zu/%zu", __entry
->slowpath
, __entry
->total
)
DEFINE_EVENT(fs_str, rebalance_extent,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(fs_str, data_update,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

TRACE_EVENT(error_downcast,
	TP_PROTO(int bch_err, int std_err, unsigned long ip),
	TP_ARGS(bch_err, std_err, ip),

	TP_STRUCT__entry(
		__array(char,		bch_err, 32)
		__array(char,		std_err, 32)
		__array(char,		ip, 32)
	),

	TP_fast_assign(
		strscpy(__entry->bch_err, bch2_err_str(bch_err), sizeof(__entry->bch_err));
		strscpy(__entry->std_err, bch2_err_str(std_err), sizeof(__entry->std_err));
		snprintf(__entry->ip, sizeof(__entry->ip), "%ps", (void *) ip);
	),

	TP_printk("%s -> %s %s", __entry->bch_err, __entry->std_err, __entry->ip)
);
1446 #ifdef CONFIG_BCACHEFS_PATH_TRACEPOINTS
1448 TRACE_EVENT(update_by_path
,
1449 TP_PROTO(struct btree_trans
*trans
, struct btree_path
*path
,
1450 struct btree_insert_entry
*i
, bool overwrite
),
1451 TP_ARGS(trans
, path
, i
, overwrite
),
1454 __array(char, trans_fn
, 32 )
1455 __field(btree_path_idx_t
, path_idx
)
1456 __field(u8
, btree_id
)
1457 TRACE_BPOS_entries(pos
)
1458 __field(u8
, overwrite
)
1459 __field(btree_path_idx_t
, update_idx
)
1460 __field(btree_path_idx_t
, nr_updates
)
1464 strscpy(__entry
->trans_fn
, trans
->fn
, sizeof(__entry
->trans_fn
));
1465 __entry
->path_idx
= path
- trans
->paths
;
1466 __entry
->btree_id
= path
->btree_id
;
1467 TRACE_BPOS_assign(pos
, path
->pos
);
1468 __entry
->overwrite
= overwrite
;
1469 __entry
->update_idx
= i
- trans
->updates
;
1470 __entry
->nr_updates
= trans
->nr_updates
;
1473 TP_printk("%s path %3u btree %s pos %llu:%llu:%u overwrite %u update %u/%u",
1476 bch2_btree_id_str(__entry
->btree_id
),
1478 __entry
->pos_offset
,
1479 __entry
->pos_snapshot
,
1481 __entry
->update_idx
,
1482 __entry
->nr_updates
)
1485 TRACE_EVENT(btree_path_lock
,
1486 TP_PROTO(struct btree_trans
*trans
,
1487 unsigned long caller_ip
,
1488 struct btree_bkey_cached_common
*b
),
1489 TP_ARGS(trans
, caller_ip
, b
),
1492 __array(char, trans_fn
, 32 )
1493 __field(unsigned long, caller_ip
)
1494 __field(u8
, btree_id
)
1496 __array(char, node
, 24 )
1497 __field(u32
, lock_seq
)
1501 strscpy(__entry
->trans_fn
, trans
->fn
, sizeof(__entry
->trans_fn
));
1502 __entry
->caller_ip
= caller_ip
;
1503 __entry
->btree_id
= b
->btree_id
;
1504 __entry
->level
= b
->level
;
1506 scnprintf(__entry
->node
, sizeof(__entry
->node
), "%px", b
);
1507 __entry
->lock_seq
= six_lock_seq(&b
->lock
);
1510 TP_printk("%s %pS\nbtree %s level %u node %s lock seq %u",
1512 (void *) __entry
->caller_ip
,
1513 bch2_btree_id_str(__entry
->btree_id
),
1519 DECLARE_EVENT_CLASS(btree_path_ev
,
1520 TP_PROTO(struct btree_trans
*trans
, struct btree_path
*path
),
1521 TP_ARGS(trans
, path
),
1526 __field(u8
, btree_id
)
1527 TRACE_BPOS_entries(pos
)
1531 __entry
->idx
= path
- trans
->paths
;
1532 __entry
->ref
= path
->ref
;
1533 __entry
->btree_id
= path
->btree_id
;
1534 TRACE_BPOS_assign(pos
, path
->pos
);
1537 TP_printk("path %3u ref %u btree %s pos %llu:%llu:%u",
1538 __entry
->idx
, __entry
->ref
,
1539 bch2_btree_id_str(__entry
->btree_id
),
1541 __entry
->pos_offset
,
1542 __entry
->pos_snapshot
)
1545 DEFINE_EVENT(btree_path_ev
, btree_path_get_ll
,
1546 TP_PROTO(struct btree_trans
*trans
, struct btree_path
*path
),
1547 TP_ARGS(trans
, path
)
1550 DEFINE_EVENT(btree_path_ev
, btree_path_put_ll
,
1551 TP_PROTO(struct btree_trans
*trans
, struct btree_path
*path
),
1552 TP_ARGS(trans
, path
)
1555 DEFINE_EVENT(btree_path_ev
, btree_path_should_be_locked
,
1556 TP_PROTO(struct btree_trans
*trans
, struct btree_path
*path
),
1557 TP_ARGS(trans
, path
)
1560 TRACE_EVENT(btree_path_alloc
,
1561 TP_PROTO(struct btree_trans
*trans
, struct btree_path
*path
),
1562 TP_ARGS(trans
, path
),
1565 __field(btree_path_idx_t
, idx
)
1566 __field(u8
, locks_want
)
1567 __field(u8
, btree_id
)
1568 TRACE_BPOS_entries(pos
)
1572 __entry
->idx
= path
- trans
->paths
;
1573 __entry
->locks_want
= path
->locks_want
;
1574 __entry
->btree_id
= path
->btree_id
;
1575 TRACE_BPOS_assign(pos
, path
->pos
);
1578 TP_printk("path %3u btree %s locks_want %u pos %llu:%llu:%u",
1580 bch2_btree_id_str(__entry
->btree_id
),
1581 __entry
->locks_want
,
1583 __entry
->pos_offset
,
1584 __entry
->pos_snapshot
)
1587 TRACE_EVENT(btree_path_get
,
1588 TP_PROTO(struct btree_trans
*trans
, struct btree_path
*path
, struct bpos
*new_pos
),
1589 TP_ARGS(trans
, path
, new_pos
),
1592 __field(btree_path_idx_t
, idx
)
1594 __field(u8
, preserve
)
1595 __field(u8
, locks_want
)
1596 __field(u8
, btree_id
)
1597 TRACE_BPOS_entries(old_pos
)
1598 TRACE_BPOS_entries(new_pos
)
1602 __entry
->idx
= path
- trans
->paths
;
1603 __entry
->ref
= path
->ref
;
1604 __entry
->preserve
= path
->preserve
;
1605 __entry
->locks_want
= path
->locks_want
;
1606 __entry
->btree_id
= path
->btree_id
;
1607 TRACE_BPOS_assign(old_pos
, path
->pos
);
1608 TRACE_BPOS_assign(new_pos
, *new_pos
);
1611 TP_printk(" path %3u ref %u preserve %u btree %s locks_want %u pos %llu:%llu:%u -> %llu:%llu:%u",
1615 bch2_btree_id_str(__entry
->btree_id
),
1616 __entry
->locks_want
,
1617 __entry
->old_pos_inode
,
1618 __entry
->old_pos_offset
,
1619 __entry
->old_pos_snapshot
,
1620 __entry
->new_pos_inode
,
1621 __entry
->new_pos_offset
,
1622 __entry
->new_pos_snapshot
)
1625 DECLARE_EVENT_CLASS(btree_path_clone
,
1626 TP_PROTO(struct btree_trans
*trans
, struct btree_path
*path
, struct btree_path
*new),
1627 TP_ARGS(trans
, path
, new),
1630 __field(btree_path_idx_t
, idx
)
1631 __field(u8
, new_idx
)
1632 __field(u8
, btree_id
)
1634 __field(u8
, preserve
)
1635 TRACE_BPOS_entries(pos
)
1639 __entry
->idx
= path
- trans
->paths
;
1640 __entry
->new_idx
= new - trans
->paths
;
1641 __entry
->btree_id
= path
->btree_id
;
1642 __entry
->ref
= path
->ref
;
1643 __entry
->preserve
= path
->preserve
;
1644 TRACE_BPOS_assign(pos
, path
->pos
);
1647 TP_printk(" path %3u ref %u preserve %u btree %s %llu:%llu:%u -> %u",
1651 bch2_btree_id_str(__entry
->btree_id
),
1653 __entry
->pos_offset
,
1654 __entry
->pos_snapshot
,
1658 DEFINE_EVENT(btree_path_clone
, btree_path_clone
,
1659 TP_PROTO(struct btree_trans
*trans
, struct btree_path
*path
, struct btree_path
*new),
1660 TP_ARGS(trans
, path
, new)
1663 DEFINE_EVENT(btree_path_clone
, btree_path_save_pos
,
1664 TP_PROTO(struct btree_trans
*trans
, struct btree_path
*path
, struct btree_path
*new),
1665 TP_ARGS(trans
, path
, new)
1668 DECLARE_EVENT_CLASS(btree_path_traverse
,
1669 TP_PROTO(struct btree_trans
*trans
,
1670 struct btree_path
*path
),
1671 TP_ARGS(trans
, path
),
1674 __array(char, trans_fn
, 32 )
1675 __field(btree_path_idx_t
, idx
)
1677 __field(u8
, preserve
)
1678 __field(u8
, should_be_locked
)
1679 __field(u8
, btree_id
)
1681 TRACE_BPOS_entries(pos
)
1682 __field(u8
, locks_want
)
1683 __field(u8
, nodes_locked
)
1684 __array(char, node0
, 24 )
1685 __array(char, node1
, 24 )
1686 __array(char, node2
, 24 )
1687 __array(char, node3
, 24 )
1691 strscpy(__entry
->trans_fn
, trans
->fn
, sizeof(__entry
->trans_fn
));
1693 __entry
->idx
= path
- trans
->paths
;
1694 __entry
->ref
= path
->ref
;
1695 __entry
->preserve
= path
->preserve
;
1696 __entry
->btree_id
= path
->btree_id
;
1697 __entry
->level
= path
->level
;
1698 TRACE_BPOS_assign(pos
, path
->pos
);
1700 __entry
->locks_want
= path
->locks_want
;
1701 __entry
->nodes_locked
= path
->nodes_locked
;
1702 struct btree
*b
= path
->l
[0].b
;
1704 strscpy(__entry
->node0
, bch2_err_str(PTR_ERR(b
)), sizeof(__entry
->node0
));
1706 scnprintf(__entry
->node0
, sizeof(__entry
->node0
), "%px", &b
->c
);
1709 strscpy(__entry
->node1
, bch2_err_str(PTR_ERR(b
)), sizeof(__entry
->node0
));
1711 scnprintf(__entry
->node1
, sizeof(__entry
->node0
), "%px", &b
->c
);
1714 strscpy(__entry
->node2
, bch2_err_str(PTR_ERR(b
)), sizeof(__entry
->node0
));
1716 scnprintf(__entry
->node2
, sizeof(__entry
->node0
), "%px", &b
->c
);
1719 strscpy(__entry
->node3
, bch2_err_str(PTR_ERR(b
)), sizeof(__entry
->node0
));
1721 scnprintf(__entry
->node3
, sizeof(__entry
->node0
), "%px", &b
->c
);
1724 TP_printk("%s\npath %3u ref %u preserve %u btree %s %llu:%llu:%u level %u locks_want %u\n"
1725 "locks %u %u %u %u node %s %s %s %s",
1730 bch2_btree_id_str(__entry
->btree_id
),
1732 __entry
->pos_offset
,
1733 __entry
->pos_snapshot
,
1735 __entry
->locks_want
,
1736 (__entry
->nodes_locked
>> 6) & 3,
1737 (__entry
->nodes_locked
>> 4) & 3,
1738 (__entry
->nodes_locked
>> 2) & 3,
1739 (__entry
->nodes_locked
>> 0) & 3,
1746 DEFINE_EVENT(btree_path_traverse
, btree_path_traverse_start
,
1747 TP_PROTO(struct btree_trans
*trans
,
1748 struct btree_path
*path
),
1749 TP_ARGS(trans
, path
)
1752 DEFINE_EVENT(btree_path_traverse
, btree_path_traverse_end
,
1753 TP_PROTO(struct btree_trans
*trans
, struct btree_path
*path
),
1754 TP_ARGS(trans
, path
)
1757 TRACE_EVENT(btree_path_set_pos
,
1758 TP_PROTO(struct btree_trans
*trans
,
1759 struct btree_path
*path
,
1760 struct bpos
*new_pos
),
1761 TP_ARGS(trans
, path
, new_pos
),
1764 __field(btree_path_idx_t
, idx
)
1766 __field(u8
, preserve
)
1767 __field(u8
, btree_id
)
1768 TRACE_BPOS_entries(old_pos
)
1769 TRACE_BPOS_entries(new_pos
)
1770 __field(u8
, locks_want
)
1771 __field(u8
, nodes_locked
)
1772 __array(char, node0
, 24 )
1773 __array(char, node1
, 24 )
1774 __array(char, node2
, 24 )
1775 __array(char, node3
, 24 )
1779 __entry
->idx
= path
- trans
->paths
;
1780 __entry
->ref
= path
->ref
;
1781 __entry
->preserve
= path
->preserve
;
1782 __entry
->btree_id
= path
->btree_id
;
1783 TRACE_BPOS_assign(old_pos
, path
->pos
);
1784 TRACE_BPOS_assign(new_pos
, *new_pos
);
1786 __entry
->nodes_locked
= path
->nodes_locked
;
1787 struct btree
*b
= path
->l
[0].b
;
1789 strscpy(__entry
->node0
, bch2_err_str(PTR_ERR(b
)), sizeof(__entry
->node0
));
1791 scnprintf(__entry
->node0
, sizeof(__entry
->node0
), "%px", &b
->c
);
1794 strscpy(__entry
->node1
, bch2_err_str(PTR_ERR(b
)), sizeof(__entry
->node0
));
1796 scnprintf(__entry
->node1
, sizeof(__entry
->node0
), "%px", &b
->c
);
1799 strscpy(__entry
->node2
, bch2_err_str(PTR_ERR(b
)), sizeof(__entry
->node0
));
1801 scnprintf(__entry
->node2
, sizeof(__entry
->node0
), "%px", &b
->c
);
1804 strscpy(__entry
->node3
, bch2_err_str(PTR_ERR(b
)), sizeof(__entry
->node0
));
1806 scnprintf(__entry
->node3
, sizeof(__entry
->node0
), "%px", &b
->c
);
1809 TP_printk("\npath %3u ref %u preserve %u btree %s %llu:%llu:%u -> %llu:%llu:%u\n"
1810 "locks %u %u %u %u node %s %s %s %s",
1814 bch2_btree_id_str(__entry
->btree_id
),
1815 __entry
->old_pos_inode
,
1816 __entry
->old_pos_offset
,
1817 __entry
->old_pos_snapshot
,
1818 __entry
->new_pos_inode
,
1819 __entry
->new_pos_offset
,
1820 __entry
->new_pos_snapshot
,
1821 (__entry
->nodes_locked
>> 6) & 3,
1822 (__entry
->nodes_locked
>> 4) & 3,
1823 (__entry
->nodes_locked
>> 2) & 3,
1824 (__entry
->nodes_locked
>> 0) & 3,
1831 TRACE_EVENT(btree_path_free
,
1832 TP_PROTO(struct btree_trans
*trans
, btree_path_idx_t path
, struct btree_path
*dup
),
1833 TP_ARGS(trans
, path
, dup
),
1836 __field(btree_path_idx_t
, idx
)
1837 __field(u8
, preserve
)
1838 __field(u8
, should_be_locked
)
1840 __field(u8
, dup_locked
)
1844 __entry
->idx
= path
;
1845 __entry
->preserve
= trans
->paths
[path
].preserve
;
1846 __entry
->should_be_locked
= trans
->paths
[path
].should_be_locked
;
1847 __entry
->dup
= dup
? dup
- trans
->paths
: -1;
1848 __entry
->dup_locked
= dup
? btree_node_locked(dup
, dup
->level
) : 0;
1851 TP_printk(" path %3u %c %c dup %2i locked %u", __entry
->idx
,
1852 __entry
->preserve
? 'P' : ' ',
1853 __entry
->should_be_locked
? 'S' : ' ',
1855 __entry
->dup_locked
)
1858 TRACE_EVENT(btree_path_free_trans_begin
,
1859 TP_PROTO(btree_path_idx_t path
),
1863 __field(btree_path_idx_t
, idx
)
1867 __entry
->idx
= path
;
1870 TP_printk(" path %3u", __entry
->idx
)
#else /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
#ifndef _TRACE_BCACHEFS_H

static inline void trace_update_by_path(struct btree_trans *trans, struct btree_path *path,
					struct btree_insert_entry *i, bool overwrite) {}
static inline void trace_btree_path_lock(struct btree_trans *trans, unsigned long caller_ip, struct btree_bkey_cached_common *b) {}
static inline void trace_btree_path_get_ll(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_put_ll(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_should_be_locked(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_alloc(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_get(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
static inline void trace_btree_path_clone(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
static inline void trace_btree_path_save_pos(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
static inline void trace_btree_path_traverse_start(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_traverse_end(struct btree_trans *trans, struct btree_path *path) {}
static inline void trace_btree_path_set_pos(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
static inline void trace_btree_path_free(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup) {}
static inline void trace_btree_path_free_trans_begin(btree_path_idx_t path) {}

#endif
#endif /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
#define _TRACE_BCACHEFS_H
#endif /* _TRACE_BCACHEFS_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../fs/bcachefs

#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

#include <trace/define_trace.h>