// SPDX-License-Identifier: GPL-2.0
/*
 * Assorted bcachefs debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "fsck.h"
#include "inode.h"
#include "super.h"

#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>

static struct dentry *bch_debug;

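/*
 * Read back one replica of @b from the device in @pick and compare it, bset
 * by bset, against the in-memory version; on mismatch, both versions and the
 * raw on-disk blocks are dumped to the console.  Returns true if
 * verification failed.
 */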
static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
				      struct extent_ptr_decoded pick)
{
	struct btree *v = c->verify_data;
	struct btree_node *n_ondisk = c->verify_ondisk;
	struct btree_node *n_sorted = c->verify_data->data;
	struct bset *sorted, *inmemory = &b->data->keys;
	struct bio *bio;
	bool failed = false, saw_error = false;

	struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
	if (!ca)
		return false;

	bio = bio_alloc_bioset(ca->disk_sb.bdev,
			       buf_pages(n_sorted, btree_buf_bytes(b)),
			       REQ_OP_READ|REQ_META,
			       GFP_NOFS,
			       &c->btree_bio);
	bio->bi_iter.bi_sector	= pick.ptr.offset;
	bch2_bio_map(bio, n_sorted, btree_buf_bytes(b));

	submit_bio_wait(bio);

	bio_put(bio);
	percpu_ref_put(&ca->io_ref);

	memcpy(n_ondisk, n_sorted, btree_buf_bytes(b));

	v->written = 0;
	if (bch2_btree_node_read_done(c, ca, v, false, &saw_error) || saw_error)
		return false;

	n_sorted = c->verify_data->data;
	sorted = &n_sorted->keys;

	if (inmemory->u64s != sorted->u64s ||
	    memcmp(inmemory->start,
		   sorted->start,
		   vstruct_end(inmemory) - (void *) inmemory->start)) {
		unsigned offset = 0, sectors;
		struct bset *i;
		unsigned j;

		console_lock();

		printk(KERN_ERR "*** in memory:\n");
		bch2_dump_bset(c, b, inmemory, 0);

		printk(KERN_ERR "*** read back in:\n");
		bch2_dump_bset(c, v, sorted, 0);

		while (offset < v->written) {
			if (!offset) {
				i = &n_ondisk->keys;
				sectors = vstruct_blocks(n_ondisk, c->block_bits) <<
					c->block_bits;
			} else {
				struct btree_node_entry *bne =
					(void *) n_ondisk + (offset << 9);
				i = &bne->keys;

				sectors = vstruct_blocks(bne, c->block_bits) <<
					c->block_bits;
			}

			printk(KERN_ERR "*** on disk block %u:\n", offset);
			bch2_dump_bset(c, b, i, offset);

			offset += sectors;
		}

		for (j = 0; j < le16_to_cpu(inmemory->u64s); j++)
			if (inmemory->_data[j] != sorted->_data[j])
				break;

		console_unlock();
		bch_err(c, "verify failed at key %u", j);

		failed = true;
	}

	if (v->written != b->written) {
		bch_err(c, "written wrong: expected %u, got %u",
			b->written, v->written);
		failed = true;
	}

	return failed;
}

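/*
 * Verify the in-memory node @b against every replica on disk; on failure we
 * print the node's key and call bch2_fs_fatal_error().  Skipped entirely
 * when mounted with the nochanges option.
 */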
void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
{
	struct bkey_ptrs_c ptrs;
	struct extent_ptr_decoded p;
	const union bch_extent_entry *entry;
	struct btree *v;
	struct bset *inmemory = &b->data->keys;
	struct bkey_packed *k;
	bool failed = false;

	if (c->opts.nochanges)
		return;

	bch2_btree_node_io_lock(b);
	mutex_lock(&c->verify_lock);

	if (!c->verify_ondisk) {
		c->verify_ondisk = kvmalloc(btree_buf_bytes(b), GFP_KERNEL);
		if (!c->verify_ondisk)
			goto out;
	}

	if (!c->verify_data) {
		c->verify_data = __bch2_btree_node_mem_alloc(c);
		if (!c->verify_data)
			goto out;

		list_del_init(&c->verify_data->list);
	}

	BUG_ON(b->nsets != 1);

	for (k = inmemory->start; k != vstruct_last(inmemory); k = bkey_p_next(k))
		if (k->type == KEY_TYPE_btree_ptr_v2)
			((struct bch_btree_ptr_v2 *) bkeyp_val(&b->format, k))->mem_ptr = 0;

	v = c->verify_data;
	bkey_copy(&v->key, &b->key);
	v->c.level	= b->c.level;
	v->c.btree_id	= b->c.btree_id;
	bch2_btree_keys_init(v);

	ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(&b->key));
	bkey_for_each_ptr_decode(&b->key.k, ptrs, p, entry)
		failed |= bch2_btree_verify_replica(c, b, p);

	if (failed) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
		bch2_fs_fatal_error(c, ": btree node verify failed for: %s\n", buf.buf);
		printbuf_exit(&buf);
	}
out:
	mutex_unlock(&c->verify_lock);
	bch2_btree_node_io_unlock(b);
}

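/*
 * Print the on-disk contents of a btree node to @out: read the node from the
 * first live device, then walk its bsets, checking checksums and decrypting
 * before printing each key.
 */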
void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
				    const struct btree *b)
{
	struct btree_node *n_ondisk = NULL;
	struct extent_ptr_decoded pick;
	struct bch_dev *ca;
	struct bio *bio = NULL;
	unsigned offset = 0;
	int ret;

	if (bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key), NULL, &pick) <= 0) {
		prt_printf(out, "error getting device to read from: invalid device\n");
		return;
	}

	ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
	if (!ca) {
		prt_printf(out, "error getting device to read from: not online\n");
		return;
	}

	n_ondisk = kvmalloc(btree_buf_bytes(b), GFP_KERNEL);
	if (!n_ondisk) {
		prt_printf(out, "memory allocation failure\n");
		goto out;
	}

	bio = bio_alloc_bioset(ca->disk_sb.bdev,
			       buf_pages(n_ondisk, btree_buf_bytes(b)),
			       REQ_OP_READ|REQ_META,
			       GFP_NOFS,
			       &c->btree_bio);
	bio->bi_iter.bi_sector	= pick.ptr.offset;
	bch2_bio_map(bio, n_ondisk, btree_buf_bytes(b));

	ret = submit_bio_wait(bio);
	if (ret) {
		prt_printf(out, "IO error reading btree node: %s\n", bch2_err_str(ret));
		goto out;
	}

	while (offset < btree_sectors(c)) {
		struct bset *i;
		struct nonce nonce;
		struct bch_csum csum;
		struct bkey_packed *k;
		unsigned sectors;

		if (!offset) {
			i = &n_ondisk->keys;

			if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i))) {
				prt_printf(out, "unknown checksum type at offset %u: %llu\n",
					   offset, BSET_CSUM_TYPE(i));
				goto out;
			}

			nonce = btree_nonce(i, offset << 9);
			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, n_ondisk);

			if (bch2_crc_cmp(csum, n_ondisk->csum)) {
				prt_printf(out, "invalid checksum\n");
				goto out;
			}

			bset_encrypt(c, i, offset << 9);

			sectors = vstruct_sectors(n_ondisk, c->block_bits);
		} else {
			struct btree_node_entry *bne = (void *) n_ondisk + (offset << 9);

			i = &bne->keys;

			if (i->seq != n_ondisk->keys.seq)
				break;

			if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i))) {
				prt_printf(out, "unknown checksum type at offset %u: %llu\n",
					   offset, BSET_CSUM_TYPE(i));
				goto out;
			}

			nonce = btree_nonce(i, offset << 9);
			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

			if (bch2_crc_cmp(csum, bne->csum)) {
				prt_printf(out, "invalid checksum\n");
				goto out;
			}

			bset_encrypt(c, i, offset << 9);

			sectors = vstruct_sectors(bne, c->block_bits);
		}

		prt_printf(out, "  offset %u version %u, journal seq %llu\n",
			   offset,
			   le16_to_cpu(i->version),
			   le64_to_cpu(i->journal_seq));
		offset += sectors;

		printbuf_indent_add(out, 4);

		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k)) {
			struct bkey u;

			bch2_bkey_val_to_text(out, c, bkey_disassemble(b, k, &u));
			prt_newline(out);
		}

		printbuf_indent_sub(out, 4);
	}
out:
	if (bio)
		bio_put(bio);
	kvfree(n_ondisk);
	percpu_ref_put(&ca->io_ref);
}

#ifdef CONFIG_DEBUG_FS

/* XXX: bch_fs refcounting */

struct dump_iter {
	struct bch_fs	*c;
	enum btree_id	id;
	struct bpos	from;
	struct bpos	prev_node;
	u64		iter;

	struct printbuf	buf;

	char __user	*ubuf;	/* destination user buffer */
	size_t		size;	/* size of requested read */
	ssize_t		ret;	/* bytes read so far */
};

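/*
 * Copy as much buffered output to userspace as will fit, shifting whatever
 * couldn't be copied to the front of the printbuf.  Returns 0 if the caller
 * should keep generating output, -EFAULT on a failed copy, or the byte count
 * in i->ret once the user's buffer is full.
 */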
static ssize_t flush_buf(struct dump_iter *i)
{
	if (i->buf.pos) {
		size_t bytes = min_t(size_t, i->buf.pos, i->size);
		int copied = bytes - copy_to_user(i->ubuf, i->buf.buf, bytes);

		i->ret	+= copied;
		i->ubuf	+= copied;
		i->size	-= copied;
		i->buf.pos -= copied;
		memmove(i->buf.buf, i->buf.buf + copied, i->buf.pos);

		if (copied != bytes)
			return -EFAULT;
	}

	return i->size ? 0 : i->ret;
}

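/*
 * Each open debugfs file gets its own dump_iter; i->from and i->iter record
 * how far we've gotten, so that successive read() calls resume where the
 * previous one left off.
 */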
static int bch2_dump_open(struct inode *inode, struct file *file)
{
	struct btree_debug *bd = inode->i_private;
	struct dump_iter *i;

	i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL);
	if (!i)
		return -ENOMEM;

	file->private_data = i;
	i->from	= POS_MIN;
	i->iter	= 0;
	i->c	= container_of(bd, struct bch_fs, btree_debug[bd->id]);
	i->id	= bd->id;
	i->buf	= PRINTBUF;

	return 0;
}

static int bch2_dump_release(struct inode *inode, struct file *file)
{
	struct dump_iter *i = file->private_data;

	printbuf_exit(&i->buf);
	kfree(i);
	return 0;
}

static ssize_t bch2_read_btree(struct file *file, char __user *buf,
			       size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;

	i->ubuf = buf;
	i->size	= size;
	i->ret	= 0;

	return flush_buf(i) ?:
		bch2_trans_run(i->c,
			for_each_btree_key(trans, iter, i->id, i->from,
					   BTREE_ITER_prefetch|
					   BTREE_ITER_all_snapshots, k, ({
				bch2_bkey_val_to_text(&i->buf, i->c, k);
				prt_newline(&i->buf);
				bch2_trans_unlock(trans);
				i->from = bpos_successor(iter.pos);
				flush_buf(i);
			}))) ?:
		i->ret;
}

static const struct file_operations btree_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_read_btree,
};

static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
				       size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;

	i->ubuf = buf;
	i->size	= size;
	i->ret	= 0;

	ssize_t ret = flush_buf(i);
	if (ret)
		return ret;

	if (bpos_eq(SPOS_MAX, i->from))
		return i->ret;

	return bch2_trans_run(i->c,
		for_each_btree_node(trans, iter, i->id, i->from, 0, b, ({
			bch2_btree_node_to_text(&i->buf, i->c, b);
			i->from = !bpos_eq(SPOS_MAX, b->key.k.p)
				? bpos_successor(b->key.k.p)
				: b->key.k.p;

			drop_locks_do(trans, flush_buf(i));
		}))) ?: i->ret;
}

static const struct file_operations btree_format_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_read_btree_formats,
};

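/*
 * For each key, print its btree node (once per node) followed by the output
 * of bch2_bfloat_to_text() -- for debugging the bkey_float auxiliary search
 * tree.
 */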
static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
				       size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;

	i->ubuf = buf;
	i->size	= size;
	i->ret	= 0;

	return flush_buf(i) ?:
		bch2_trans_run(i->c,
			for_each_btree_key(trans, iter, i->id, i->from,
					   BTREE_ITER_prefetch|
					   BTREE_ITER_all_snapshots, k, ({
				struct btree_path_level *l =
					&btree_iter_path(trans, &iter)->l[0];
				struct bkey_packed *_k =
					bch2_btree_node_iter_peek(&l->iter, l->b);

				if (bpos_gt(l->b->key.k.p, i->prev_node)) {
					bch2_btree_node_to_text(&i->buf, i->c, l->b);
					i->prev_node = l->b->key.k.p;
				}

				bch2_bfloat_to_text(&i->buf, l->b, _k);
				bch2_trans_unlock(trans);
				i->from = bpos_successor(iter.pos);
				flush_buf(i);
			}))) ?:
		i->ret;
}

static const struct file_operations bfloat_failed_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_read_bfloat_failed,
};

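/*
 * Print one cached btree node: pointer, btree ID and level, key, flags, and
 * write/journal pin state, using a tabstop so the values line up.
 */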
static void bch2_cached_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
					   struct btree *b)
{
	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 32);

	prt_printf(out, "%px btree=%s l=%u\n", b, bch2_btree_id_str(b->c.btree_id), b->c.level);

	printbuf_indent_add(out, 2);

	bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
	prt_newline(out);

	prt_printf(out, "flags:\t");
	prt_bitflags(out, bch2_btree_node_flags, b->flags);
	prt_newline(out);

	prt_printf(out, "pcpu read locks:\t%u\n", b->c.lock.readers != NULL);
	prt_printf(out, "written:\t%u\n", b->written);
	prt_printf(out, "writes blocked:\t%u\n", !list_empty_careful(&b->write_blocked));
	prt_printf(out, "will make reachable:\t%lx\n", b->will_make_reachable);

	prt_printf(out, "journal pin %px:\t%llu\n",
		   &b->writes[0].journal, b->writes[0].journal.seq);
	prt_printf(out, "journal pin %px:\t%llu\n",
		   &b->writes[1].journal, b->writes[1].journal.seq);

	printbuf_indent_sub(out, 2);
}

static ssize_t bch2_cached_btree_nodes_read(struct file *file, char __user *buf,
					    size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;
	struct bch_fs *c = i->c;
	bool done = false;
	ssize_t ret = 0;

	i->ubuf = buf;
	i->size	= size;
	i->ret	= 0;

	do {
		struct bucket_table *tbl;
		struct rhash_head *pos;
		struct btree *b;

		ret = flush_buf(i);
		if (ret)
			return ret;

		rcu_read_lock();
		i->buf.atomic++;
		tbl = rht_dereference_rcu(c->btree_cache.table.tbl,
					  &c->btree_cache.table);
		if (i->iter < tbl->size) {
			rht_for_each_entry_rcu(b, pos, tbl, i->iter, hash)
				bch2_cached_btree_node_to_text(&i->buf, c, b);
			i->iter++;
		} else {
			done = true;
		}
		--i->buf.atomic;
		rcu_read_unlock();
	} while (!done);

	if (i->buf.allocation_failure)
		ret = -ENOMEM;

	if (!ret)
		ret = flush_buf(i);

	return ret ?: i->ret;
}

static const struct file_operations cached_btree_nodes_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_cached_btree_nodes_read,
};

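/*
 * Insertion sort on a linked list: quadratic, but only used from debugfs
 * reads, where the btree_trans list is short.
 */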
typedef int (*list_cmp_fn)(const struct list_head *l, const struct list_head *r);

static void list_sort(struct list_head *head, list_cmp_fn cmp)
{
	struct list_head *pos;

	list_for_each(pos, head)
		while (!list_is_last(pos, head) &&
		       cmp(pos, pos->next) > 0) {
			struct list_head *pos2, *next = pos->next;

			list_del(next);
			list_for_each(pos2, head)
				if (cmp(next, pos2) < 0)
					goto pos_found;
			BUG();
pos_found:
			list_add_tail(next, pos2);
		}
}

static int list_ptr_order_cmp(const struct list_head *l, const struct list_head *r)
{
	return cmp_int(l, r);
}

static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
					    size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;
	struct bch_fs *c = i->c;
	struct btree_trans *trans;
	ssize_t ret = 0;

	i->ubuf = buf;
	i->size	= size;
	i->ret	= 0;
restart:
	seqmutex_lock(&c->btree_trans_lock);
	list_sort(&c->btree_trans_list, list_ptr_order_cmp);

	list_for_each_entry(trans, &c->btree_trans_list, list) {
		if ((ulong) trans <= i->iter)
			continue;

		i->iter = (ulong) trans;

		if (!closure_get_not_zero(&trans->ref))
			continue;

		u32 seq = seqmutex_unlock(&c->btree_trans_lock);

		bch2_btree_trans_to_text(&i->buf, trans);

		prt_printf(&i->buf, "backtrace:\n");
		printbuf_indent_add(&i->buf, 2);
		bch2_prt_task_backtrace(&i->buf, trans->locking_wait.task, 0, GFP_KERNEL);
		printbuf_indent_sub(&i->buf, 2);
		prt_newline(&i->buf);

		closure_put(&trans->ref);

		ret = flush_buf(i);
		if (ret)
			goto unlocked;

		if (!seqmutex_relock(&c->btree_trans_lock, seq))
			goto restart;
	}
	seqmutex_unlock(&c->btree_trans_lock);
unlocked:
	if (i->buf.allocation_failure)
		ret = -ENOMEM;

	if (!ret)
		ret = flush_buf(i);

	return ret ?: i->ret;
}

static const struct file_operations btree_transactions_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_btree_transactions_read,
};

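/*
 * Dump the journal's pin lists, one sequence number at a time; i->iter
 * tracks the sequence number to print next, so reads can resume mid-dump.
 */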
static ssize_t bch2_journal_pins_read(struct file *file, char __user *buf,
				      size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;
	struct bch_fs *c = i->c;
	bool done = false;
	int err;

	i->ubuf = buf;
	i->size	= size;
	i->ret	= 0;

	while (1) {
		err = flush_buf(i);
		if (err)
			return err;

		if (!i->size)
			break;

		if (done)
			break;

		done = bch2_journal_seq_pins_to_text(&i->buf, &c->journal, &i->iter);
		i->iter++;
	}

	if (i->buf.allocation_failure)
		return -ENOMEM;

	return i->ret;
}

static const struct file_operations journal_pins_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_journal_pins_read,
};

static ssize_t bch2_btree_updates_read(struct file *file, char __user *buf,
				       size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;
	struct bch_fs *c = i->c;
	int err;

	i->ubuf = buf;
	i->size	= size;
	i->ret	= 0;

	if (!i->iter) {
		bch2_btree_updates_to_text(&i->buf, c);
		i->iter++;
	}

	err = flush_buf(i);
	if (err)
		return err;

	if (i->buf.allocation_failure)
		return -ENOMEM;

	return i->ret;
}

static const struct file_operations btree_updates_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_btree_updates_read,
};

static int btree_transaction_stats_open(struct inode *inode, struct file *file)
{
	struct bch_fs *c = inode->i_private;
	struct dump_iter *i;

	i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL);
	if (!i)
		return -ENOMEM;

	i->iter	= 1;
	i->c	= c;
	i->buf	= PRINTBUF;
	file->private_data = i;

	return 0;
}

static int btree_transaction_stats_release(struct inode *inode, struct file *file)
{
	struct dump_iter *i = file->private_data;

	printbuf_exit(&i->buf);
	kfree(i);

	return 0;
}

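/*
 * Print per-transaction-site statistics: maximum memory used, transaction
 * duration, lock hold times (with CONFIG_BCACHEFS_LOCK_TIME_STATS), and the
 * largest set of btree paths allocated.
 */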
static ssize_t btree_transaction_stats_read(struct file *file, char __user *buf,
					    size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;
	struct bch_fs *c = i->c;
	int err;

	i->ubuf = buf;
	i->size	= size;
	i->ret	= 0;

	while (1) {
		struct btree_transaction_stats *s = &c->btree_transaction_stats[i->iter];

		err = flush_buf(i);
		if (err)
			return err;

		if (!i->size)
			break;

		if (i->iter == ARRAY_SIZE(bch2_btree_transaction_fns) ||
		    !bch2_btree_transaction_fns[i->iter])
			break;

		prt_printf(&i->buf, "%s:\n", bch2_btree_transaction_fns[i->iter]);
		printbuf_indent_add(&i->buf, 2);

		mutex_lock(&s->lock);

		prt_printf(&i->buf, "Max mem used: %u\n", s->max_mem);
		prt_printf(&i->buf, "Transaction duration:\n");

		printbuf_indent_add(&i->buf, 2);
		bch2_time_stats_to_text(&i->buf, &s->duration);
		printbuf_indent_sub(&i->buf, 2);

		if (IS_ENABLED(CONFIG_BCACHEFS_LOCK_TIME_STATS)) {
			prt_printf(&i->buf, "Lock hold times:\n");

			printbuf_indent_add(&i->buf, 2);
			bch2_time_stats_to_text(&i->buf, &s->lock_hold_times);
			printbuf_indent_sub(&i->buf, 2);
		}

		if (s->max_paths_text) {
			prt_printf(&i->buf, "Maximum allocated btree paths (%u):\n", s->nr_max_paths);

			printbuf_indent_add(&i->buf, 2);
			prt_str_indented(&i->buf, s->max_paths_text);
			printbuf_indent_sub(&i->buf, 2);
		}

		mutex_unlock(&s->lock);

		printbuf_indent_sub(&i->buf, 2);
		prt_newline(&i->buf);
		i->iter++;
	}

	if (i->buf.allocation_failure)
		return -ENOMEM;

	return i->ret;
}

static const struct file_operations btree_transaction_stats_op = {
	.owner		= THIS_MODULE,
	.open		= btree_transaction_stats_open,
	.release	= btree_transaction_stats_release,
	.read		= btree_transaction_stats_read,
};

/* walk btree transactions until we find a deadlock and print it */
static void btree_deadlock_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct btree_trans *trans;
	ulong iter = 0;
restart:
	seqmutex_lock(&c->btree_trans_lock);
	list_sort(&c->btree_trans_list, list_ptr_order_cmp);

	list_for_each_entry(trans, &c->btree_trans_list, list) {
		if ((ulong) trans <= iter)
			continue;

		iter = (ulong) trans;

		if (!closure_get_not_zero(&trans->ref))
			continue;

		u32 seq = seqmutex_unlock(&c->btree_trans_lock);

		bool found = bch2_check_for_deadlock(trans, out) != 0;

		closure_put(&trans->ref);

		if (found)
			return;

		if (!seqmutex_relock(&c->btree_trans_lock, seq))
			goto restart;
	}
	seqmutex_unlock(&c->btree_trans_lock);
}

static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf,
					size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;
	struct bch_fs *c = i->c;
	ssize_t ret = 0;

	i->ubuf = buf;
	i->size	= size;
	i->ret	= 0;

	if (!i->iter) {
		btree_deadlock_to_text(&i->buf, c);
		i->iter++;
	}

	if (i->buf.allocation_failure)
		ret = -ENOMEM;

	if (!ret)
		ret = flush_buf(i);

	return ret ?: i->ret;
}

static const struct file_operations btree_deadlock_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_dump_open,
	.release	= bch2_dump_release,
	.read		= bch2_btree_deadlock_read,
};

void bch2_fs_debug_exit(struct bch_fs *c)
{
	if (!IS_ERR_OR_NULL(c->fs_debug_dir))
		debugfs_remove_recursive(c->fs_debug_dir);
}

static void bch2_fs_debug_btree_init(struct bch_fs *c, struct btree_debug *bd)
{
	struct dentry *d;

	d = debugfs_create_dir(bch2_btree_id_str(bd->id), c->btree_debug_dir);

	debugfs_create_file("keys", 0400, d, bd, &btree_debug_ops);

	debugfs_create_file("formats", 0400, d, bd, &btree_format_debug_ops);

	debugfs_create_file("bfloat-failed", 0400, d, bd,
			    &bfloat_failed_debug_ops);
}

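/*
 * Create this filesystem's debugfs hierarchy: a directory named by the
 * filesystem UUID containing the global debug files, plus a "btrees"
 * subdirectory with per-btree keys/formats/bfloat-failed files.
 */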
void bch2_fs_debug_init(struct bch_fs *c)
{
	struct btree_debug *bd;
	char name[100];

	if (IS_ERR_OR_NULL(bch_debug))
		return;

	snprintf(name, sizeof(name), "%pU", c->sb.user_uuid.b);
	c->fs_debug_dir = debugfs_create_dir(name, bch_debug);
	if (IS_ERR_OR_NULL(c->fs_debug_dir))
		return;

	debugfs_create_file("cached_btree_nodes", 0400, c->fs_debug_dir,
			    c->btree_debug, &cached_btree_nodes_ops);

	debugfs_create_file("btree_transactions", 0400, c->fs_debug_dir,
			    c->btree_debug, &btree_transactions_ops);

	debugfs_create_file("journal_pins", 0400, c->fs_debug_dir,
			    c->btree_debug, &journal_pins_ops);

	debugfs_create_file("btree_updates", 0400, c->fs_debug_dir,
			    c->btree_debug, &btree_updates_ops);

	debugfs_create_file("btree_transaction_stats", 0400, c->fs_debug_dir,
			    c, &btree_transaction_stats_op);

	debugfs_create_file("btree_deadlock", 0400, c->fs_debug_dir,
			    c->btree_debug, &btree_deadlock_ops);

	c->btree_debug_dir = debugfs_create_dir("btrees", c->fs_debug_dir);
	if (IS_ERR_OR_NULL(c->btree_debug_dir))
		return;

	for (bd = c->btree_debug;
	     bd < c->btree_debug + ARRAY_SIZE(c->btree_debug);
	     bd++) {
		bd->id = bd - c->btree_debug;
		bch2_fs_debug_btree_init(c, bd);
	}
}

#endif

void bch2_debug_exit(void)
{
	if (!IS_ERR_OR_NULL(bch_debug))
		debugfs_remove_recursive(bch_debug);
}

int __init bch2_debug_init(void)
{
	bch_debug = debugfs_create_dir("bcachefs", NULL);
	return 0;
}