/*
 * Assorted bcache debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>

static struct dentry *debug;

#ifdef CONFIG_BCACHE_DEBUG
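
/*
 * Walk the bsets written out for btree node @b, starting at @start: stop once
 * we run past the node's on-disk size or hit a bset whose sequence number no
 * longer matches the first one.
 */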
#define for_each_written_bset(b, start, i)				\
	for (i = (start);						\
	     (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
	     i->seq == (start)->seq;					\
	     i = (void *) i + set_blocks(i, block_bytes(b->c)) *	\
		 block_bytes(b->c))
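
/*
 * Read a btree node back from disk into c->verify_data and compare it against
 * the in-memory copy; on any mismatch, dump both versions and panic.
 */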
void bch_btree_verify(struct btree *b)
{
	struct btree *v = b->c->verify_data;
	struct bset *ondisk, *sorted, *inmemory;
	struct bio *bio;

	if (!b->c->verify || !b->c->verify_ondisk)
		return;
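
	/*
	 * The verify_data node and verify_ondisk buffer are shared per cache
	 * set, so only one verification may use them at a time.
	 */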
	down(&b->io_mutex);
	mutex_lock(&b->c->verify_lock);

	ondisk = b->c->verify_ondisk;
	sorted = b->c->verify_data->keys.set->data;
	inmemory = b->keys.set->data;
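
	/*
	 * Set up the spare node @v to mirror @b: same key (and therefore the
	 * same location and size on disk), same level and key ops.
	 */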
	bkey_copy(&v->key, &b->key);
	v->written = 0;
	v->level = b->level;
	v->keys.ops = b->keys.ops;
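
	/* Read the node back from the cache device with a synchronous read. */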
	bio = bch_bbio_alloc(b->c);
	bio_set_dev(bio, PTR_CACHE(b->c, &b->key, 0)->bdev);
	bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
	bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
	bio->bi_opf = REQ_OP_READ | REQ_META;
	bch_bio_map(bio, sorted);

	submit_bio_wait(bio);
	bch_bbio_free(bio, b->c);
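
	/*
	 * Stash an unmodified copy of the on-disk data in verify_ondisk, then
	 * let bch_btree_node_read_done() parse and sort what was read into @v.
	 */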
	memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9);

	bch_btree_node_read_done(v);
	sorted = v->keys.set->data;
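
	/*
	 * If the re-read, sorted keys don't match the in-memory bset, dump
	 * everything we know and panic.
	 */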
	if (inmemory->keys != sorted->keys ||
	    memcmp(inmemory->start,
		   sorted->start,
		   (void *) bset_bkey_last(inmemory) -
		   (void *) inmemory->start)) {
		struct bset *i;
		unsigned j;

		console_lock();

		printk(KERN_ERR "*** in memory:\n");
		bch_dump_bset(&b->keys, inmemory, 0);

		printk(KERN_ERR "*** read back in:\n");
		bch_dump_bset(&v->keys, sorted, 0);
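
		/* Dump every bset that made it to disk, one block at a time. */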
		for_each_written_bset(b, ondisk, i) {
			unsigned block = ((void *) i - (void *) ondisk) /
				block_bytes(b->c);

			printk(KERN_ERR "*** on disk block %u:\n", block);
			bch_dump_bset(&b->keys, i, block);
		}

		printk(KERN_ERR "*** block %zu not written\n",
		       ((void *) i - (void *) ondisk) / block_bytes(b->c));
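
		/* Find the first 64-bit key word that differs. */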
		for (j = 0; j < inmemory->keys; j++)
			if (inmemory->d[j] != sorted->d[j])
				break;

		printk(KERN_ERR "b->written %u\n", b->written);

		console_unlock();
		panic("verify failed at %u\n", j);
	}

	mutex_unlock(&b->c->verify_lock);
	up(&b->io_mutex);
}
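
/*
 * Re-read the range covered by @bio and compare the result, segment by
 * segment, against the data @bio carries, reporting any mismatch through
 * cache_set_err_on().
 */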
void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
	char name[BDEVNAME_SIZE];
	struct bio *check;
	struct bio_vec bv, cbv;
	struct bvec_iter iter, citer = { 0 };
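
	/*
	 * Clone the original bio, give the clone its own pages and read the
	 * same range back synchronously.
	 */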
	check = bio_clone_kmalloc(bio, GFP_NOIO);
	if (!check)
		return;
	check->bi_opf = REQ_OP_READ;

	if (bio_alloc_pages(check, GFP_NOIO))
		goto out_put;

	submit_bio_wait(check);
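
	/* Compare each segment of @bio against the data the clone read back. */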
	citer.bi_size = UINT_MAX;
	bio_for_each_segment(bv, bio, iter) {
		void *p1 = kmap_atomic(bv.bv_page);
		void *p2;

		cbv = bio_iter_iovec(check, citer);
		p2 = page_address(cbv.bv_page);

		cache_set_err_on(memcmp(p1 + bv.bv_offset,
					p2 + bv.bv_offset,
					bv.bv_len),
				 dc->disk.c,
				 "verify failed at dev %s sector %llu",
				 bdevname(dc->bdev, name),
				 (uint64_t) bio->bi_iter.bi_sector);

		kunmap_atomic(p1);
		bio_advance_iter(check, &citer, bv.bv_len);
	}

	bio_free_pages(check);
out_put:
	bio_put(check);
}

#endif

#ifdef CONFIG_DEBUG_FS

/* XXX: cache set refcounting */
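
/*
 * Each open of the per-cache-set debugfs file gets a dump_iterator: a buffer
 * holding the text of the extent currently being emitted, plus a keybuf used
 * to walk the cache set's keys.
 */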
struct dump_iterator {
	char			buf[PAGE_SIZE];
	size_t			bytes;
	struct cache_set	*c;
	struct keybuf		keys;
};

static bool dump_pred(struct keybuf *buf, struct bkey *k)
{
	return true;
}
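
/*
 * Copy buffered text to userspace; whenever the buffer runs dry, scan for the
 * next key and format it with bch_extent_to_text().
 */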
static ssize_t bch_dump_read(struct file *file, char __user *buf,
			     size_t size, loff_t *ppos)
{
	struct dump_iterator *i = file->private_data;
	ssize_t ret = 0;
	char kbuf[80];

	while (size) {
		struct keybuf_key *w;
		unsigned bytes = min(i->bytes, size);

		int err = copy_to_user(buf, i->buf, bytes);
		if (err)
			return err;

		ret += bytes;
		buf += bytes;
		size -= bytes;
		i->bytes -= bytes;
		memmove(i->buf, i->buf + bytes, i->bytes);

		if (i->bytes)
			break;

		w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY, dump_pred);
		if (!w)
			break;

		bch_extent_to_text(kbuf, sizeof(kbuf), &w->key);
		i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
		bch_keybuf_del(&i->keys, w);
	}

	return ret;
}
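
/* Allocate a dump_iterator for this open and start the scan at the zero key. */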
static int bch_dump_open(struct inode *inode, struct file *file)
{
	struct cache_set *c = inode->i_private;
	struct dump_iterator *i;

	i = kzalloc(sizeof(struct dump_iterator), GFP_KERNEL);
	if (!i)
		return -ENOMEM;

	file->private_data = i;
	i->c = c;
	bch_keybuf_init(&i->keys);
	i->keys.last_scanned = KEY(0, 0, 0);

	return 0;
}

static int bch_dump_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct file_operations cache_set_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch_dump_open,
	.read		= bch_dump_read,
	.release	= bch_dump_release
};
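
/*
 * Each cache set gets a read-only debugfs file named after its UUID, under
 * the top-level "bcache" directory.
 */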
void bch_debug_init_cache_set(struct cache_set *c)
{
	if (!IS_ERR_OR_NULL(debug)) {
		char name[50];

		snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);

		c->debug = debugfs_create_file(name, 0400, debug, c,
					       &cache_set_debug_ops);
	}
}

#endif

void bch_debug_exit(void)
{
	if (!IS_ERR_OR_NULL(debug))
		debugfs_remove_recursive(debug);
}
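
/* Create the top-level "bcache" debugfs directory. */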
int __init bch_debug_init(struct kobject *kobj)
{
	int ret = 0;

	debug = debugfs_create_dir("bcache", NULL);

	return ret;
}