/*
 * Assorted bcache debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
13 #include <linux/console.h>
14 #include <linux/debugfs.h>
15 #include <linux/module.h>
16 #include <linux/random.h>
17 #include <linux/seq_file.h>
/*
 * Root debugfs directory ("bcache") that the per-cache-set dump files are
 * created under; created in bch_debug_init(), removed in bch_debug_exit().
 */
static struct dentry *debug;
21 #ifdef CONFIG_BCACHE_DEBUG
/*
 * Iterate @i over every bset that was actually written to disk in btree
 * node @b, starting at @start (the on-disk copy of the node).
 *
 * Iteration stops either at the end of the node's on-disk extent
 * (KEY_SIZE(&b->key) sectors, i.e. << 9 bytes) or when a bset's sequence
 * number no longer matches @start's — unwritten blocks do not share the
 * node's seq, so that marks the end of valid written data.
 */
#define for_each_written_bset(b, start, i)				\
	for (i = (start);						\
	     (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
	     i->seq == (start)->seq;					\
	     i = (void *) i + set_blocks(i, block_bytes(b->c)) *	\
		 block_bytes(b->c))
30 void bch_btree_verify(struct btree
*b
)
32 struct btree
*v
= b
->c
->verify_data
;
33 struct bset
*ondisk
, *sorted
, *inmemory
;
36 if (!b
->c
->verify
|| !b
->c
->verify_ondisk
)
40 mutex_lock(&b
->c
->verify_lock
);
42 ondisk
= b
->c
->verify_ondisk
;
43 sorted
= b
->c
->verify_data
->keys
.set
->data
;
44 inmemory
= b
->keys
.set
->data
;
46 bkey_copy(&v
->key
, &b
->key
);
49 v
->keys
.ops
= b
->keys
.ops
;
51 bio
= bch_bbio_alloc(b
->c
);
52 bio
->bi_bdev
= PTR_CACHE(b
->c
, &b
->key
, 0)->bdev
;
53 bio
->bi_iter
.bi_sector
= PTR_OFFSET(&b
->key
, 0);
54 bio
->bi_iter
.bi_size
= KEY_SIZE(&v
->key
) << 9;
55 bch_bio_map(bio
, sorted
);
57 submit_bio_wait(REQ_META
|READ_SYNC
, bio
);
58 bch_bbio_free(bio
, b
->c
);
60 memcpy(ondisk
, sorted
, KEY_SIZE(&v
->key
) << 9);
62 bch_btree_node_read_done(v
);
63 sorted
= v
->keys
.set
->data
;
65 if (inmemory
->keys
!= sorted
->keys
||
66 memcmp(inmemory
->start
,
68 (void *) bset_bkey_last(inmemory
) - (void *) inmemory
->start
)) {
74 printk(KERN_ERR
"*** in memory:\n");
75 bch_dump_bset(&b
->keys
, inmemory
, 0);
77 printk(KERN_ERR
"*** read back in:\n");
78 bch_dump_bset(&v
->keys
, sorted
, 0);
80 for_each_written_bset(b
, ondisk
, i
) {
81 unsigned block
= ((void *) i
- (void *) ondisk
) /
84 printk(KERN_ERR
"*** on disk block %u:\n", block
);
85 bch_dump_bset(&b
->keys
, i
, block
);
88 printk(KERN_ERR
"*** block %zu not written\n",
89 ((void *) i
- (void *) ondisk
) / block_bytes(b
->c
));
91 for (j
= 0; j
< inmemory
->keys
; j
++)
92 if (inmemory
->d
[j
] != sorted
->d
[j
])
95 printk(KERN_ERR
"b->written %u\n", b
->written
);
98 panic("verify failed at %u\n", j
);
101 mutex_unlock(&b
->c
->verify_lock
);
105 void bch_data_verify(struct cached_dev
*dc
, struct bio
*bio
)
107 char name
[BDEVNAME_SIZE
];
109 struct bio_vec bv
, *bv2
;
110 struct bvec_iter iter
;
113 check
= bio_clone(bio
, GFP_NOIO
);
117 if (bio_alloc_pages(check
, GFP_NOIO
))
120 submit_bio_wait(READ_SYNC
, check
);
122 bio_for_each_segment(bv
, bio
, iter
) {
123 void *p1
= kmap_atomic(bv
.bv_page
);
124 void *p2
= page_address(check
->bi_io_vec
[iter
.bi_idx
].bv_page
);
126 cache_set_err_on(memcmp(p1
+ bv
.bv_offset
,
130 "verify failed at dev %s sector %llu",
131 bdevname(dc
->bdev
, name
),
132 (uint64_t) bio
->bi_iter
.bi_sector
);
137 bio_for_each_segment_all(bv2
, check
, i
)
138 __free_page(bv2
->bv_page
);
145 #ifdef CONFIG_DEBUG_FS
147 /* XXX: cache set refcounting */
149 struct dump_iterator
{
156 static bool dump_pred(struct keybuf
*buf
, struct bkey
*k
)
161 static ssize_t
bch_dump_read(struct file
*file
, char __user
*buf
,
162 size_t size
, loff_t
*ppos
)
164 struct dump_iterator
*i
= file
->private_data
;
169 struct keybuf_key
*w
;
170 unsigned bytes
= min(i
->bytes
, size
);
172 int err
= copy_to_user(buf
, i
->buf
, bytes
);
180 memmove(i
->buf
, i
->buf
+ bytes
, i
->bytes
);
185 w
= bch_keybuf_next_rescan(i
->c
, &i
->keys
, &MAX_KEY
, dump_pred
);
189 bch_extent_to_text(kbuf
, sizeof(kbuf
), &w
->key
);
190 i
->bytes
= snprintf(i
->buf
, PAGE_SIZE
, "%s\n", kbuf
);
191 bch_keybuf_del(&i
->keys
, w
);
197 static int bch_dump_open(struct inode
*inode
, struct file
*file
)
199 struct cache_set
*c
= inode
->i_private
;
200 struct dump_iterator
*i
;
202 i
= kzalloc(sizeof(struct dump_iterator
), GFP_KERNEL
);
206 file
->private_data
= i
;
208 bch_keybuf_init(&i
->keys
);
209 i
->keys
.last_scanned
= KEY(0, 0, 0);
214 static int bch_dump_release(struct inode
*inode
, struct file
*file
)
216 kfree(file
->private_data
);
220 static const struct file_operations cache_set_debug_ops
= {
221 .owner
= THIS_MODULE
,
222 .open
= bch_dump_open
,
223 .read
= bch_dump_read
,
224 .release
= bch_dump_release
227 void bch_debug_init_cache_set(struct cache_set
*c
)
229 if (!IS_ERR_OR_NULL(debug
)) {
231 snprintf(name
, 50, "bcache-%pU", c
->sb
.set_uuid
);
233 c
->debug
= debugfs_create_file(name
, 0400, debug
, c
,
234 &cache_set_debug_ops
);
240 void bch_debug_exit(void)
242 if (!IS_ERR_OR_NULL(debug
))
243 debugfs_remove_recursive(debug
);
246 int __init
bch_debug_init(struct kobject
*kobj
)
250 debug
= debugfs_create_dir("bcache", NULL
);