/*
 * Assorted bcache debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>
static struct dentry *debug;
#ifdef CONFIG_BCACHE_DEBUG
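/*
 * for_each_written_bset() walks the bsets that were actually written to
 * disk for a btree node: starting at @start it advances by
 * set_blocks() * block_bytes() each step, stopping once it runs past the
 * node's on-disk size (KEY_SIZE(&b->key) << 9 bytes) or hits a bset whose
 * sequence number no longer matches the first one.
 */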
#define for_each_written_bset(b, start, i)				\
	for (i = (start);						\
	     (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
	     i->seq == (start)->seq;					\
	     i = (void *) i + set_blocks(i, block_bytes(b->c)) *	\
		 block_bytes(b->c))
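/*
 * bch_btree_verify() cross-checks a btree node: it re-reads the node from
 * disk into the cache set's dedicated verify node, re-sorts it, and
 * compares the result against the in-memory bset.  On any mismatch it
 * dumps the in-memory keys, the re-read keys and every bset found on
 * disk, then panics at the first differing key.
 */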
void bch_btree_verify(struct btree *b)
{
	struct btree *v = b->c->verify_data;
	struct bset *ondisk, *sorted, *inmemory;
	struct bio *bio;
	if (!b->c->verify || !b->c->verify_ondisk)
		return;

	mutex_lock(&b->c->verify_lock);
	ondisk = b->c->verify_ondisk;
	sorted = b->c->verify_data->keys.set->data;
	inmemory = b->keys.set->data;
	bkey_copy(&v->key, &b->key);
	v->written = 0;
	v->level = b->level;
	v->keys.ops = b->keys.ops;
	bio = bch_bbio_alloc(b->c);
	bio->bi_bdev		= PTR_CACHE(b->c, &b->key, 0)->bdev;
	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio->bi_iter.bi_size	= KEY_SIZE(&v->key) << 9;
	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
	bch_bio_map(bio, sorted);

	submit_bio_wait(bio);
	bch_bbio_free(bio, b->c);
	memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9);
	bch_btree_node_read_done(v);
	sorted = v->keys.set->data;
	if (inmemory->keys != sorted->keys ||
	    memcmp(inmemory->start,
		   sorted->start,
		   (void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) {
		struct bset *i;
		unsigned j;

		console_lock();

		printk(KERN_ERR "*** in memory:\n");
		bch_dump_bset(&b->keys, inmemory, 0);

		printk(KERN_ERR "*** read back in:\n");
		bch_dump_bset(&v->keys, sorted, 0);
		for_each_written_bset(b, ondisk, i) {
			unsigned block = ((void *) i - (void *) ondisk) /
				block_bytes(b->c);

			printk(KERN_ERR "*** on disk block %u:\n", block);
			bch_dump_bset(&b->keys, i, block);
		}
		printk(KERN_ERR "*** block %zu not written\n",
		       ((void *) i - (void *) ondisk) / block_bytes(b->c));
		for (j = 0; j < inmemory->keys; j++)
			if (inmemory->d[j] != sorted->d[j])
				break;
		printk(KERN_ERR "b->written %u\n", b->written);

		console_unlock();
		panic("verify failed at %u\n", j);
	}
	mutex_unlock(&b->c->verify_lock);
}
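/*
 * bch_data_verify() clones @bio, reads the same sectors back from disk
 * into freshly allocated pages, and compares each segment against the
 * data in @bio, reporting any mismatch through cache_set_err_on().
 */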
void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
	char name[BDEVNAME_SIZE];
	struct bio *check;
	struct bio_vec bv;
	struct bvec_iter iter;
	check = bio_clone(bio, GFP_NOIO);
	if (!check)
		return;
	bio_set_op_attrs(check, REQ_OP_READ, READ_SYNC);

	if (bio_alloc_pages(check, GFP_NOIO))
		goto out_put;

	submit_bio_wait(check);
	bio_for_each_segment(bv, bio, iter) {
		void *p1 = kmap_atomic(bv.bv_page);
		void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);

		cache_set_err_on(memcmp(p1 + bv.bv_offset,
					p2 + bv.bv_offset,
					bv.bv_len),
				 dc->disk.c,
				 "verify failed at dev %s sector %llu",
				 bdevname(dc->bdev, name),
				 (uint64_t) bio->bi_iter.bi_sector);

		kunmap_atomic(p1);
	}
	bio_free_pages(check);
out_put:
	bio_put(check);
}

#endif
#ifdef CONFIG_DEBUG_FS
/* XXX: cache set refcounting */
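/*
 * Each cache set gets a read-only debugfs file named "bcache-<set uuid>"
 * under the top-level "bcache" debugfs directory.  Reading it walks the
 * extent btree with a keybuf and emits one extent per line, e.g.
 * (assuming debugfs is mounted in the usual place):
 *
 *	cat /sys/kernel/debug/bcache/bcache-<set uuid>
 */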
struct dump_iterator {
	char			buf[PAGE_SIZE];
	size_t			bytes;
	struct cache_set	*c;
	struct keybuf		keys;
};
static bool dump_pred(struct keybuf *buf, struct bkey *k)
{
	return true;
}
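/*
 * bch_dump_read() drains the iterator's buffer to userspace and, whenever
 * the buffer runs empty, rescans the keybuf for the next extent and
 * formats it into the buffer with bch_extent_to_text().
 */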
static ssize_t bch_dump_read(struct file *file, char __user *buf,
			     size_t size, loff_t *ppos)
{
	struct dump_iterator *i = file->private_data;
	ssize_t ret = 0;
	char kbuf[80];

	while (size) {
		struct keybuf_key *w;
		unsigned bytes = min(i->bytes, size);

		int err = copy_to_user(buf, i->buf, bytes);
		if (err)
			return err;
		ret	 += bytes;
		buf	 += bytes;
		size	 -= bytes;
		i->bytes -= bytes;
		memmove(i->buf, i->buf + bytes, i->bytes);
		if (i->bytes)
			break;

		w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY, dump_pred);
		if (!w)
			break;
		bch_extent_to_text(kbuf, sizeof(kbuf), &w->key);
		i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
		bch_keybuf_del(&i->keys, w);
	}

	return ret;
}
static int bch_dump_open(struct inode *inode, struct file *file)
{
	struct cache_set *c = inode->i_private;
	struct dump_iterator *i;
	i = kzalloc(sizeof(struct dump_iterator), GFP_KERNEL);
	if (!i)
		return -ENOMEM;

	file->private_data = i;
	i->c = c;
	bch_keybuf_init(&i->keys);
	i->keys.last_scanned = KEY(0, 0, 0);

	return 0;
}
static int bch_dump_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}
static const struct file_operations cache_set_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch_dump_open,
	.read		= bch_dump_read,
	.release	= bch_dump_release
};
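/*
 * bch_debug_init_cache_set() registers the per-cache-set dump file, named
 * after the set's UUID, once the top-level "bcache" debugfs directory has
 * been created.
 */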
void bch_debug_init_cache_set(struct cache_set *c)
{
	if (!IS_ERR_OR_NULL(debug)) {
		char name[50];
		snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);

		c->debug = debugfs_create_file(name, 0400, debug, c,
					       &cache_set_debug_ops);
	}
}

#endif
void bch_debug_exit(void)
{
	if (!IS_ERR_OR_NULL(debug))
		debugfs_remove_recursive(debug);
}
int __init bch_debug_init(struct kobject *kobj)
{
	debug = debugfs_create_dir("bcache", NULL);
	return IS_ERR_OR_NULL(debug);
}