/*
 * Assorted bcache debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>
static struct dentry *debug;
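/*
 * Sanity check each pointer in @k against the cache set's superblock limits
 * and return a human readable status string; the empty string means the key
 * looks valid.
 */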
const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = PTR_CACHE(c, k, i);
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->sb.bucket_size)
				return "bad, length too big";
			if (bucket < ca->sb.first_bucket)
				return "bad, short offset";
			if (bucket >= ca->sb.nbuckets)
				return "bad, offset past end of device";
			if (ptr_stale(c, k, i))
				return "stale";
		}

	if (!bkey_cmp(k, &ZERO_KEY))
		return "bad, null key";
	if (!KEY_PTRS(k))
		return "bad, no pointers";
	if (!KEY_SIZE(k))
		return "zeroed key";
	return "";
}
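/*
 * Format @k as "<inode>:<offset> len <size> -> [dev:offset gen, ...]" into
 * @buf, truncating at @size bytes; returns the number of characters written.
 */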
int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
{
	unsigned i = 0;
	char *out = buf, *end = buf + size;

#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))

	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_OFFSET(k), KEY_SIZE(k));

	if (KEY_PTRS(k))
		while (1) {
			p("%llu:%llu gen %llu",
			  PTR_DEV(k, i), PTR_OFFSET(k, i), PTR_GEN(k, i));

			if (++i == KEY_PTRS(k))
				break;

			p(", ");
		}

	p("]");

	if (KEY_DIRTY(k))
		p(" dirty");
	if (KEY_CSUM(k))
		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
#undef p
	return out - buf;
}
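/*
 * Describe a btree node as "<bucket> level <level>/<root level>" for debug
 * output.
 */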
int bch_btree_to_text(char *buf, size_t size, const struct btree *b)
{
	return scnprintf(buf, size, "%zu level %i/%i",
			 PTR_BUCKET_NR(b->c, &b->key, 0),
			 b->level, b->c->root ? b->c->root->level : -1);
}
#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG)
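/*
 * Returns true if @k sorts after the key that follows it in the bset; on leaf
 * nodes @k is compared against the start of the next key, so overlapping
 * extents are caught as well.
 */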
static bool skipped_backwards(struct btree *b, struct bkey *k)
{
	return bkey_cmp(k, (!b->level)
			? &START_KEY(bkey_next(k))
			: bkey_next(k)) > 0;
}
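/*
 * Print every key in @i to the console along with each pointer's bucket and
 * priority and the key's bch_ptr_status(), flagging keys that sort backwards.
 */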
static void dump_bset(struct btree *b, struct bset *i)
{
	struct bkey *k;
	unsigned j;
	char buf[80];

	for (k = i->start; k < end(i); k = bkey_next(k)) {
		bch_bkey_to_text(buf, sizeof(buf), k);
		printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
		       (uint64_t *) k - i->d, i->keys, buf);

		for (j = 0; j < KEY_PTRS(k); j++) {
			size_t n = PTR_BUCKET_NR(b->c, k, j);
			printk(" bucket %zu", n);

			if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
				printk(" prio %i",
				       PTR_BUCKET(b->c, k, j)->prio);
		}

		printk(" %s\n", bch_ptr_status(b->c, k));

		if (bkey_next(k) < end(i) &&
		    skipped_backwards(b, k))
			printk(KERN_ERR "Key skipped backwards\n");
	}
}
#endif

#ifdef CONFIG_BCACHE_DEBUG
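/*
 * Re-read btree node @b from disk into the cache set's spare verify node and
 * compare the result against the freshly sorted in-memory copy in @new; on
 * any mismatch, dump all three versions and panic.
 */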
void bch_btree_verify(struct btree *b, struct bset *new)
{
	struct btree *v = b->c->verify_data;
	struct closure cl;
	closure_init_stack(&cl);

	if (!b->c->verify)
		return;

	closure_wait_event(&b->io.wait, &cl,
			   atomic_read(&b->io.cl.remaining) == -1);

	mutex_lock(&b->c->verify_lock);

	bkey_copy(&v->key, &b->key);
	v->written = 0;
	v->level = b->level;

	bch_btree_node_read(v);
	closure_wait_event(&v->io.wait, &cl,
			   atomic_read(&v->io.cl.remaining) == -1);

	if (new->keys != v->sets[0].data->keys ||
	    memcmp(new->start,
		   v->sets[0].data->start,
		   (void *) end(new) - (void *) new->start)) {
		unsigned i, j;

		console_lock();

		printk(KERN_ERR "*** original memory node:\n");
		for (i = 0; i <= b->nsets; i++)
			dump_bset(b, b->sets[i].data);

		printk(KERN_ERR "*** sorted memory node:\n");
		dump_bset(b, new);

		printk(KERN_ERR "*** on disk node:\n");
		dump_bset(v, v->sets[0].data);

		for (j = 0; j < new->keys; j++)
			if (new->d[j] != v->sets[0].data->d[j])
				break;

		console_unlock();
		panic("verify failed at %u\n", j);
	}

	mutex_unlock(&b->c->verify_lock);
}
static void data_verify_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}
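/*
 * Re-read the data for @s with a cloned bio and compare the result page by
 * page against what the original read returned, logging the sector of any
 * mismatch.
 */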
void bch_data_verify(struct search *s)
{
	char name[BDEVNAME_SIZE];
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct closure *cl = &s->cl;
	struct bio *check;
	struct bio_vec *bv;
	int i;

	if (!s->unaligned_bvec)
		bio_for_each_segment(bv, s->orig_bio, i)
			bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;

	check = bio_clone(s->orig_bio, GFP_NOIO);
	if (!check)
		return;

	if (bio_alloc_pages(check, GFP_NOIO))
		goto out_put;

	check->bi_rw		= READ_SYNC;
	check->bi_private	= cl;
	check->bi_end_io	= data_verify_endio;

	closure_bio_submit(check, cl, &dc->disk);
	closure_sync(cl);

	bio_for_each_segment(bv, s->orig_bio, i) {
		void *p1 = kmap(bv->bv_page);
		void *p2 = kmap(check->bi_io_vec[i].bv_page);

		if (memcmp(p1 + bv->bv_offset,
			   p2 + bv->bv_offset,
			   bv->bv_len))
			printk(KERN_ERR
			       "bcache (%s): verify failed at sector %llu\n",
			       bdevname(dc->bdev, name),
			       (uint64_t) s->orig_bio->bi_sector);

		kunmap(bv->bv_page);
		kunmap(check->bi_io_vec[i].bv_page);
	}

	__bio_for_each_segment(bv, check, i, 0)
		__free_page(bv->bv_page);
out_put:
	bio_put(check);
}
#endif

#ifdef CONFIG_BCACHE_EDEBUG
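/* Total up the sizes of the keys in a leaf node, for consistency checks. */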
unsigned bch_count_data(struct btree *b)
{
	unsigned ret = 0;
	struct btree_iter iter;
	struct bkey *k;

	if (!b->level)
		for_each_key(b, k, &iter)
			ret += KEY_SIZE(k);
	return ret;
}
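/*
 * Dump every bset in @b to the console, print the caller's message, then
 * panic with a one line description of the node.
 */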
static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
				   va_list args)
{
	unsigned i;
	char buf[80];

	console_lock();

	for (i = 0; i <= b->nsets; i++)
		dump_bset(b, b->sets[i].data);

	vprintk(fmt, args);

	console_unlock();

	bch_btree_to_text(buf, sizeof(buf), b);
	panic("at %s\n", buf);
}
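/*
 * Verify that no key in @i sorts after its successor; if one does, dump the
 * node and panic with the caller-supplied message.
 */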
void bch_check_key_order_msg(struct btree *b, struct bset *i,
			     const char *fmt, ...)
{
	struct bkey *k;

	if (!i->keys)
		return;

	for (k = i->start; bkey_next(k) < end(i); k = bkey_next(k))
		if (skipped_backwards(b, k)) {
			va_list args;
			va_start(args, fmt);

			vdump_bucket_and_panic(b, fmt, args);
			va_end(args);
		}
}
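/*
 * Walk all the keys in a leaf node and panic if any two valid keys are out of
 * order or overlap.
 */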
void bch_check_keys(struct btree *b, const char *fmt, ...)
{
	va_list args;
	struct bkey *k, *p = NULL;
	struct btree_iter iter;

	if (b->level)
		return;

	for_each_key(b, k, &iter) {
		if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) {
			printk(KERN_ERR "Keys out of order:\n");
			goto bug;
		}

		if (bch_ptr_invalid(b, k))
			continue;

		if (p && bkey_cmp(p, &START_KEY(k)) > 0) {
			printk(KERN_ERR "Overlapping keys:\n");
			goto bug;
		}
		p = k;
	}
	return;
bug:
	va_start(args, fmt);
	vdump_bucket_and_panic(b, fmt, args);
	va_end(args);
}
#endif

#ifdef CONFIG_DEBUG_FS
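/*
 * Per cache set debugfs file that dumps the btree's keys as text, one key per
 * line, by scanning with a keybuf.
 */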
/* XXX: cache set refcounting */

struct dump_iterator {
	char			buf[PAGE_SIZE];
	size_t			bytes;
	struct cache_set	*c;
	struct keybuf		keys;
};

static bool dump_pred(struct keybuf *buf, struct bkey *k)
{
	return true;
}
static ssize_t bch_dump_read(struct file *file, char __user *buf,
			     size_t size, loff_t *ppos)
{
	struct dump_iterator *i = file->private_data;
	ssize_t ret = 0;
	char kbuf[80];

	while (size) {
		struct keybuf_key *w;
		unsigned bytes = min(i->bytes, size);

		int err = copy_to_user(buf, i->buf, bytes);
		if (err)
			return err;

		ret	 += bytes;
		buf	 += bytes;
		size	 -= bytes;
		i->bytes -= bytes;
		memmove(i->buf, i->buf + bytes, i->bytes);

		if (i->bytes)
			break;

		w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY, dump_pred);
		if (!w)
			break;

		bch_bkey_to_text(kbuf, sizeof(kbuf), &w->key);
		i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
		bch_keybuf_del(&i->keys, w);
	}

	return ret;
}
static int bch_dump_open(struct inode *inode, struct file *file)
{
	struct cache_set *c = inode->i_private;
	struct dump_iterator *i;

	i = kzalloc(sizeof(struct dump_iterator), GFP_KERNEL);
	if (!i)
		return -ENOMEM;

	file->private_data = i;
	i->c = c;
	bch_keybuf_init(&i->keys);
	i->keys.last_scanned = KEY(0, 0, 0);

	return 0;
}
static int bch_dump_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}
static const struct file_operations cache_set_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch_dump_open,
	.read		= bch_dump_read,
	.release	= bch_dump_release
};
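/*
 * Create a "bcache-<set uuid>" file under the bcache debugfs directory that
 * exposes the cache set's keys via cache_set_debug_ops.
 */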
void bch_debug_init_cache_set(struct cache_set *c)
{
	if (!IS_ERR_OR_NULL(debug)) {
		char name[50];
		snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);

		c->debug = debugfs_create_file(name, 0400, debug, c,
					       &cache_set_debug_ops);
	}
}

#endif
void bch_debug_exit(void)
{
	if (!IS_ERR_OR_NULL(debug))
		debugfs_remove_recursive(debug);
}
int __init bch_debug_init(struct kobject *kobj)
{
	int ret = 0;

	debug = debugfs_create_dir("bcache", NULL);
	return ret;
}