/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
static const char bcache_magic[] = {
    0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
    0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
    0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
    0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

/* Default is -1; we skip past it for struct cached_dev's cache mode */
const char * const bch_cache_modes[] = {

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major;
static DEFINE_IDA(bcache_minor);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;

#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
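/*
 * Each device keeps a small bioset and a mempool of bio_split_hooks
 * (struct bio_split_pool) used when a bio has to be split before being
 * resubmitted; the two helpers below allocate and free those pools.
 */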
static void bio_split_pool_free(struct bio_split_pool *p)
    if (p->bio_split_hook)
        mempool_destroy(p->bio_split_hook);
    bioset_free(p->bio_split);

static int bio_split_pool_init(struct bio_split_pool *p)
    p->bio_split = bioset_create(4, 0);
    p->bio_split_hook = mempool_create_kmalloc_pool(4,
                sizeof(struct bio_split_hook));
    if (!p->bio_split_hook)
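/*
 * read_super() pulls the on-disk superblock in via __bread(), converts it
 * to host endianness in *sb and sanity checks it, setting err to a
 * human-readable message if anything looks wrong.
 */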
static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
    struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);

    s = (struct cache_sb *) bh->b_data;

    sb->offset = le64_to_cpu(s->offset);
    sb->version = le64_to_cpu(s->version);

    memcpy(sb->magic, s->magic, 16);
    memcpy(sb->uuid, s->uuid, 16);
    memcpy(sb->set_uuid, s->set_uuid, 16);
    memcpy(sb->label, s->label, SB_LABEL_SIZE);

    sb->flags = le64_to_cpu(s->flags);
    sb->seq = le64_to_cpu(s->seq);
    sb->last_mount = le32_to_cpu(s->last_mount);
    sb->first_bucket = le16_to_cpu(s->first_bucket);
    sb->keys = le16_to_cpu(s->keys);

    for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
        sb->d[i] = le64_to_cpu(s->d[i]);

    pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
         sb->version, sb->flags, sb->seq, sb->keys);

    err = "Not a bcache superblock";
    if (sb->offset != SB_SECTOR)

    if (memcmp(sb->magic, bcache_magic, 16))

    err = "Too many journal buckets";
    if (sb->keys > SB_JOURNAL_BUCKETS)

    err = "Bad checksum";
    if (s->csum != csum_set(s))

    if (bch_is_zero(sb->uuid, 16))

    sb->block_size = le16_to_cpu(s->block_size);

    err = "Superblock block size smaller than device block size";
    if (sb->block_size << 9 < bdev_logical_block_size(bdev))

    switch (sb->version) {
    case BCACHE_SB_VERSION_BDEV:
        sb->data_offset = BDEV_DATA_START_DEFAULT;
    case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
        sb->data_offset = le64_to_cpu(s->data_offset);

        err = "Bad data offset";
        if (sb->data_offset < BDEV_DATA_START_DEFAULT)

    case BCACHE_SB_VERSION_CDEV:
    case BCACHE_SB_VERSION_CDEV_WITH_UUID:
        sb->nbuckets = le64_to_cpu(s->nbuckets);
        sb->block_size = le16_to_cpu(s->block_size);
        sb->bucket_size = le16_to_cpu(s->bucket_size);

        sb->nr_in_set = le16_to_cpu(s->nr_in_set);
        sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);

        err = "Too many buckets";
        if (sb->nbuckets > LONG_MAX)

        err = "Not enough buckets";
        if (sb->nbuckets < 1 << 7)

        err = "Bad block/bucket size";
        if (!is_power_of_2(sb->block_size) ||
            sb->block_size > PAGE_SECTORS ||
            !is_power_of_2(sb->bucket_size) ||
            sb->bucket_size < PAGE_SECTORS)

        err = "Invalid superblock: device too small";
        if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)

        if (bch_is_zero(sb->set_uuid, 16))

        err = "Bad cache device number in set";
        if (!sb->nr_in_set ||
            sb->nr_in_set <= sb->nr_this_dev ||
            sb->nr_in_set > MAX_CACHES_PER_SET)

        err = "Journal buckets not sequential";
        for (i = 0; i < sb->keys; i++)
            if (sb->d[i] != sb->first_bucket + i)

        err = "Too many journal buckets";
        if (sb->first_bucket + sb->keys > sb->nbuckets)

        err = "Invalid superblock: first bucket comes before end of super";
        if (sb->first_bucket * sb->bucket_size < 16)

        err = "Unsupported superblock version";

    sb->last_mount = get_seconds();

    get_page(bh->b_page);
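/*
 * Superblock write path: __write_super() re-encodes the in-memory cache_sb
 * into the page the superblock was read into, recomputes the checksum over
 * the encoded copy, and submits it as a REQ_SYNC|REQ_META write to SB_SECTOR.
 */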
static void write_bdev_super_endio(struct bio *bio, int error)
    struct cached_dev *dc = bio->bi_private;
    /* XXX: error checking */

    closure_put(&dc->sb_write);

static void __write_super(struct cache_sb *sb, struct bio *bio)
    struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);

    bio->bi_iter.bi_sector = SB_SECTOR;
    bio->bi_rw = REQ_SYNC|REQ_META;
    bio->bi_iter.bi_size = SB_SIZE;
    bch_bio_map(bio, NULL);

    out->offset = cpu_to_le64(sb->offset);
    out->version = cpu_to_le64(sb->version);

    memcpy(out->uuid, sb->uuid, 16);
    memcpy(out->set_uuid, sb->set_uuid, 16);
    memcpy(out->label, sb->label, SB_LABEL_SIZE);

    out->flags = cpu_to_le64(sb->flags);
    out->seq = cpu_to_le64(sb->seq);

    out->last_mount = cpu_to_le32(sb->last_mount);
    out->first_bucket = cpu_to_le16(sb->first_bucket);
    out->keys = cpu_to_le16(sb->keys);

    for (i = 0; i < sb->keys; i++)
        out->d[i] = cpu_to_le64(sb->d[i]);

    out->csum = csum_set(out);

    pr_debug("ver %llu, flags %llu, seq %llu",
         sb->version, sb->flags, sb->seq);

    submit_bio(REQ_WRITE, bio);
static void bch_write_bdev_super_unlock(struct closure *cl)
    struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);

    up(&dc->sb_write_mutex);

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
    struct closure *cl = &dc->sb_write;
    struct bio *bio = &dc->sb_bio;

    down(&dc->sb_write_mutex);
    closure_init(cl, parent);

    bio->bi_bdev = dc->bdev;
    bio->bi_end_io = write_bdev_super_endio;
    bio->bi_private = dc;

    __write_super(&dc->sb, bio);

    closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
static void write_super_endio(struct bio *bio, int error)
    struct cache *ca = bio->bi_private;

    bch_count_io_errors(ca, error, "writing superblock");
    closure_put(&ca->set->sb_write);

static void bcache_write_super_unlock(struct closure *cl)
    struct cache_set *c = container_of(cl, struct cache_set, sb_write);

    up(&c->sb_write_mutex);

void bcache_write_super(struct cache_set *c)
    struct closure *cl = &c->sb_write;

    down(&c->sb_write_mutex);
    closure_init(cl, &c->cl);

    for_each_cache(ca, c, i) {
        struct bio *bio = &ca->sb_bio;

        ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
        ca->sb.seq = c->sb.seq;
        ca->sb.last_mount = c->sb.last_mount;

        SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

        bio->bi_bdev = ca->bdev;
        bio->bi_end_io = write_super_endio;
        bio->bi_private = ca;

        __write_super(&ca->sb, bio);

    closure_return_with_destructor(cl, bcache_write_super_unlock);
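/*
 * UUID io: a cache set keeps an array of struct uuid_entry, one slot per
 * attached bcache device, in a bucket of its own; uuid_io() reads or writes
 * the whole array, serialised by uuid_write_mutex.
 */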
static void uuid_endio(struct bio *bio, int error)
    struct closure *cl = bio->bi_private;
    struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

    cache_set_err_on(error, c, "accessing uuids");
    bch_bbio_free(bio, c);

static void uuid_io_unlock(struct closure *cl)
    struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

    up(&c->uuid_write_mutex);

static void uuid_io(struct cache_set *c, unsigned long rw,
            struct bkey *k, struct closure *parent)
    struct closure *cl = &c->uuid_write;
    struct uuid_entry *u;

    down(&c->uuid_write_mutex);
    closure_init(cl, parent);

    for (i = 0; i < KEY_PTRS(k); i++) {
        struct bio *bio = bch_bbio_alloc(c);

        bio->bi_rw = REQ_SYNC|REQ_META|rw;
        bio->bi_iter.bi_size = KEY_SIZE(k) << 9;

        bio->bi_end_io = uuid_endio;
        bio->bi_private = cl;
        bch_bio_map(bio, c->uuids);

        bch_submit_bbio(bio, c, k, i);

    bch_extent_to_text(buf, sizeof(buf), k);
    pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);

    for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
        if (!bch_is_zero(u->uuid, 16))
            pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
                 u - c->uuids, u->uuid, u->label,
                 u->first_reg, u->last_reg, u->invalidated);

    closure_return_with_destructor(cl, uuid_io_unlock);
static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
    struct bkey *k = &j->uuid_bucket;

    if (__bch_btree_ptr_invalid(c, k))
        return "bad uuid pointer";

    bkey_copy(&c->uuid_bucket, k);
    uuid_io(c, READ_SYNC, k, cl);

    if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
        struct uuid_entry_v0 *u0 = (void *) c->uuids;
        struct uuid_entry *u1 = (void *) c->uuids;

         * Since the new uuid entry is bigger than the old, we have to
         * convert starting at the highest memory address and work down
         * in order to do it in place

        for (i = c->nr_uuids - 1;

            memcpy(u1[i].uuid, u0[i].uuid, 16);
            memcpy(u1[i].label, u0[i].label, 32);

            u1[i].first_reg = u0[i].first_reg;
            u1[i].last_reg = u0[i].last_reg;
            u1[i].invalidated = u0[i].invalidated;
static int __uuid_write(struct cache_set *c)
    closure_init_stack(&cl);

    lockdep_assert_held(&bch_register_lock);

    if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))

    SET_KEY_SIZE(&k.key, c->sb.bucket_size);
    uuid_io(c, REQ_WRITE, &k.key, &cl);

    bkey_copy(&c->uuid_bucket, &k.key);

int bch_uuid_write(struct cache_set *c)
    int ret = __uuid_write(c);

    bch_journal_meta(c, NULL);

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
    struct uuid_entry *u;

         u < c->uuids + c->nr_uuids; u++)
        if (!memcmp(u->uuid, uuid, 16))

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
    static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
    return uuid_find(c, zero_uuid);
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * lru (and in the future other) cache replacement policies; for most purposes
 * it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other, and
 * it's actually the gens that must be written out at specific times - it's no
 * big deal if the priorities don't get written, if we lose them we just reuse
 * buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, and in as many buckets are required
 * to fit them all. The buckets we use to store them form a list; the journal
 * header points to the first bucket, the first bucket points to the second
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
static void prio_endio(struct bio *bio, int error)
    struct cache *ca = bio->bi_private;

    cache_set_err_on(error, ca->set, "accessing priorities");
    bch_bbio_free(bio, ca->set);
    closure_put(&ca->prio);

static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
    struct closure *cl = &ca->prio;
    struct bio *bio = bch_bbio_alloc(ca->set);

    closure_init_stack(cl);

    bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
    bio->bi_bdev = ca->bdev;
    bio->bi_rw = REQ_SYNC|REQ_META|rw;
    bio->bi_iter.bi_size = bucket_bytes(ca);

    bio->bi_end_io = prio_endio;
    bio->bi_private = ca;
    bch_bio_map(bio, ca->disk_buckets);

    closure_bio_submit(bio, &ca->prio, ca);
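/*
 * bch_prio_write() packs the in-memory bucket prios/gens into prio_set
 * buckets from the last one down to the first, chaining each bucket to the
 * next via next_bucket, then journals the new locations; the old prio
 * buckets are only freed once that journal write is done.
 */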
void bch_prio_write(struct cache *ca)
    closure_init_stack(&cl);

    lockdep_assert_held(&ca->set->bucket_lock);

    ca->disk_buckets->seq++;

    atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
            &ca->meta_sectors_written);

    //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
    //         fifo_used(&ca->free_inc), fifo_used(&ca->unused));

    for (i = prio_buckets(ca) - 1; i >= 0; --i) {
        struct prio_set *p = ca->disk_buckets;
        struct bucket_disk *d = p->data;
        struct bucket_disk *end = d + prios_per_bucket(ca);

        for (b = ca->buckets + i * prios_per_bucket(ca);
             b < ca->buckets + ca->sb.nbuckets && d < end;
            d->prio = cpu_to_le16(b->prio);

        p->next_bucket = ca->prio_buckets[i + 1];
        p->magic = pset_magic(&ca->sb);
        p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);

        bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
        BUG_ON(bucket == -1);

        mutex_unlock(&ca->set->bucket_lock);
        prio_io(ca, bucket, REQ_WRITE);
        mutex_lock(&ca->set->bucket_lock);

        ca->prio_buckets[i] = bucket;
        atomic_dec_bug(&ca->buckets[bucket].pin);

    mutex_unlock(&ca->set->bucket_lock);

    bch_journal_meta(ca->set, &cl);

    mutex_lock(&ca->set->bucket_lock);

     * Don't want the old priorities to get garbage collected until after we
     * finish writing the new ones, and they're journalled

    for (i = 0; i < prio_buckets(ca); i++) {
        if (ca->prio_last_buckets[i])
            __bch_bucket_free(ca,
                &ca->buckets[ca->prio_last_buckets[i]]);

        ca->prio_last_buckets[i] = ca->prio_buckets[i];
static void prio_read(struct cache *ca, uint64_t bucket)
    struct prio_set *p = ca->disk_buckets;
    struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
    unsigned bucket_nr = 0;

    for (b = ca->buckets;
         b < ca->buckets + ca->sb.nbuckets;
            ca->prio_buckets[bucket_nr] = bucket;
            ca->prio_last_buckets[bucket_nr] = bucket;

            prio_io(ca, bucket, READ_SYNC);

            if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
                pr_warn("bad csum reading priorities");

            if (p->magic != pset_magic(&ca->sb))
                pr_warn("bad magic reading priorities");

            bucket = p->next_bucket;

        b->prio = le16_to_cpu(d->prio);
        b->gen = b->last_gc = d->gen;
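/* Generic bcache_device code, shared by cached devices and flash-only volumes */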
static int open_dev(struct block_device *b, fmode_t mode)
    struct bcache_device *d = b->bd_disk->private_data;
    if (test_bit(BCACHE_DEV_CLOSING, &d->flags))

static void release_dev(struct gendisk *b, fmode_t mode)
    struct bcache_device *d = b->private_data;

static int ioctl_dev(struct block_device *b, fmode_t mode,
             unsigned int cmd, unsigned long arg)
    struct bcache_device *d = b->bd_disk->private_data;
    return d->ioctl(d, mode, cmd, arg);

static const struct block_device_operations bcache_ops = {
    .release = release_dev,
    .owner = THIS_MODULE,
void bcache_device_stop(struct bcache_device *d)
    if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
        closure_queue(&d->cl);

static void bcache_device_unlink(struct bcache_device *d)
    lockdep_assert_held(&bch_register_lock);

    if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {

        sysfs_remove_link(&d->c->kobj, d->name);
        sysfs_remove_link(&d->kobj, "cache");

        for_each_cache(ca, d->c, i)
            bd_unlink_disk_holder(ca->bdev, d->disk);

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
    for_each_cache(ca, d->c, i)
        bd_link_disk_holder(ca->bdev, d->disk);

    snprintf(d->name, BCACHEDEVNAME_SIZE,
         "%s%u", name, d->id);

    WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
         sysfs_create_link(&c->kobj, &d->kobj, d->name),
         "Couldn't create device <-> cache set symlinks");

    clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
static void bcache_device_detach(struct bcache_device *d)
    lockdep_assert_held(&bch_register_lock);

    if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
        struct uuid_entry *u = d->c->uuids + d->id;

        SET_UUID_FLASH_ONLY(u, 0);
        memcpy(u->uuid, invalid_uuid, 16);
        u->invalidated = cpu_to_le32(get_seconds());
        bch_uuid_write(d->c);

    bcache_device_unlink(d);

    d->c->devices[d->id] = NULL;
    closure_put(&d->c->caching);

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
    closure_get(&c->caching);

static void bcache_device_free(struct bcache_device *d)
    lockdep_assert_held(&bch_register_lock);

    pr_info("%s stopped", d->disk->disk_name);

    bcache_device_detach(d);
    if (d->disk && d->disk->flags & GENHD_FL_UP)
        del_gendisk(d->disk);
    if (d->disk && d->disk->queue)
        blk_cleanup_queue(d->disk->queue);

    ida_simple_remove(&bcache_minor, d->disk->first_minor);

    bio_split_pool_free(&d->bio_split_hook);

    bioset_free(d->bio_split);
    if (is_vmalloc_addr(d->full_dirty_stripes))
        vfree(d->full_dirty_stripes);
    else
        kfree(d->full_dirty_stripes);
    if (is_vmalloc_addr(d->stripe_sectors_dirty))
        vfree(d->stripe_sectors_dirty);
    else
        kfree(d->stripe_sectors_dirty);

    closure_debug_destroy(&d->cl);
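/*
 * bcache_device_init() sizes the dirty stripe accounting arrays, grabs a
 * minor number and sets up the gendisk and request queue for a new bcache
 * device.
 */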
static int bcache_device_init(struct bcache_device *d, unsigned block_size,
    struct request_queue *q;

    d->stripe_size = 1 << 31;

    d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);

    if (!d->nr_stripes ||
        d->nr_stripes > INT_MAX ||
        d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
        pr_err("nr_stripes too large");

    n = d->nr_stripes * sizeof(atomic_t);
    d->stripe_sectors_dirty = n < PAGE_SIZE << 6
        ? kzalloc(n, GFP_KERNEL)
    if (!d->stripe_sectors_dirty)

    n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
    d->full_dirty_stripes = n < PAGE_SIZE << 6
        ? kzalloc(n, GFP_KERNEL)
    if (!d->full_dirty_stripes)

    minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL);

    if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
        bio_split_pool_init(&d->bio_split_hook) ||
        !(d->disk = alloc_disk(1))) {
        ida_simple_remove(&bcache_minor, minor);

    set_capacity(d->disk, sectors);
    snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor);

    d->disk->major = bcache_major;
    d->disk->first_minor = minor;
    d->disk->fops = &bcache_ops;
    d->disk->private_data = d;

    q = blk_alloc_queue(GFP_KERNEL);

    blk_queue_make_request(q, NULL);

    q->backing_dev_info.congested_data = d;
    q->limits.max_hw_sectors = UINT_MAX;
    q->limits.max_sectors = UINT_MAX;
    q->limits.max_segment_size = UINT_MAX;
    q->limits.max_segments = BIO_MAX_PAGES;
    q->limits.max_discard_sectors = UINT_MAX;
    q->limits.discard_granularity = 512;
    q->limits.io_min = block_size;
    q->limits.logical_block_size = block_size;
    q->limits.physical_block_size = block_size;
    set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
    clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags);
    set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);

    blk_queue_flush(q, REQ_FLUSH|REQ_FUA);
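/* Cached devices */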
static void calc_cached_dev_sectors(struct cache_set *c)
    uint64_t sectors = 0;
    struct cached_dev *dc;

    list_for_each_entry(dc, &c->cached_devs, list)
        sectors += bdev_sectors(dc->bdev);

    c->cached_dev_sectors = sectors;

void bch_cached_dev_run(struct cached_dev *dc)
    struct bcache_device *d = &dc->disk;
    char buf[SB_LABEL_SIZE + 1];
        kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),

    memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
    buf[SB_LABEL_SIZE] = '\0';
    env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);

    if (atomic_xchg(&dc->running, 1)) {

        BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
        closure_init_stack(&cl);

        SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
        bch_write_bdev_super(dc, &cl);

    bd_link_disk_holder(dc->bdev, dc->disk.disk);
    /* won't show up in the uevent file, use udevadm monitor -e instead
     * only class / kset properties are persistent */
    kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);

    if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
        sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
        pr_debug("error creating sysfs link");
static void cached_dev_detach_finish(struct work_struct *w)
    struct cached_dev *dc = container_of(w, struct cached_dev, detach);
    char buf[BDEVNAME_SIZE];

    closure_init_stack(&cl);

    BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
    BUG_ON(atomic_read(&dc->count));

    mutex_lock(&bch_register_lock);

    memset(&dc->sb.set_uuid, 0, 16);
    SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

    bch_write_bdev_super(dc, &cl);

    bcache_device_detach(&dc->disk);
    list_move(&dc->list, &uncached_devices);

    clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
    clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);

    mutex_unlock(&bch_register_lock);

    pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));

    /* Drop ref we took in cached_dev_detach() */
    closure_put(&dc->disk.cl);

void bch_cached_dev_detach(struct cached_dev *dc)
    lockdep_assert_held(&bch_register_lock);

    if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))

    if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))

     * Block the device from being closed and freed until we're finished
    closure_get(&dc->disk.cl);

    bch_writeback_queue(dc);
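/*
 * Attach a backing device to a cache set: look up (or allocate) its
 * uuid_entry, record the binding in the uuid array and the backing device's
 * superblock, then start writeback and expose the device.
 */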
int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
    uint32_t rtime = cpu_to_le32(get_seconds());
    struct uuid_entry *u;
    char buf[BDEVNAME_SIZE];

    bdevname(dc->bdev, buf);

    if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))

        pr_err("Can't attach %s: already attached", buf);

    if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
        pr_err("Can't attach %s: shutting down", buf);

    if (dc->sb.block_size < c->sb.block_size) {
        pr_err("Couldn't attach %s: block size less than set's block size",

    u = uuid_find(c, dc->sb.uuid);

        (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
         BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
        memcpy(u->uuid, invalid_uuid, 16);
        u->invalidated = cpu_to_le32(get_seconds());

        if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
            pr_err("Couldn't find uuid for %s in set", buf);

        u = uuid_find_empty(c);
            pr_err("Not caching %s, no room for UUID", buf);

    /* Deadlocks since we're called via sysfs...
    sysfs_remove_file(&dc->kobj, &sysfs_attach);

    if (bch_is_zero(u->uuid, 16)) {
        closure_init_stack(&cl);

        memcpy(u->uuid, dc->sb.uuid, 16);
        memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
        u->first_reg = u->last_reg = rtime;

        memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
        SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

        bch_write_bdev_super(dc, &cl);

        u->last_reg = rtime;

    bcache_device_attach(&dc->disk, c, u - c->uuids);
    list_move(&dc->list, &c->cached_devs);
    calc_cached_dev_sectors(c);

     * dc->c must be set before dc->count != 0 - paired with the mb in
    atomic_set(&dc->count, 1);

    /* Block writeback thread, but spawn it */
    down_write(&dc->writeback_lock);
    if (bch_cached_dev_writeback_start(dc)) {
        up_write(&dc->writeback_lock);

    if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
        bch_sectors_dirty_init(dc);
        atomic_set(&dc->has_dirty, 1);
        atomic_inc(&dc->count);
        bch_writeback_queue(dc);

    bch_cached_dev_run(dc);
    bcache_device_link(&dc->disk, c, "bdev");

    /* Allow the writeback thread to proceed */
    up_write(&dc->writeback_lock);

    pr_info("Caching %s as %s on set %pU",
        bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
        dc->disk.c->sb.set_uuid);
void bch_cached_dev_release(struct kobject *kobj)
    struct cached_dev *dc = container_of(kobj, struct cached_dev,

    module_put(THIS_MODULE);

static void cached_dev_free(struct closure *cl)
    struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

    cancel_delayed_work_sync(&dc->writeback_rate_update);
    if (!IS_ERR_OR_NULL(dc->writeback_thread))
        kthread_stop(dc->writeback_thread);

    mutex_lock(&bch_register_lock);

    if (atomic_read(&dc->running))
        bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
    bcache_device_free(&dc->disk);
    list_del(&dc->list);

    mutex_unlock(&bch_register_lock);

    if (!IS_ERR_OR_NULL(dc->bdev))
        blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

    wake_up(&unregister_wait);

    kobject_put(&dc->disk.kobj);

static void cached_dev_flush(struct closure *cl)
    struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
    struct bcache_device *d = &dc->disk;

    mutex_lock(&bch_register_lock);
    bcache_device_unlink(d);
    mutex_unlock(&bch_register_lock);

    bch_cache_accounting_destroy(&dc->accounting);
    kobject_del(&d->kobj);

    continue_at(cl, cached_dev_free, system_wq);
static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
    struct request_queue *q = bdev_get_queue(dc->bdev);

    __module_get(THIS_MODULE);
    INIT_LIST_HEAD(&dc->list);
    closure_init(&dc->disk.cl, NULL);
    set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
    kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
    INIT_WORK(&dc->detach, cached_dev_detach_finish);
    sema_init(&dc->sb_write_mutex, 1);
    INIT_LIST_HEAD(&dc->io_lru);
    spin_lock_init(&dc->io_lock);
    bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

    dc->sequential_cutoff = 4 << 20;

    for (io = dc->io; io < dc->io + RECENT_IO; io++) {
        list_add(&io->lru, &dc->io_lru);
        hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);

    dc->disk.stripe_size = q->limits.io_opt >> 9;

    if (dc->disk.stripe_size)
        dc->partial_stripes_expensive =
            q->limits.raid_partial_stripes_expensive;

    ret = bcache_device_init(&dc->disk, block_size,
                 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);

    set_capacity(dc->disk.disk,
             dc->bdev->bd_part->nr_sects - dc->sb.data_offset);

    dc->disk.disk->queue->backing_dev_info.ra_pages =
        max(dc->disk.disk->queue->backing_dev_info.ra_pages,
            q->backing_dev_info.ra_pages);

    bch_cached_dev_request_init(dc);
    bch_cached_dev_writeback_init(dc);
/* Cached device - bcache superblock */

static void register_bdev(struct cache_sb *sb, struct page *sb_page,
               struct block_device *bdev,
               struct cached_dev *dc)
    char name[BDEVNAME_SIZE];
    const char *err = "cannot allocate memory";
    struct cache_set *c;

    memcpy(&dc->sb, sb, sizeof(struct cache_sb));
    dc->bdev->bd_holder = dc;

    bio_init(&dc->sb_bio);
    dc->sb_bio.bi_max_vecs = 1;
    dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
    dc->sb_bio.bi_io_vec[0].bv_page = sb_page;

    if (cached_dev_init(dc, sb->block_size << 9))

    err = "error creating kobject";
    if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
    if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))

    pr_info("registered backing device %s", bdevname(bdev, name));

    list_add(&dc->list, &uncached_devices);
    list_for_each_entry(c, &bch_cache_sets, list)
        bch_cached_dev_attach(dc, c);

    if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
        BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
        bch_cached_dev_run(dc);

    pr_notice("error opening %s: %s", bdevname(bdev, name), err);
    bcache_device_stop(&dc->disk);
/* Flash only volumes */

void bch_flash_dev_release(struct kobject *kobj)
    struct bcache_device *d = container_of(kobj, struct bcache_device,

static void flash_dev_free(struct closure *cl)
    struct bcache_device *d = container_of(cl, struct bcache_device, cl);
    mutex_lock(&bch_register_lock);
    bcache_device_free(d);
    mutex_unlock(&bch_register_lock);
    kobject_put(&d->kobj);

static void flash_dev_flush(struct closure *cl)
    struct bcache_device *d = container_of(cl, struct bcache_device, cl);

    mutex_lock(&bch_register_lock);
    bcache_device_unlink(d);
    mutex_unlock(&bch_register_lock);
    kobject_del(&d->kobj);
    continue_at(cl, flash_dev_free, system_wq);

static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
    struct bcache_device *d = kzalloc(sizeof(struct bcache_device),

    closure_init(&d->cl, NULL);
    set_closure_fn(&d->cl, flash_dev_flush, system_wq);

    kobject_init(&d->kobj, &bch_flash_dev_ktype);

    if (bcache_device_init(d, block_bytes(c), u->sectors))

    bcache_device_attach(d, c, u - c->uuids);
    bch_flash_dev_request_init(d);

    if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))

    bcache_device_link(d, c, "volume");

    kobject_put(&d->kobj);

static int flash_devs_run(struct cache_set *c)
    struct uuid_entry *u;

         u < c->uuids + c->nr_uuids && !ret;
        if (UUID_FLASH_ONLY(u))
            ret = flash_dev_run(c, u);

int bch_flash_dev_create(struct cache_set *c, uint64_t size)
    struct uuid_entry *u;

    if (test_bit(CACHE_SET_STOPPING, &c->flags))

    if (!test_bit(CACHE_SET_RUNNING, &c->flags))

    u = uuid_find_empty(c);
        pr_err("Can't create volume, no room for UUID");

    get_random_bytes(u->uuid, 16);
    memset(u->label, 0, 32);
    u->first_reg = u->last_reg = cpu_to_le32(get_seconds());

    SET_UUID_FLASH_ONLY(u, 1);
    u->sectors = size >> 9;

    return flash_dev_run(c, u);
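/* Cache sets */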
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
    if (c->on_error != ON_ERROR_PANIC &&
        test_bit(CACHE_SET_STOPPING, &c->flags))

    /* XXX: we can be called from atomic context
    acquire_console_sem();

    printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);

    va_start(args, fmt);

    printk(", disabling caching\n");

    if (c->on_error == ON_ERROR_PANIC)
        panic("panic forced after error\n");

    bch_cache_set_unregister(c);
void bch_cache_set_release(struct kobject *kobj)
    struct cache_set *c = container_of(kobj, struct cache_set, kobj);

    module_put(THIS_MODULE);

static void cache_set_free(struct closure *cl)
    struct cache_set *c = container_of(cl, struct cache_set, cl);

    if (!IS_ERR_OR_NULL(c->debug))
        debugfs_remove(c->debug);

    bch_open_buckets_free(c);
    bch_btree_cache_free(c);
    bch_journal_free(c);

    for_each_cache(ca, c, i)
            c->cache[ca->sb.nr_this_dev] = NULL;
            kobject_put(&ca->kobj);

    bch_bset_sort_state_free(&c->sort);
    free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));

    if (c->moving_gc_wq)
        destroy_workqueue(c->moving_gc_wq);

    bioset_free(c->bio_split);

    mempool_destroy(c->fill_iter);

    mempool_destroy(c->bio_meta);

    mempool_destroy(c->search);

    mutex_lock(&bch_register_lock);

    mutex_unlock(&bch_register_lock);

    pr_info("Cache set %pU unregistered", c->sb.set_uuid);
    wake_up(&unregister_wait);

    closure_debug_destroy(&c->cl);
    kobject_put(&c->kobj);
static void cache_set_flush(struct closure *cl)
    struct cache_set *c = container_of(cl, struct cache_set, caching);

    bch_cache_accounting_destroy(&c->accounting);

    kobject_put(&c->internal);
    kobject_del(&c->kobj);

    kthread_stop(c->gc_thread);

    if (!IS_ERR_OR_NULL(c->root))
        list_add(&c->root->list, &c->btree_cache);

    /* Should skip this if we're unregistering because of an error */
    list_for_each_entry(b, &c->btree_cache, list) {
        mutex_lock(&b->write_lock);
        if (btree_node_dirty(b))
            __bch_btree_node_write(b, NULL);
        mutex_unlock(&b->write_lock);

    for_each_cache(ca, c, i)
        if (ca->alloc_thread)
            kthread_stop(ca->alloc_thread);

    if (c->journal.cur) {
        cancel_delayed_work_sync(&c->journal.work);
        /* flush last journal entry if needed */
        c->journal.work.work.func(&c->journal.work.work);

static void __cache_set_unregister(struct closure *cl)
    struct cache_set *c = container_of(cl, struct cache_set, caching);
    struct cached_dev *dc;

    mutex_lock(&bch_register_lock);

    for (i = 0; i < c->nr_uuids; i++)
        if (c->devices[i]) {
            if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
                test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
                dc = container_of(c->devices[i],
                          struct cached_dev, disk);
                bch_cached_dev_detach(dc);

                bcache_device_stop(c->devices[i]);

    mutex_unlock(&bch_register_lock);

    continue_at(cl, cache_set_flush, system_wq);

void bch_cache_set_stop(struct cache_set *c)
    if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
        closure_queue(&c->caching);

void bch_cache_set_unregister(struct cache_set *c)
    set_bit(CACHE_SET_UNREGISTERING, &c->flags);
    bch_cache_set_stop(c);
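/*
 * bch_cache_set_alloc() builds and initialises a new cache_set from an
 * on-disk superblock; everything it sets up here is torn down again by
 * cache_set_free().
 */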
#define alloc_bucket_pages(gfp, c)                        \
    ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))

struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
    struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);

    __module_get(THIS_MODULE);
    closure_init(&c->cl, NULL);
    set_closure_fn(&c->cl, cache_set_free, system_wq);

    closure_init(&c->caching, &c->cl);
    set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

    /* Maybe create continue_at_noreturn() and use it here? */
    closure_set_stopped(&c->cl);
    closure_put(&c->cl);

    kobject_init(&c->kobj, &bch_cache_set_ktype);
    kobject_init(&c->internal, &bch_cache_set_internal_ktype);

    bch_cache_accounting_init(&c->accounting, &c->cl);

    memcpy(c->sb.set_uuid, sb->set_uuid, 16);
    c->sb.block_size = sb->block_size;
    c->sb.bucket_size = sb->bucket_size;
    c->sb.nr_in_set = sb->nr_in_set;
    c->sb.last_mount = sb->last_mount;
    c->bucket_bits = ilog2(sb->bucket_size);
    c->block_bits = ilog2(sb->block_size);
    c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);

    c->btree_pages = bucket_pages(c);
    if (c->btree_pages > BTREE_MAX_PAGES)
        c->btree_pages = max_t(int, c->btree_pages / 4,

    sema_init(&c->sb_write_mutex, 1);
    mutex_init(&c->bucket_lock);
    init_waitqueue_head(&c->btree_cache_wait);
    init_waitqueue_head(&c->bucket_wait);
    sema_init(&c->uuid_write_mutex, 1);

    spin_lock_init(&c->btree_gc_time.lock);
    spin_lock_init(&c->btree_split_time.lock);
    spin_lock_init(&c->btree_read_time.lock);

    bch_moving_init_cache_set(c);

    INIT_LIST_HEAD(&c->list);
    INIT_LIST_HEAD(&c->cached_devs);
    INIT_LIST_HEAD(&c->btree_cache);
    INIT_LIST_HEAD(&c->btree_cache_freeable);
    INIT_LIST_HEAD(&c->btree_cache_freed);
    INIT_LIST_HEAD(&c->data_buckets);

    c->search = mempool_create_slab_pool(32, bch_search_cache);

    iter_size = (sb->bucket_size / sb->block_size + 1) *
        sizeof(struct btree_iter_set);

    if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
        !(c->bio_meta = mempool_create_kmalloc_pool(2,
                sizeof(struct bbio) + sizeof(struct bio_vec) *
                bucket_pages(c))) ||
        !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
        !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
        !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
        !(c->moving_gc_wq = create_workqueue("bcache_gc")) ||
        bch_journal_alloc(c) ||
        bch_btree_cache_alloc(c) ||
        bch_open_buckets_alloc(c) ||
        bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))

    c->congested_read_threshold_us = 2000;
    c->congested_write_threshold_us = 20000;
    c->error_limit = 8 << IO_ERROR_SHIFT;

    bch_cache_set_unregister(c);
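/*
 * Bring a cache set online. For a set that was previously synced, read the
 * journal, priorities, uuids and btree root and replay the journal; for a
 * brand new set, invalidate any existing data, pick fresh journal buckets
 * and allocate a new uuid bucket and btree root.
 */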
static void run_cache_set(struct cache_set *c)
    const char *err = "cannot allocate memory";
    struct cached_dev *dc, *t;

    closure_init_stack(&cl);

    for_each_cache(ca, c, i)
        c->nbuckets += ca->sb.nbuckets;

    if (CACHE_SYNC(&c->sb)) {

        err = "cannot allocate memory for journal";
        if (bch_journal_read(c, &journal))

        pr_debug("btree_journal_read() done");

        err = "no journal entries found";
        if (list_empty(&journal))

        j = &list_entry(journal.prev, struct journal_replay, list)->j;

        err = "IO error reading priorities";
        for_each_cache(ca, c, i)
            prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);

         * If prio_read() fails it'll call cache_set_error and we'll
         * tear everything down right away, but if we perhaps checked
         * sooner we could avoid journal replay.

        err = "bad btree root";
        if (__bch_btree_ptr_invalid(c, k))

        err = "error reading btree root";
        c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL);
        if (IS_ERR_OR_NULL(c->root))

        list_del_init(&c->root->list);
        rw_unlock(true, c->root);

        err = uuid_read(c, j, &cl);

        err = "error in recovery";
        if (bch_btree_check(c))

        bch_journal_mark(c, &journal);
        bch_initial_gc_finish(c);
        pr_debug("btree_check() done");

         * bcache_journal_next() can't happen sooner, or
         * btree_gc_finish() will give spurious errors about last_gc >
         * gc_gen - this is a hack but oh well.
        bch_journal_next(&c->journal);

        err = "error starting allocator thread";
        for_each_cache(ca, c, i)
            if (bch_cache_allocator_start(ca))

         * First place it's safe to allocate: btree_check() and
         * btree_gc_finish() have to run before we have buckets to
         * allocate, and bch_bucket_alloc_set() might cause a journal
         * entry to be written so bcache_journal_next() has to be called

         * If the uuids were in the old format we have to rewrite them
         * before the next journal entry is written:
        if (j->version < BCACHE_JSET_VERSION_UUID)

        bch_journal_replay(c, &journal);

        pr_notice("invalidating existing data");

        for_each_cache(ca, c, i) {
            ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
                          2, SB_JOURNAL_BUCKETS);

            for (j = 0; j < ca->sb.keys; j++)
                ca->sb.d[j] = ca->sb.first_bucket + j;

        bch_initial_gc_finish(c);

        err = "error starting allocator thread";
        for_each_cache(ca, c, i)
            if (bch_cache_allocator_start(ca))

        mutex_lock(&c->bucket_lock);
        for_each_cache(ca, c, i)
        mutex_unlock(&c->bucket_lock);

        err = "cannot allocate new UUID bucket";
        if (__uuid_write(c))

        err = "cannot allocate new btree root";
        c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
        if (IS_ERR_OR_NULL(c->root))

        mutex_lock(&c->root->write_lock);
        bkey_copy_key(&c->root->key, &MAX_KEY);
        bch_btree_node_write(c->root, &cl);
        mutex_unlock(&c->root->write_lock);

        bch_btree_set_root(c->root);
        rw_unlock(true, c->root);

         * We don't want to write the first journal entry until
         * everything is set up - fortunately journal entries won't be
         * written until the SET_CACHE_SYNC() here:
        SET_CACHE_SYNC(&c->sb, true);

        bch_journal_next(&c->journal);
        bch_journal_meta(c, &cl);

    err = "error starting gc thread";
    if (bch_gc_thread_start(c))

    c->sb.last_mount = get_seconds();
    bcache_write_super(c);

    list_for_each_entry_safe(dc, t, &uncached_devices, list)
        bch_cached_dev_attach(dc, c);

    set_bit(CACHE_SET_RUNNING, &c->flags);

    /* XXX: test this, it's broken */
    bch_cache_set_error(c, "%s", err);
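/*
 * register_cache_set() either adds the cache to an existing cache_set with
 * a matching set uuid or allocates a new one; once c->caches_loaded reaches
 * c->sb.nr_in_set the whole set can be brought online.
 */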
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
    return ca->sb.block_size == c->sb.block_size &&
           ca->sb.bucket_size == c->sb.bucket_size &&
           ca->sb.nr_in_set == c->sb.nr_in_set;

static const char *register_cache_set(struct cache *ca)
    const char *err = "cannot allocate memory";
    struct cache_set *c;

    list_for_each_entry(c, &bch_cache_sets, list)
        if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
            if (c->cache[ca->sb.nr_this_dev])
                return "duplicate cache set member";

            if (!can_attach_cache(ca, c))
                return "cache sb does not match set";

            if (!CACHE_SYNC(&ca->sb))
                SET_CACHE_SYNC(&c->sb, false);

    c = bch_cache_set_alloc(&ca->sb);

    err = "error creating kobject";
    if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
        kobject_add(&c->internal, &c->kobj, "internal"))

    if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))

    bch_debug_init_cache_set(c);

    list_add(&c->list, &bch_cache_sets);

    sprintf(buf, "cache%i", ca->sb.nr_this_dev);
    if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
        sysfs_create_link(&c->kobj, &ca->kobj, buf))

    if (ca->sb.seq > c->sb.seq) {
        c->sb.version = ca->sb.version;
        memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
        c->sb.flags = ca->sb.flags;
        c->sb.seq = ca->sb.seq;
        pr_debug("set version = %llu", c->sb.version);

    kobject_get(&ca->kobj);

    ca->set->cache[ca->sb.nr_this_dev] = ca;
    c->cache_by_alloc[c->caches_loaded++] = ca;

    if (c->caches_loaded == c->sb.nr_in_set)

    bch_cache_set_unregister(c);
void bch_cache_release(struct kobject *kobj)
    struct cache *ca = container_of(kobj, struct cache, kobj);

        BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
        ca->set->cache[ca->sb.nr_this_dev] = NULL;

    bio_split_pool_free(&ca->bio_split_hook);

    free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
    kfree(ca->prio_buckets);

    free_heap(&ca->heap);
    free_fifo(&ca->free_inc);

    for (i = 0; i < RESERVE_NR; i++)
        free_fifo(&ca->free[i]);

    if (ca->sb_bio.bi_inline_vecs[0].bv_page)
        put_page(ca->sb_bio.bi_io_vec[0].bv_page);

    if (!IS_ERR_OR_NULL(ca->bdev))
        blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

    module_put(THIS_MODULE);

static int cache_alloc(struct cache_sb *sb, struct cache *ca)
    __module_get(THIS_MODULE);
    kobject_init(&ca->kobj, &bch_cache_ktype);

    bio_init(&ca->journal.bio);
    ca->journal.bio.bi_max_vecs = 8;
    ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;

    free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;

    if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
        !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
        !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
        !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
        !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
        !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
        !(ca->buckets = vzalloc(sizeof(struct bucket) *
                    ca->sb.nbuckets)) ||
        !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
        !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
        bio_split_pool_init(&ca->bio_split_hook))

    ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

    for_each_bucket(b, ca)
        atomic_set(&b->pin, 0);
static int register_cache(struct cache_sb *sb, struct page *sb_page,
              struct block_device *bdev, struct cache *ca)
    char name[BDEVNAME_SIZE];
    const char *err = NULL; /* must be set for any error case */

    memcpy(&ca->sb, sb, sizeof(struct cache_sb));
    ca->bdev->bd_holder = ca;

    bio_init(&ca->sb_bio);
    ca->sb_bio.bi_max_vecs = 1;
    ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
    ca->sb_bio.bi_io_vec[0].bv_page = sb_page;

    if (blk_queue_discard(bdev_get_queue(ca->bdev)))
        ca->discard = CACHE_DISCARD(&ca->sb);

    ret = cache_alloc(sb, ca);

        err = "cache_alloc(): -ENOMEM";
        err = "cache_alloc(): unknown error";

    if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
        err = "error calling kobject_add";

    mutex_lock(&bch_register_lock);
    err = register_cache_set(ca);
    mutex_unlock(&bch_register_lock);

    pr_info("registered cache device %s", bdevname(bdev, name));

    kobject_put(&ca->kobj);

    pr_notice("error opening %s: %s", bdevname(bdev, name), err);
/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
                   const char *, size_t);

kobj_attribute_write(register, register_bcache);
kobj_attribute_write(register_quiet, register_bcache);

static bool bch_is_open_backing(struct block_device *bdev) {
    struct cache_set *c, *tc;
    struct cached_dev *dc, *t;

    list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
        list_for_each_entry_safe(dc, t, &c->cached_devs, list)
            if (dc->bdev == bdev)
    list_for_each_entry_safe(dc, t, &uncached_devices, list)
        if (dc->bdev == bdev)

static bool bch_is_open_cache(struct block_device *bdev) {
    struct cache_set *c, *tc;

    list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
        for_each_cache(ca, c, i)
            if (ca->bdev == bdev)

static bool bch_is_open(struct block_device *bdev) {
    return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);

static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
                   const char *buffer, size_t size)
    const char *err = "cannot allocate memory";
    struct cache_sb *sb = NULL;
    struct block_device *bdev = NULL;
    struct page *sb_page = NULL;

    if (!try_module_get(THIS_MODULE))

    if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
        !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))

    err = "failed to open device";
    bdev = blkdev_get_by_path(strim(path),
                  FMODE_READ|FMODE_WRITE|FMODE_EXCL,

    if (bdev == ERR_PTR(-EBUSY)) {
        bdev = lookup_bdev(strim(path));
        mutex_lock(&bch_register_lock);
        if (!IS_ERR(bdev) && bch_is_open(bdev))
            err = "device already registered";
        else
            err = "device busy";
        mutex_unlock(&bch_register_lock);
        if (attr == &ksysfs_register_quiet)

    err = "failed to set blocksize";
    if (set_blocksize(bdev, 4096))

    err = read_super(sb, bdev, &sb_page);

    if (SB_IS_BDEV(sb)) {
        struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);

        mutex_lock(&bch_register_lock);
        register_bdev(sb, sb_page, bdev, dc);
        mutex_unlock(&bch_register_lock);

        struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);

        if (register_cache(sb, sb_page, bdev, ca) != 0)

    module_put(THIS_MODULE);

    blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

    pr_info("error opening %s: %s", path, err);
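/*
 * Reboot notifier: on shutdown or power-off, stop every cache set and
 * backing device and wait up to two seconds for them to finish closing.
 */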
static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
    if (code == SYS_DOWN ||
        code == SYS_POWER_OFF) {

        unsigned long start = jiffies;
        bool stopped = false;

        struct cache_set *c, *tc;
        struct cached_dev *dc, *tdc;

        mutex_lock(&bch_register_lock);

        if (list_empty(&bch_cache_sets) &&
            list_empty(&uncached_devices))

        pr_info("Stopping all devices:");

        list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
            bch_cache_set_stop(c);

        list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
            bcache_device_stop(&dc->disk);

        /* What's a condition variable? */

            long timeout = start + 2 * HZ - jiffies;

            stopped = list_empty(&bch_cache_sets) &&
                  list_empty(&uncached_devices);

            if (timeout < 0 || stopped)

            prepare_to_wait(&unregister_wait, &wait,
                    TASK_UNINTERRUPTIBLE);

            mutex_unlock(&bch_register_lock);
            schedule_timeout(timeout);
            mutex_lock(&bch_register_lock);

        finish_wait(&unregister_wait, &wait);

            pr_info("All devices stopped");
            pr_notice("Timeout waiting for devices to be closed");

        mutex_unlock(&bch_register_lock);

static struct notifier_block reboot = {
    .notifier_call = bcache_reboot,
    .priority = INT_MAX, /* before any real devices */
static void bcache_exit(void)
    kobject_put(bcache_kobj);

    destroy_workqueue(bcache_wq);

    unregister_blkdev(bcache_major, "bcache");
    unregister_reboot_notifier(&reboot);

static int __init bcache_init(void)
    static const struct attribute *files[] = {
        &ksysfs_register.attr,
        &ksysfs_register_quiet.attr,

    mutex_init(&bch_register_lock);
    init_waitqueue_head(&unregister_wait);
    register_reboot_notifier(&reboot);
    closure_debug_init();

    bcache_major = register_blkdev(0, "bcache");
    if (bcache_major < 0) {
        unregister_reboot_notifier(&reboot);
        return bcache_major;

    if (!(bcache_wq = create_workqueue("bcache")) ||
        !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
        sysfs_create_files(bcache_kobj, files) ||
        bch_request_init() ||
        bch_debug_init(bcache_kobj))

module_exit(bcache_exit);
module_init(bcache_init);