/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>
#include <linux/crc32.h>

#define DM_MSG_PREFIX		"zoned metadata"

/*
 * Metadata version.
 */
#define DMZ_META_VER	1
/*
 * On-disk super block magic.
 */
#define DMZ_MAGIC	((((unsigned int)('D')) << 24) | \
			 (((unsigned int)('Z')) << 16) | \
			 (((unsigned int)('B')) << 8) | \
			 ((unsigned int)('D')))
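/*
 * Note: the magic above simply encodes the ASCII string "DZBD" in one
 * 32-bit value, i.e. DMZ_MAGIC == 0x445a4244 ('D' = 0x44, 'Z' = 0x5a,
 * 'B' = 0x42).
 */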
/*
 * On disk super block.
 * This uses only 512 B but uses on disk a full 4KB block. This block is
 * followed on disk by the mapping table of chunks to zones and the bitmap
 * blocks indicating zone block validity.
 * The overall resulting metadata format is:
 *    (1) Super block (1 block)
 *    (2) Chunk mapping table (nr_map_blocks)
 *    (3) Bitmap blocks (nr_bitmap_blocks)
 * All metadata blocks are stored in conventional zones, starting from
 * the first conventional zone found on disk.
 */
struct dmz_super {
	/* Magic number */
	__le32		magic;			/*   4 */

	/* Metadata version number */
	__le32		version;		/*   8 */

	/* Generation number */
	__le64		gen;			/*  16 */

	/* This block number */
	__le64		sb_block;		/*  24 */

	/* The number of metadata blocks, including this super block */
	__le32		nr_meta_blocks;		/*  28 */

	/* The number of sequential zones reserved for reclaim */
	__le32		nr_reserved_seq;	/*  32 */

	/* The number of entries in the mapping table */
	__le32		nr_chunks;		/*  36 */

	/* The number of blocks used for the chunk mapping table */
	__le32		nr_map_blocks;		/*  40 */

	/* The number of blocks used for the block bitmaps */
	__le32		nr_bitmap_blocks;	/*  44 */

	/* Checksum */
	__le32		crc;			/*  48 */

	/* Padding to full 512B sector */
	u8		reserved[464];		/* 512 */
};
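/*
 * Sizing sketch (not stored in the super block, just the arithmetic the
 * fields above imply): each 4 KB mapping block holds 512 chunk entries, so
 * nr_map_blocks = DIV_ROUND_UP(nr_chunks, 512). With 256 MB zones made of
 * 4 KB blocks, one zone needs an 8 KB validity bitmap, i.e. 2 bitmap blocks
 * per zone.
 */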
/*
 * Chunk mapping entry: entries are indexed by chunk number
 * and give the zone ID (dzone_id) mapping the chunk on disk.
 * This zone may be sequential or random. If it is a sequential
 * zone, a second zone (bzone_id) used as a write buffer may
 * also be specified. This second zone will always be a randomly
 * writable zone.
 */
struct dmz_map {
	__le32		dzone_id;
	__le32		bzone_id;
};
/*
 * Chunk mapping table metadata: 512 8-byte entries per 4KB block.
 */
#define DMZ_MAP_ENTRIES		(DMZ_BLOCK_SIZE / sizeof(struct dmz_map))
#define DMZ_MAP_ENTRIES_SHIFT	(ilog2(DMZ_MAP_ENTRIES))
#define DMZ_MAP_ENTRIES_MASK	(DMZ_MAP_ENTRIES - 1)
#define DMZ_MAP_UNMAPPED	UINT_MAX
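/*
 * Chunk-to-entry lookup sketch (assuming the 4 KB metadata block size, so
 * DMZ_MAP_ENTRIES == 512): the mapping entry for a chunk lives in mapping
 * block (chunk >> DMZ_MAP_ENTRIES_SHIFT) at index
 * (chunk & DMZ_MAP_ENTRIES_MASK). For example, chunk 1000 is entry 488 of
 * the second mapping block.
 */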
/*
 * Metadata block descriptor (for cached metadata blocks).
 */
struct dmz_mblock {
	struct rb_node		node;
	struct list_head	link;
	sector_t		no;
	unsigned int		ref;
	unsigned long		state;
	struct page		*page;
	void			*data;
};
/*
 * Metadata block state flags.
 */
enum {
	DMZ_META_DIRTY,
	DMZ_META_READING,
	DMZ_META_WRITING,
	DMZ_META_ERROR,
};
/*
 * Super block information (one per metadata set).
 */
struct dmz_sb {
	sector_t		block;
	struct dmz_mblock	*mblk;
	struct dmz_super	*sb;
};
/*
 * In-memory metadata.
 */
struct dmz_metadata {
	struct dmz_dev		*dev;

	sector_t		zone_bitmap_size;
	unsigned int		zone_nr_bitmap_blocks;
	unsigned int		zone_bits_per_mblk;

	unsigned int		nr_bitmap_blocks;
	unsigned int		nr_map_blocks;

	unsigned int		nr_useable_zones;
	unsigned int		nr_meta_blocks;
	unsigned int		nr_meta_zones;
	unsigned int		nr_data_zones;
	unsigned int		nr_rnd_zones;
	unsigned int		nr_reserved_seq;
	unsigned int		nr_chunks;

	/* Zone information array */
	struct dm_zone		*zones;

	struct dm_zone		*sb_zone;
	struct dmz_sb		sb[2];
	unsigned int		mblk_primary;
	u64			sb_gen;
	unsigned int		min_nr_mblks;
	unsigned int		max_nr_mblks;
	atomic_t		nr_mblks;
	struct rw_semaphore	mblk_sem;
	struct mutex		mblk_flush_lock;
	spinlock_t		mblk_lock;
	struct rb_root		mblk_rbtree;
	struct list_head	mblk_lru_list;
	struct list_head	mblk_dirty_list;
	struct shrinker		mblk_shrinker;

	/* Zone allocation management */
	struct mutex		map_lock;
	struct dmz_mblock	**map_mblk;
	unsigned int		nr_rnd;
	atomic_t		unmap_nr_rnd;
	struct list_head	unmap_rnd_list;
	struct list_head	map_rnd_list;

	unsigned int		nr_seq;
	atomic_t		unmap_nr_seq;
	struct list_head	unmap_seq_list;
	struct list_head	map_seq_list;

	atomic_t		nr_reserved_seq_zones;
	struct list_head	reserved_seq_zones_list;

	wait_queue_head_t	free_wq;
};
unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	return ((unsigned int)(zone - zmd->zones));
}

sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_sectors_shift;
}

sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_blocks_shift;
}
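/*
 * Example (illustrative values, not read from the device): with 256 MB
 * zones, zone_nr_sectors_shift is 19 (524288 sectors per zone) and
 * zone_nr_blocks_shift is 16 (65536 blocks per zone), so zone 3 starts at
 * sector 3 << 19 = 1572864 and at block 3 << 16 = 196608.
 */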
unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
{
	return zmd->nr_chunks;
}

unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd)
{
	return zmd->nr_rnd;
}

unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd)
{
	return atomic_read(&zmd->unmap_nr_rnd);
}
/*
 * Lock/unlock mapping table.
 * The map lock also protects all the zone lists.
 */
void dmz_lock_map(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->map_lock);
}

void dmz_unlock_map(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->map_lock);
}

/*
 * Lock/unlock metadata access. This is a "read" lock on a semaphore
 * that prevents metadata flush from running while metadata are being
 * modified. The actual metadata write mutual exclusion is achieved with
 * the map lock and zone state management (active and reclaim state are
 * mutually exclusive).
 */
void dmz_lock_metadata(struct dmz_metadata *zmd)
{
	down_read(&zmd->mblk_sem);
}

void dmz_unlock_metadata(struct dmz_metadata *zmd)
{
	up_read(&zmd->mblk_sem);
}

/*
 * Lock/unlock flush: prevent concurrent executions
 * of dmz_flush_metadata as well as metadata modification in reclaim
 * while flush is being executed.
 */
void dmz_lock_flush(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->mblk_flush_lock);
}

void dmz_unlock_flush(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->mblk_flush_lock);
}
/*
 * Allocate a metadata block.
 */
static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
					   sector_t mblk_no)
{
	struct dmz_mblock *mblk = NULL;

	/* See if we can reuse cached blocks */
	if (zmd->max_nr_mblks && atomic_read(&zmd->nr_mblks) > zmd->max_nr_mblks) {
		spin_lock(&zmd->mblk_lock);
		mblk = list_first_entry_or_null(&zmd->mblk_lru_list,
						struct dmz_mblock, link);
		if (mblk) {
			list_del_init(&mblk->link);
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			mblk->no = mblk_no;
		}
		spin_unlock(&zmd->mblk_lock);
		if (mblk)
			return mblk;
	}

	/* Allocate a new block */
	mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO);
	if (!mblk)
		return NULL;

	mblk->page = alloc_page(GFP_NOIO);
	if (!mblk->page) {
		kfree(mblk);
		return NULL;
	}

	RB_CLEAR_NODE(&mblk->node);
	INIT_LIST_HEAD(&mblk->link);
	mblk->ref = 0;
	mblk->state = 0;
	mblk->no = mblk_no;
	mblk->data = page_address(mblk->page);

	atomic_inc(&zmd->nr_mblks);

	return mblk;
}
/*
 * Free a metadata block.
 */
static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	__free_pages(mblk->page, 0);
	kfree(mblk);

	atomic_dec(&zmd->nr_mblks);
}
/*
 * Insert a metadata block in the rbtree.
 */
static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	struct rb_root *root = &zmd->mblk_rbtree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct dmz_mblock *b;

	/* Figure out where to put the new node */
	while (*new) {
		b = container_of(*new, struct dmz_mblock, node);
		parent = *new;
		new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right);
	}

	/* Add new node and rebalance tree */
	rb_link_node(&mblk->node, parent, new);
	rb_insert_color(&mblk->node, root);
}
/*
 * Lookup a metadata block in the rbtree. If the block is found, increment
 * its reference count.
 */
static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
					      sector_t mblk_no)
{
	struct rb_root *root = &zmd->mblk_rbtree;
	struct rb_node *node = root->rb_node;
	struct dmz_mblock *mblk;

	while (node) {
		mblk = container_of(node, struct dmz_mblock, node);
		if (mblk->no == mblk_no) {
			/*
			 * If this is the first reference to the block,
			 * remove it from the LRU list.
			 */
			mblk->ref++;
			if (mblk->ref == 1 &&
			    !test_bit(DMZ_META_DIRTY, &mblk->state))
				list_del_init(&mblk->link);
			return mblk;
		}
		node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
	}

	return NULL;
}
/*
 * Metadata block BIO end callback.
 */
static void dmz_mblock_bio_end_io(struct bio *bio)
{
	struct dmz_mblock *mblk = bio->bi_private;
	int flag;

	if (bio->bi_status)
		set_bit(DMZ_META_ERROR, &mblk->state);

	if (bio_op(bio) == REQ_OP_WRITE)
		flag = DMZ_META_WRITING;
	else
		flag = DMZ_META_READING;

	clear_bit_unlock(flag, &mblk->state);
	smp_mb__after_atomic();
	wake_up_bit(&mblk->state, flag);

	bio_put(bio);
}
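/*
 * Note on the wake-up above: clear_bit_unlock() + smp_mb__after_atomic() +
 * wake_up_bit() pairs with the wait_on_bit_io() calls in dmz_get_mblock()
 * and dmz_write_dirty_mblocks(), which sleep until the READING/WRITING flag
 * is cleared and then check DMZ_META_ERROR.
 */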
/*
 * Read an uncached metadata block from disk and add it to the cache.
 */
static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
					      sector_t mblk_no)
{
	struct dmz_mblock *mblk, *m;
	sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
	struct bio *bio;

	if (dmz_bdev_is_dying(zmd->dev))
		return ERR_PTR(-EIO);

	/* Get a new block and a BIO to read it */
	mblk = dmz_alloc_mblock(zmd, mblk_no);
	if (!mblk)
		return ERR_PTR(-ENOMEM);

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		dmz_free_mblock(zmd, mblk);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock(&zmd->mblk_lock);

	/*
	 * Make sure that another context did not start reading
	 * the block already.
	 */
	m = dmz_get_mblock_fast(zmd, mblk_no);
	if (m) {
		spin_unlock(&zmd->mblk_lock);
		dmz_free_mblock(zmd, mblk);
		bio_put(bio);
		return m;
	}

	mblk->ref++;
	set_bit(DMZ_META_READING, &mblk->state);
	dmz_insert_mblock(zmd, mblk);

	spin_unlock(&zmd->mblk_lock);

	/* Submit read BIO */
	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, zmd->dev->bdev);
	bio->bi_private = mblk;
	bio->bi_end_io = dmz_mblock_bio_end_io;
	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
	submit_bio(bio);

	return mblk;
}
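/*
 * Design note: the block and BIO are allocated before taking mblk_lock, and
 * the rbtree is re-checked with dmz_get_mblock_fast() afterwards so that a
 * concurrent reader that won the race is reused and the freshly allocated
 * block is dropped. Only the winner inserts the block and issues the read.
 */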
/*
 * Free metadata blocks.
 */
static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd,
					     unsigned long limit)
{
	struct dmz_mblock *mblk;
	unsigned long count = 0;

	if (!zmd->max_nr_mblks)
		return 0;

	while (!list_empty(&zmd->mblk_lru_list) &&
	       atomic_read(&zmd->nr_mblks) > zmd->min_nr_mblks &&
	       count < limit) {
		mblk = list_first_entry(&zmd->mblk_lru_list,
					struct dmz_mblock, link);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
		count++;
	}

	return count;
}
/*
 * For mblock shrinker: get the number of unused metadata blocks in the cache.
 */
static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
					       struct shrink_control *sc)
{
	struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);

	return atomic_read(&zmd->nr_mblks);
}
/*
 * For mblock shrinker: scan unused metadata blocks and shrink the cache.
 */
static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink,
					      struct shrink_control *sc)
{
	struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
	unsigned long count;

	spin_lock(&zmd->mblk_lock);
	count = dmz_shrink_mblock_cache(zmd, sc->nr_to_scan);
	spin_unlock(&zmd->mblk_lock);

	return count ? count : SHRINK_STOP;
}
/*
 * Release a metadata block.
 */
static void dmz_release_mblock(struct dmz_metadata *zmd,
			       struct dmz_mblock *mblk)
{
	if (!mblk)
		return;

	spin_lock(&zmd->mblk_lock);

	mblk->ref--;
	if (mblk->ref == 0) {
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			dmz_free_mblock(zmd, mblk);
		} else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) {
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
			dmz_shrink_mblock_cache(zmd, 1);
		}
	}

	spin_unlock(&zmd->mblk_lock);
}
/*
 * Get a metadata block from the rbtree. If the block
 * is not present, read it from disk.
 */
static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
					 sector_t mblk_no)
{
	struct dmz_mblock *mblk;

	/* Check rbtree */
	spin_lock(&zmd->mblk_lock);
	mblk = dmz_get_mblock_fast(zmd, mblk_no);
	spin_unlock(&zmd->mblk_lock);

	if (!mblk) {
		/* Cache miss: read the block from disk */
		mblk = dmz_get_mblock_slow(zmd, mblk_no);
		if (IS_ERR(mblk))
			return mblk;
	}

	/* Wait for on-going read I/O and check for error */
	wait_on_bit_io(&mblk->state, DMZ_META_READING,
		       TASK_UNINTERRUPTIBLE);
	if (test_bit(DMZ_META_ERROR, &mblk->state)) {
		dmz_release_mblock(zmd, mblk);
		dmz_check_bdev(zmd->dev);
		return ERR_PTR(-EIO);
	}

	return mblk;
}
/*
 * Mark a metadata block dirty.
 */
static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	spin_lock(&zmd->mblk_lock);
	if (!test_and_set_bit(DMZ_META_DIRTY, &mblk->state))
		list_add_tail(&mblk->link, &zmd->mblk_dirty_list);
	spin_unlock(&zmd->mblk_lock);
}
/*
 * Issue a metadata block write BIO.
 */
static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
			    unsigned int set)
{
	sector_t block = zmd->sb[set].block + mblk->no;
	struct bio *bio;

	if (dmz_bdev_is_dying(zmd->dev))
		return -EIO;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		set_bit(DMZ_META_ERROR, &mblk->state);
		return -ENOMEM;
	}

	set_bit(DMZ_META_WRITING, &mblk->state);

	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, zmd->dev->bdev);
	bio->bi_private = mblk;
	bio->bi_end_io = dmz_mblock_bio_end_io;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
	submit_bio(bio);

	return 0;
}
/*
 * Read/write a metadata block.
 */
static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
			  struct page *page)
{
	struct bio *bio;
	int ret;

	if (dmz_bdev_is_dying(zmd->dev))
		return -EIO;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, zmd->dev->bdev);
	bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
	bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
	ret = submit_bio_wait(bio);
	bio_put(bio);

	if (ret)
		dmz_check_bdev(zmd->dev);
	return ret;
}
/*
 * Write super block of the specified metadata set.
 */
static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
{
	sector_t block = zmd->sb[set].block;
	struct dmz_mblock *mblk = zmd->sb[set].mblk;
	struct dmz_super *sb = zmd->sb[set].sb;
	u64 sb_gen = zmd->sb_gen + 1;
	int ret;

	sb->magic = cpu_to_le32(DMZ_MAGIC);
	sb->version = cpu_to_le32(DMZ_META_VER);

	sb->gen = cpu_to_le64(sb_gen);

	sb->sb_block = cpu_to_le64(block);
	sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks);
	sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq);
	sb->nr_chunks = cpu_to_le32(zmd->nr_chunks);

	sb->nr_map_blocks = cpu_to_le32(zmd->nr_map_blocks);
	sb->nr_bitmap_blocks = cpu_to_le32(zmd->nr_bitmap_blocks);
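	/*
	 * Checksum note: the CRC below is seeded with the new generation
	 * number and covers the whole 4 KB block with the crc field cleared,
	 * which is exactly how dmz_check_sb() recomputes it on load.
	 */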
	sb->crc = 0;
	sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE));

	ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
	if (ret == 0)
		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);

	return ret;
}
/*
 * Write dirty metadata blocks to the specified set.
 */
static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
				   struct list_head *write_list,
				   unsigned int set)
{
	struct dmz_mblock *mblk;
	struct blk_plug plug;
	int ret = 0, nr_mblks_submitted = 0;

	/* Issue writes */
	blk_start_plug(&plug);
	list_for_each_entry(mblk, write_list, link) {
		ret = dmz_write_mblock(zmd, mblk, set);
		if (ret)
			break;
		nr_mblks_submitted++;
	}
	blk_finish_plug(&plug);

	/* Wait for completion */
	list_for_each_entry(mblk, write_list, link) {
		if (!nr_mblks_submitted)
			break;
		wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
			       TASK_UNINTERRUPTIBLE);
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			clear_bit(DMZ_META_ERROR, &mblk->state);
			dmz_check_bdev(zmd->dev);
			ret = -EIO;
		}
		nr_mblks_submitted--;
	}

	/* Flush drive cache (this will also sync data) */
	if (ret == 0)
		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);

	return ret;
}
/*
 * Log dirty metadata blocks.
 */
static int dmz_log_dirty_mblocks(struct dmz_metadata *zmd,
				 struct list_head *write_list)
{
	unsigned int log_set = zmd->mblk_primary ^ 0x1;
	int ret;

	/* Write dirty blocks to the log */
	ret = dmz_write_dirty_mblocks(zmd, write_list, log_set);
	if (ret)
		return ret;

	/*
	 * No error so far: now validate the log by updating the
	 * log index super block generation.
	 */
	ret = dmz_write_sb(zmd, log_set);
	if (ret)
		return ret;

	return 0;
}
/*
 * Flush dirty metadata blocks.
 */
int dmz_flush_metadata(struct dmz_metadata *zmd)
{
	struct dmz_mblock *mblk;
	struct list_head write_list;
	int ret;

	INIT_LIST_HEAD(&write_list);

	/*
	 * Make sure that metadata blocks are stable before logging: take
	 * the write lock on the metadata semaphore to prevent target BIOs
	 * from modifying metadata.
	 */
	down_write(&zmd->mblk_sem);

	/*
	 * This is called from the target flush work and reclaim work.
	 * Concurrent execution is not allowed.
	 */
	dmz_lock_flush(zmd);

	if (dmz_bdev_is_dying(zmd->dev)) {
		ret = -EIO;
		goto out;
	}

	/* Get dirty blocks */
	spin_lock(&zmd->mblk_lock);
	list_splice_init(&zmd->mblk_dirty_list, &write_list);
	spin_unlock(&zmd->mblk_lock);

	/* If there are no dirty metadata blocks, just flush the device cache */
	if (list_empty(&write_list)) {
		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
		goto err;
	}

	/*
	 * The primary metadata set is still clean. Keep it this way until
	 * all updates are successful in the secondary set. That is, use
	 * the secondary set as a log.
	 */
	ret = dmz_log_dirty_mblocks(zmd, &write_list);
	if (ret)
		goto err;

	/*
	 * The log is on disk. It is now safe to update in place
	 * in the primary metadata set.
	 */
	ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary);
	if (ret)
		goto err;

	ret = dmz_write_sb(zmd, zmd->mblk_primary);
	if (ret)
		goto err;

	while (!list_empty(&write_list)) {
		mblk = list_first_entry(&write_list, struct dmz_mblock, link);
		list_del_init(&mblk->link);

		spin_lock(&zmd->mblk_lock);
		clear_bit(DMZ_META_DIRTY, &mblk->state);
		if (mblk->ref == 0)
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
		spin_unlock(&zmd->mblk_lock);
	}

	zmd->sb_gen++;
out:
	dmz_unlock_flush(zmd);
	up_write(&zmd->mblk_sem);

	return ret;

err:
	if (!list_empty(&write_list)) {
		spin_lock(&zmd->mblk_lock);
		list_splice(&write_list, &zmd->mblk_dirty_list);
		spin_unlock(&zmd->mblk_lock);
	}
	if (!dmz_check_bdev(zmd->dev))
		ret = -EIO;
	goto out;
}
/*
 * Check super block information.
 */
static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_super *sb)
{
	unsigned int nr_meta_zones, nr_data_zones;
	struct dmz_dev *dev = zmd->dev;
	u32 crc, stored_crc;
	u64 gen;

	gen = le64_to_cpu(sb->gen);
	stored_crc = le32_to_cpu(sb->crc);
	sb->crc = 0;
	crc = crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE);
	if (crc != stored_crc) {
		dmz_dev_err(dev, "Invalid checksum (needed 0x%08x, got 0x%08x)",
			    crc, stored_crc);
		return -ENXIO;
	}

	if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
		dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
			    DMZ_MAGIC, le32_to_cpu(sb->magic));
		return -ENXIO;
	}

	if (le32_to_cpu(sb->version) != DMZ_META_VER) {
		dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
			    DMZ_META_VER, le32_to_cpu(sb->version));
		return -ENXIO;
	}

	nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + dev->zone_nr_blocks - 1)
		>> dev->zone_nr_blocks_shift;
	if (!nr_meta_zones ||
	    nr_meta_zones >= zmd->nr_rnd_zones) {
		dmz_dev_err(dev, "Invalid number of metadata blocks");
		return -ENXIO;
	}

	if (!le32_to_cpu(sb->nr_reserved_seq) ||
	    le32_to_cpu(sb->nr_reserved_seq) >= (zmd->nr_useable_zones - nr_meta_zones)) {
		dmz_dev_err(dev, "Invalid number of reserved sequential zones");
		return -ENXIO;
	}

	nr_data_zones = zmd->nr_useable_zones -
		(nr_meta_zones * 2 + le32_to_cpu(sb->nr_reserved_seq));
	if (le32_to_cpu(sb->nr_chunks) > nr_data_zones) {
		dmz_dev_err(dev, "Invalid number of chunks %u / %u",
			    le32_to_cpu(sb->nr_chunks), nr_data_zones);
		return -ENXIO;
	}

	/* OK */
	zmd->nr_meta_blocks = le32_to_cpu(sb->nr_meta_blocks);
	zmd->nr_reserved_seq = le32_to_cpu(sb->nr_reserved_seq);
	zmd->nr_chunks = le32_to_cpu(sb->nr_chunks);
	zmd->nr_map_blocks = le32_to_cpu(sb->nr_map_blocks);
	zmd->nr_bitmap_blocks = le32_to_cpu(sb->nr_bitmap_blocks);
	zmd->nr_meta_zones = nr_meta_zones;
	zmd->nr_data_zones = nr_data_zones;

	return 0;
}
/*
 * Read the first or second super block from disk.
 */
static int dmz_read_sb(struct dmz_metadata *zmd, unsigned int set)
{
	return dmz_rdwr_block(zmd, REQ_OP_READ, zmd->sb[set].block,
			      zmd->sb[set].mblk->page);
}
/*
 * Determine the position of the secondary super blocks on disk.
 * This is used only if a corruption of the primary super block
 * is detected.
 */
static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
{
	unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
	struct dmz_mblock *mblk;
	int i;

	/* Allocate a block */
	mblk = dmz_alloc_mblock(zmd, 0);
	if (!mblk)
		return -ENOMEM;

	zmd->sb[1].mblk = mblk;
	zmd->sb[1].sb = mblk->data;

	/* Bad first super block: search for the second one */
	zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks;
	for (i = 0; i < zmd->nr_rnd_zones - 1; i++) {
		if (dmz_read_sb(zmd, 1) != 0)
			break;
		if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC)
			return 0;
		zmd->sb[1].block += zone_nr_blocks;
	}

	dmz_free_mblock(zmd, mblk);
	zmd->sb[1].mblk = NULL;

	return -EIO;
}
/*
 * Read the first or second super block from disk.
 */
static int dmz_get_sb(struct dmz_metadata *zmd, unsigned int set)
{
	struct dmz_mblock *mblk;
	int ret;

	/* Allocate a block */
	mblk = dmz_alloc_mblock(zmd, 0);
	if (!mblk)
		return -ENOMEM;

	zmd->sb[set].mblk = mblk;
	zmd->sb[set].sb = mblk->data;

	/* Read super block */
	ret = dmz_read_sb(zmd, set);
	if (ret) {
		dmz_free_mblock(zmd, mblk);
		zmd->sb[set].mblk = NULL;
	}

	return ret;
}
/*
 * Recover a metadata set.
 */
static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
{
	unsigned int src_set = dst_set ^ 0x1;
	struct page *page;
	int i, ret;

	dmz_dev_warn(zmd->dev, "Metadata set %u invalid: recovering", dst_set);

	if (dst_set == 0)
		zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
	else
		zmd->sb[1].block = zmd->sb[0].block +
			(zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);

	page = alloc_page(GFP_NOIO);
	if (!page)
		return -ENOMEM;

	/* Copy metadata blocks */
	for (i = 1; i < zmd->nr_meta_blocks; i++) {
		ret = dmz_rdwr_block(zmd, REQ_OP_READ,
				     zmd->sb[src_set].block + i, page);
		if (ret)
			goto out;
		ret = dmz_rdwr_block(zmd, REQ_OP_WRITE,
				     zmd->sb[dst_set].block + i, page);
		if (ret)
			goto out;
	}

	/* Finalize with the super block */
	if (!zmd->sb[dst_set].mblk) {
		zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0);
		if (!zmd->sb[dst_set].mblk) {
			ret = -ENOMEM;
			goto out;
		}
		zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data;
	}

	ret = dmz_write_sb(zmd, dst_set);
out:
	__free_pages(page, 0);

	return ret;
}
/*
 * Get super block from disk.
 */
static int dmz_load_sb(struct dmz_metadata *zmd)
{
	bool sb_good[2] = {false, false};
	u64 sb_gen[2] = {0, 0};
	int ret;

	/* Read and check the primary super block */
	zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
	ret = dmz_get_sb(zmd, 0);
	if (ret) {
		dmz_dev_err(zmd->dev, "Read primary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, zmd->sb[0].sb);

	/* Read and check secondary super block */
	if (ret == 0) {
		sb_good[0] = true;
		zmd->sb[1].block = zmd->sb[0].block +
			(zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
		ret = dmz_get_sb(zmd, 1);
	} else
		ret = dmz_lookup_secondary_sb(zmd);

	if (ret) {
		dmz_dev_err(zmd->dev, "Read secondary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, zmd->sb[1].sb);
	if (ret == 0)
		sb_good[1] = true;

	/* Use highest generation sb first */
	if (!sb_good[0] && !sb_good[1]) {
		dmz_dev_err(zmd->dev, "No valid super block found");
		return -EIO;
	}

	if (sb_good[0])
		sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen);
	else
		ret = dmz_recover_mblocks(zmd, 0);

	if (sb_good[1])
		sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen);
	else
		ret = dmz_recover_mblocks(zmd, 1);

	if (ret) {
		dmz_dev_err(zmd->dev, "Recovery failed");
		return -EIO;
	}

	if (sb_gen[0] >= sb_gen[1]) {
		zmd->sb_gen = sb_gen[0];
		zmd->mblk_primary = 0;
	} else {
		zmd->sb_gen = sb_gen[1];
		zmd->mblk_primary = 1;
	}

	dmz_dev_debug(zmd->dev, "Using super block %u (gen %llu)",
		      zmd->mblk_primary, zmd->sb_gen);

	return 0;
}
/*
 * Initialize a zone descriptor.
 */
static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
			 struct blk_zone *blkz)
{
	struct dmz_dev *dev = zmd->dev;

	/* Ignore the possible last runt (smaller) zone */
	if (blkz->len != dev->zone_nr_sectors) {
		if (blkz->start + blkz->len == dev->capacity)
			return 0;
		return -ENXIO;
	}

	INIT_LIST_HEAD(&zone->link);
	atomic_set(&zone->refcount, 0);
	zone->chunk = DMZ_MAP_UNMAPPED;

	if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL) {
		set_bit(DMZ_RND, &zone->flags);
	} else if (blkz->type == BLK_ZONE_TYPE_SEQWRITE_REQ ||
		   blkz->type == BLK_ZONE_TYPE_SEQWRITE_PREF) {
		set_bit(DMZ_SEQ, &zone->flags);
	} else
		return -ENXIO;

	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
		set_bit(DMZ_OFFLINE, &zone->flags);
	else if (blkz->cond == BLK_ZONE_COND_READONLY)
		set_bit(DMZ_READ_ONLY, &zone->flags);

	if (dmz_is_rnd(zone))
		zone->wp_block = 0;
	else
		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);

	if (!dmz_is_offline(zone) && !dmz_is_readonly(zone)) {
		zmd->nr_useable_zones++;
		if (dmz_is_rnd(zone)) {
			zmd->nr_rnd_zones++;
			if (!zmd->sb_zone) {
				/* Super block zone */
				zmd->sb_zone = zone;
			}
		}
	}

	return 0;
}
/*
 * Free zone descriptors.
 */
static void dmz_drop_zones(struct dmz_metadata *zmd)
{
	kfree(zmd->zones);
	zmd->zones = NULL;
}
/*
 * The size of a zone report in number of zones.
 * This results in 4096*64B=256KB report zones commands.
 */
#define DMZ_REPORT_NR_ZONES	4096
/*
 * Allocate and initialize zone descriptors using the zone
 * information from disk.
 */
static int dmz_init_zones(struct dmz_metadata *zmd)
{
	struct dmz_dev *dev = zmd->dev;
	struct dm_zone *zone;
	struct blk_zone *blkz;
	unsigned int nr_blkz;
	sector_t sector = 0;
	int i, ret = 0;

	/* Init */
	zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
	zmd->zone_nr_bitmap_blocks =
		max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
	zmd->zone_bits_per_mblk = min_t(sector_t, dev->zone_nr_blocks,
					DMZ_BLOCK_SIZE_BITS);
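	/*
	 * Worked example (illustrative zone size): with 256 MB zones of
	 * 4 KB blocks there are 65536 blocks per zone, so zone_bitmap_size
	 * is 8192 bytes, zone_nr_bitmap_blocks is 2, and each 4 KB bitmap
	 * block tracks DMZ_BLOCK_SIZE_BITS = 32768 blocks.
	 */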
	/* Allocate zone array */
	zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL);
	if (!zmd->zones)
		return -ENOMEM;

	dmz_dev_info(dev, "Using %zu B for zone information",
		     sizeof(struct dm_zone) * dev->nr_zones);

	/* Get zone information */
	nr_blkz = DMZ_REPORT_NR_ZONES;
	blkz = kcalloc(nr_blkz, sizeof(struct blk_zone), GFP_KERNEL);
	if (!blkz) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Get zone information and initialize zone descriptors.
	 * At the same time, determine where the super block
	 * should be: first block of the first randomly writable
	 * zone.
	 */
	zone = zmd->zones;
	while (sector < dev->capacity) {
		/* Get zone information */
		nr_blkz = DMZ_REPORT_NR_ZONES;
		ret = blkdev_report_zones(dev->bdev, sector, blkz,
					  &nr_blkz, GFP_KERNEL);
		if (ret) {
			dmz_dev_err(dev, "Report zones failed %d", ret);
			goto out;
		}

		if (!nr_blkz)
			break;

		/* Process report */
		for (i = 0; i < nr_blkz; i++) {
			ret = dmz_init_zone(zmd, zone, &blkz[i]);
			if (ret)
				goto out;
			sector += dev->zone_nr_sectors;
			zone++;
		}
	}

	/* The entire zone configuration of the disk should now be known */
	if (sector < dev->capacity) {
		dmz_dev_err(dev, "Failed to get correct zone information");
		ret = -ENXIO;
	}
out:
	kfree(blkz);
	if (ret)
		dmz_drop_zones(zmd);

	return ret;
}
/*
 * Update zone information.
 */
static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int nr_blkz = 1;
	struct blk_zone blkz;
	int ret;

	/* Get zone information from disk */
	ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
				  &blkz, &nr_blkz, GFP_NOIO);
	if (!nr_blkz)
		ret = -EIO;
	if (ret) {
		dmz_dev_err(zmd->dev, "Get zone %u report failed",
			    dmz_id(zmd, zone));
		dmz_check_bdev(zmd->dev);
		return ret;
	}

	clear_bit(DMZ_OFFLINE, &zone->flags);
	clear_bit(DMZ_READ_ONLY, &zone->flags);
	if (blkz.cond == BLK_ZONE_COND_OFFLINE)
		set_bit(DMZ_OFFLINE, &zone->flags);
	else if (blkz.cond == BLK_ZONE_COND_READONLY)
		set_bit(DMZ_READ_ONLY, &zone->flags);

	if (dmz_is_seq(zone))
		zone->wp_block = dmz_sect2blk(blkz.wp - blkz.start);
	else
		zone->wp_block = 0;

	return 0;
}
/*
 * Check a zone write pointer position when the zone is marked
 * with the sequential write error flag.
 */
static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
				    struct dm_zone *zone)
{
	unsigned int wp = 0;
	int ret;

	wp = zone->wp_block;
	ret = dmz_update_zone(zmd, zone);
	if (ret)
		return ret;

	dmz_dev_warn(zmd->dev, "Processing zone %u write error (zone wp %u/%u)",
		     dmz_id(zmd, zone), zone->wp_block, wp);

	if (zone->wp_block < wp) {
		dmz_invalidate_blocks(zmd, zone, zone->wp_block,
				      wp - zone->wp_block);
	}

	return 0;
}
static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
{
	return &zmd->zones[zone_id];
}
/*
 * Reset a zone write pointer.
 */
static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	int ret;

	/*
	 * Ignore offline zones, read only zones,
	 * and conventional zones.
	 */
	if (dmz_is_offline(zone) ||
	    dmz_is_readonly(zone) ||
	    dmz_is_rnd(zone))
		return 0;

	if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
		struct dmz_dev *dev = zmd->dev;

		ret = blkdev_reset_zones(dev->bdev,
					 dmz_start_sect(zmd, zone),
					 dev->zone_nr_sectors, GFP_NOIO);
		if (ret) {
			dmz_dev_err(dev, "Reset zone %u failed %d",
				    dmz_id(zmd, zone), ret);
			return ret;
		}
	}

	/* Clear write error bit and rewind write pointer position */
	clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
	zone->wp_block = 0;

	return 0;
}
static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);
/*
 * Initialize chunk mapping.
 */
static int dmz_load_mapping(struct dmz_metadata *zmd)
{
	struct dmz_dev *dev = zmd->dev;
	struct dm_zone *dzone, *bzone;
	struct dmz_mblock *dmap_mblk = NULL;
	struct dmz_map *dmap;
	unsigned int i = 0, e = 0, chunk = 0;
	unsigned int dzone_id;
	unsigned int bzone_id;

	/* Metadata block array for the chunk mapping table */
	zmd->map_mblk = kcalloc(zmd->nr_map_blocks,
				sizeof(struct dmz_mblock *), GFP_KERNEL);
	if (!zmd->map_mblk)
		return -ENOMEM;

	/* Get chunk mapping table blocks and initialize zone mapping */
	while (chunk < zmd->nr_chunks) {
		if (!dmap_mblk) {
			/* Get mapping block */
			dmap_mblk = dmz_get_mblock(zmd, i + 1);
			if (IS_ERR(dmap_mblk))
				return PTR_ERR(dmap_mblk);
			zmd->map_mblk[i] = dmap_mblk;
			dmap = (struct dmz_map *) dmap_mblk->data;
			i++;
			e = 0;
		}

		/* Check data zone */
		dzone_id = le32_to_cpu(dmap[e].dzone_id);
		if (dzone_id == DMZ_MAP_UNMAPPED)
			goto next;

		if (dzone_id >= dev->nr_zones) {
			dmz_dev_err(dev, "Chunk %u mapping: invalid data zone ID %u",
				    chunk, dzone_id);
			return -EIO;
		}

		dzone = dmz_get(zmd, dzone_id);
		set_bit(DMZ_DATA, &dzone->flags);
		dzone->chunk = chunk;
		dmz_get_zone_weight(zmd, dzone);

		if (dmz_is_rnd(dzone))
			list_add_tail(&dzone->link, &zmd->map_rnd_list);
		else
			list_add_tail(&dzone->link, &zmd->map_seq_list);

		/* Check buffer zone */
		bzone_id = le32_to_cpu(dmap[e].bzone_id);
		if (bzone_id == DMZ_MAP_UNMAPPED)
			goto next;

		if (bzone_id >= dev->nr_zones) {
			dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone ID %u",
				    chunk, bzone_id);
			return -EIO;
		}

		bzone = dmz_get(zmd, bzone_id);
		if (!dmz_is_rnd(bzone)) {
			dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone %u",
				    chunk, bzone_id);
			return -EIO;
		}

		set_bit(DMZ_DATA, &bzone->flags);
		set_bit(DMZ_BUF, &bzone->flags);
		bzone->chunk = chunk;
		bzone->bzone = dzone;
		dzone->bzone = bzone;
		dmz_get_zone_weight(zmd, bzone);
		list_add_tail(&bzone->link, &zmd->map_rnd_list);
next:
		chunk++;
		e++;
		if (e >= DMZ_MAP_ENTRIES)
			dmap_mblk = NULL;
	}

	/*
	 * At this point, only meta zones and mapped data zones were
	 * fully initialized. All remaining zones are unmapped data
	 * zones. Finish initializing those here.
	 */
	for (i = 0; i < dev->nr_zones; i++) {
		dzone = dmz_get(zmd, i);
		if (dmz_is_meta(dzone))
			continue;

		if (dmz_is_rnd(dzone))
			zmd->nr_rnd++;
		else
			zmd->nr_seq++;

		if (dmz_is_data(dzone)) {
			/* Already initialized */
			continue;
		}

		/* Unmapped data zone */
		set_bit(DMZ_DATA, &dzone->flags);
		dzone->chunk = DMZ_MAP_UNMAPPED;
		if (dmz_is_rnd(dzone)) {
			list_add_tail(&dzone->link, &zmd->unmap_rnd_list);
			atomic_inc(&zmd->unmap_nr_rnd);
		} else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) {
			list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list);
			atomic_inc(&zmd->nr_reserved_seq_zones);
		} else {
			list_add_tail(&dzone->link, &zmd->unmap_seq_list);
			atomic_inc(&zmd->unmap_nr_seq);
		}
	}

	return 0;
}
/*
 * Set a data chunk mapping.
 */
static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk,
				  unsigned int dzone_id, unsigned int bzone_id)
{
	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
	struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
	int map_idx = chunk & DMZ_MAP_ENTRIES_MASK;

	dmap[map_idx].dzone_id = cpu_to_le32(dzone_id);
	dmap[map_idx].bzone_id = cpu_to_le32(bzone_id);
	dmz_dirty_mblock(zmd, dmap_mblk);
}
/*
 * The list of mapped zones is maintained in LRU order.
 * This rotates a zone at the end of its map list.
 */
static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	if (list_empty(&zone->link))
		return;

	list_del_init(&zone->link);
	if (dmz_is_seq(zone)) {
		/* LRU rotate sequential zone */
		list_add_tail(&zone->link, &zmd->map_seq_list);
	} else {
		/* LRU rotate random zone */
		list_add_tail(&zone->link, &zmd->map_rnd_list);
	}
}
/*
 * The list of mapped random zones is maintained
 * in LRU order. This rotates a zone at the end of the list.
 */
static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	__dmz_lru_zone(zmd, zone);
	if (zone->bzone)
		__dmz_lru_zone(zmd, zone->bzone);
}
/*
 * Wait for any zone to be freed.
 */
static void dmz_wait_for_free_zones(struct dmz_metadata *zmd)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&zmd->free_wq, &wait, TASK_UNINTERRUPTIBLE);
	dmz_unlock_map(zmd);
	dmz_unlock_metadata(zmd);

	io_schedule_timeout(HZ);

	dmz_lock_metadata(zmd);
	dmz_lock_map(zmd);
	finish_wait(&zmd->free_wq, &wait);
}
/*
 * Lock a zone for reclaim (set the zone RECLAIM bit).
 * Returns false if the zone cannot be locked or if it is already locked
 * and true otherwise.
 */
int dmz_lock_zone_reclaim(struct dm_zone *zone)
{
	/* Active zones cannot be reclaimed */
	if (dmz_is_active(zone))
		return 0;

	return !test_and_set_bit(DMZ_RECLAIM, &zone->flags);
}
/*
 * Clear a zone reclaim flag.
 */
void dmz_unlock_zone_reclaim(struct dm_zone *zone)
{
	WARN_ON(dmz_is_active(zone));
	WARN_ON(!dmz_in_reclaim(zone));

	clear_bit_unlock(DMZ_RECLAIM, &zone->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zone->flags, DMZ_RECLAIM);
}
/*
 * Wait for a zone reclaim to complete.
 */
static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	dmz_unlock_map(zmd);
	dmz_unlock_metadata(zmd);
	wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
	dmz_lock_metadata(zmd);
	dmz_lock_map(zmd);
}
/*
 * Select a random write zone for reclaim.
 */
static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
{
	struct dm_zone *dzone = NULL;
	struct dm_zone *zone;

	if (list_empty(&zmd->map_rnd_list))
		return ERR_PTR(-EBUSY);

	list_for_each_entry(zone, &zmd->map_rnd_list, link) {
		if (dmz_is_buf(zone))
			dzone = zone->bzone;
		else
			dzone = zone;
		if (dmz_lock_zone_reclaim(dzone))
			return dzone;
	}

	return ERR_PTR(-EBUSY);
}
/*
 * Select a buffered sequential zone for reclaim.
 */
static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
{
	struct dm_zone *zone;

	if (list_empty(&zmd->map_seq_list))
		return ERR_PTR(-EBUSY);

	list_for_each_entry(zone, &zmd->map_seq_list, link) {
		if (!zone->bzone)
			continue;
		if (dmz_lock_zone_reclaim(zone))
			return zone;
	}

	return ERR_PTR(-EBUSY);
}
/*
 * Select a zone for reclaim.
 */
struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd)
{
	struct dm_zone *zone;

	/*
	 * Search for a zone candidate to reclaim: 2 cases are possible.
	 * (1) There are no free sequential zones. Then a random data zone
	 *     cannot be reclaimed. So choose a sequential zone to reclaim so
	 *     that afterward a random zone can be reclaimed.
	 * (2) At least one free sequential zone is available, then choose
	 *     the oldest random zone (data or buffer) that can be locked.
	 */
	dmz_lock_map(zmd);
	if (list_empty(&zmd->reserved_seq_zones_list))
		zone = dmz_get_seq_zone_for_reclaim(zmd);
	else
		zone = dmz_get_rnd_zone_for_reclaim(zmd);
	dmz_unlock_map(zmd);

	return zone;
}
/*
 * Get the zone mapping a chunk, if the chunk is mapped already.
 * If no mapping exists and the operation is WRITE, a zone is
 * allocated and used to map the chunk.
 * The zone returned will be set to the active state.
 */
struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, int op)
{
	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
	struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
	int dmap_idx = chunk & DMZ_MAP_ENTRIES_MASK;
	unsigned int dzone_id;
	struct dm_zone *dzone = NULL;
	int ret = 0;

	dmz_lock_map(zmd);
again:
	/* Get the chunk mapping */
	dzone_id = le32_to_cpu(dmap[dmap_idx].dzone_id);
	if (dzone_id == DMZ_MAP_UNMAPPED) {
		/*
		 * Read or discard in unmapped chunks are fine. But for
		 * writes, we need a mapping, so get one.
		 */
		if (op != REQ_OP_WRITE)
			goto out;

		/* Allocate a random zone */
		dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
		if (!dzone) {
			if (dmz_bdev_is_dying(zmd->dev)) {
				dzone = ERR_PTR(-EIO);
				goto out;
			}
			dmz_wait_for_free_zones(zmd);
			goto again;
		}

		dmz_map_zone(zmd, dzone, chunk);

	} else {
		/* The chunk is already mapped: get the mapping zone */
		dzone = dmz_get(zmd, dzone_id);
		if (dzone->chunk != chunk) {
			dzone = ERR_PTR(-EIO);
			goto out;
		}

		/* Repair write pointer if the sequential dzone has error */
		if (dmz_seq_write_err(dzone)) {
			ret = dmz_handle_seq_write_err(zmd, dzone);
			if (ret) {
				dzone = ERR_PTR(-EIO);
				goto out;
			}
			clear_bit(DMZ_SEQ_WRITE_ERR, &dzone->flags);
		}
	}

	/*
	 * If the zone is being reclaimed, the chunk mapping may change
	 * to a different zone. So wait for reclaim and retry. Otherwise,
	 * activate the zone (this will prevent reclaim from touching it).
	 */
	if (dmz_in_reclaim(dzone)) {
		dmz_wait_for_reclaim(zmd, dzone);
		goto again;
	}
	dmz_activate_zone(dzone);
	dmz_lru_zone(zmd, dzone);
out:
	dmz_unlock_map(zmd);

	return dzone;
}
/*
 * Write and discard change the block validity of data zones and their buffer
 * zones. Check here that valid blocks are still present. If all blocks are
 * invalid, the zones can be unmapped on the fly without waiting for reclaim
 * to do it.
 */
void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *dzone)
{
	struct dm_zone *bzone;

	dmz_lock_map(zmd);

	bzone = dzone->bzone;
	if (bzone) {
		if (dmz_weight(bzone))
			dmz_lru_zone(zmd, bzone);
		else {
			/* Empty buffer zone: reclaim it */
			dmz_unmap_zone(zmd, bzone);
			dmz_free_zone(zmd, bzone);
			bzone = NULL;
		}
	}

	/* Deactivate the data zone */
	dmz_deactivate_zone(dzone);
	if (dmz_is_active(dzone) || bzone || dmz_weight(dzone))
		dmz_lru_zone(zmd, dzone);
	else {
		/* Unbuffered inactive empty data zone: reclaim it */
		dmz_unmap_zone(zmd, dzone);
		dmz_free_zone(zmd, dzone);
	}

	dmz_unlock_map(zmd);
}
/*
 * Allocate and map a random zone to buffer a chunk
 * already mapped to a sequential zone.
 */
struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
				     struct dm_zone *dzone)
{
	struct dm_zone *bzone;

	dmz_lock_map(zmd);
again:
	bzone = dzone->bzone;
	if (bzone)
		goto out;

	/* Allocate a random zone */
	bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
	if (!bzone) {
		if (dmz_bdev_is_dying(zmd->dev)) {
			bzone = ERR_PTR(-EIO);
			goto out;
		}
		dmz_wait_for_free_zones(zmd);
		goto again;
	}

	/* Update the chunk mapping */
	dmz_set_chunk_mapping(zmd, dzone->chunk, dmz_id(zmd, dzone),
			      dmz_id(zmd, bzone));

	set_bit(DMZ_BUF, &bzone->flags);
	bzone->chunk = dzone->chunk;
	bzone->bzone = dzone;
	dzone->bzone = bzone;
	list_add_tail(&bzone->link, &zmd->map_rnd_list);
out:
	dmz_unlock_map(zmd);

	return bzone;
}
/*
 * Get an unmapped (free) zone.
 * This must be called with the mapping lock held.
 */
struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags)
{
	struct list_head *list;
	struct dm_zone *zone;

	if (flags & DMZ_ALLOC_RND)
		list = &zmd->unmap_rnd_list;
	else
		list = &zmd->unmap_seq_list;
again:
	if (list_empty(list)) {
		/*
		 * No free zone: if this is for reclaim, allow using the
		 * reserved sequential zones.
		 */
		if (!(flags & DMZ_ALLOC_RECLAIM) ||
		    list_empty(&zmd->reserved_seq_zones_list))
			return NULL;

		zone = list_first_entry(&zmd->reserved_seq_zones_list,
					struct dm_zone, link);
		list_del_init(&zone->link);
		atomic_dec(&zmd->nr_reserved_seq_zones);
		return zone;
	}

	zone = list_first_entry(list, struct dm_zone, link);
	list_del_init(&zone->link);

	if (dmz_is_rnd(zone))
		atomic_dec(&zmd->unmap_nr_rnd);
	else
		atomic_dec(&zmd->unmap_nr_seq);

	if (dmz_is_offline(zone)) {
		dmz_dev_warn(zmd->dev, "Zone %u is offline", dmz_id(zmd, zone));
		zone = NULL;
		goto again;
	}

	return zone;
}
/*
 * Free a zone.
 * This must be called with the mapping lock held.
 */
void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	/* If this is a sequential zone, reset it */
	if (dmz_is_seq(zone))
		dmz_reset_zone(zmd, zone);

	/* Return the zone to its type unmap list */
	if (dmz_is_rnd(zone)) {
		list_add_tail(&zone->link, &zmd->unmap_rnd_list);
		atomic_inc(&zmd->unmap_nr_rnd);
	} else if (atomic_read(&zmd->nr_reserved_seq_zones) <
		   zmd->nr_reserved_seq) {
		list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
		atomic_inc(&zmd->nr_reserved_seq_zones);
	} else {
		list_add_tail(&zone->link, &zmd->unmap_seq_list);
		atomic_inc(&zmd->unmap_nr_seq);
	}

	wake_up_all(&zmd->free_wq);
}
/*
 * Map a chunk to a zone.
 * This must be called with the mapping lock held.
 */
void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
		  unsigned int chunk)
{
	/* Set the chunk mapping */
	dmz_set_chunk_mapping(zmd, chunk, dmz_id(zmd, dzone),
			      DMZ_MAP_UNMAPPED);
	dzone->chunk = chunk;
	if (dmz_is_rnd(dzone))
		list_add_tail(&dzone->link, &zmd->map_rnd_list);
	else
		list_add_tail(&dzone->link, &zmd->map_seq_list);
}
/*
 * Unmap a zone.
 * This must be called with the mapping lock held.
 */
void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int chunk = zone->chunk;
	unsigned int dzone_id;

	if (chunk == DMZ_MAP_UNMAPPED) {
		/* Already unmapped */
		return;
	}

	if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
		/*
		 * Unmapping the chunk buffer zone: clear only
		 * the chunk buffer mapping
		 */
		dzone_id = dmz_id(zmd, zone->bzone);
		zone->bzone->bzone = NULL;
		zone->bzone = NULL;

	} else {
		/*
		 * Unmapping the chunk data zone: the zone must
		 * not be buffered.
		 */
		if (WARN_ON(zone->bzone)) {
			zone->bzone->bzone = NULL;
			zone->bzone = NULL;
		}
		dzone_id = DMZ_MAP_UNMAPPED;
	}

	dmz_set_chunk_mapping(zmd, chunk, dzone_id, DMZ_MAP_UNMAPPED);

	zone->chunk = DMZ_MAP_UNMAPPED;
	list_del_init(&zone->link);
}
/*
 * Set @nr_bits bits in @bitmap starting from @bit.
 * Return the number of bits changed from 0 to 1.
 */
static unsigned int dmz_set_bits(unsigned long *bitmap,
				 unsigned int bit, unsigned int nr_bits)
{
	unsigned long *addr;
	unsigned int end = bit + nr_bits;
	unsigned int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Try to set the whole word at once */
			addr = bitmap + BIT_WORD(bit);
			if (*addr == 0) {
				*addr = ULONG_MAX;
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (!test_and_set_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}
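/*
 * The word-at-a-time fast path above only fires when the current word is
 * entirely clear (or, in dmz_clear_bits() below, entirely set), so the
 * returned count of flipped bits stays exact; callers use it to adjust the
 * zone weight (number of valid blocks).
 */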
/*
 * Get the bitmap block storing the bit for chunk_block in zone.
 */
static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
					 struct dm_zone *zone,
					 sector_t chunk_block)
{
	sector_t bitmap_block = 1 + zmd->nr_map_blocks +
		(sector_t)(dmz_id(zmd, zone) * zmd->zone_nr_bitmap_blocks) +
		(chunk_block >> DMZ_BLOCK_SHIFT_BITS);

	return dmz_get_mblock(zmd, bitmap_block);
}
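/*
 * Layout reminder (illustrative numbers): metadata block 0 is the super
 * block, blocks 1..nr_map_blocks hold the chunk mapping table, and the
 * bitmaps follow. With 2 bitmap blocks per zone and nr_map_blocks == 8,
 * the first bitmap block of zone 5 would be metadata block 1 + 8 + 5*2 = 19.
 */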
/*
 * Copy the valid blocks bitmap of from_zone to the bitmap of to_zone.
 */
int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
			  struct dm_zone *to_zone)
{
	struct dmz_mblock *from_mblk, *to_mblk;
	sector_t chunk_block = 0;

	/* Get the zones bitmap blocks */
	while (chunk_block < zmd->dev->zone_nr_blocks) {
		from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
		if (IS_ERR(from_mblk))
			return PTR_ERR(from_mblk);
		to_mblk = dmz_get_bitmap(zmd, to_zone, chunk_block);
		if (IS_ERR(to_mblk)) {
			dmz_release_mblock(zmd, from_mblk);
			return PTR_ERR(to_mblk);
		}

		memcpy(to_mblk->data, from_mblk->data, DMZ_BLOCK_SIZE);
		dmz_dirty_mblock(zmd, to_mblk);

		dmz_release_mblock(zmd, to_mblk);
		dmz_release_mblock(zmd, from_mblk);

		chunk_block += zmd->zone_bits_per_mblk;
	}

	to_zone->weight = from_zone->weight;

	return 0;
}
/*
 * Merge the valid blocks bitmap of from_zone into the bitmap of to_zone,
 * starting from chunk_block.
 */
int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
			   struct dm_zone *to_zone, sector_t chunk_block)
{
	unsigned int nr_blocks;
	int ret;

	/* Get the zones bitmap blocks */
	while (chunk_block < zmd->dev->zone_nr_blocks) {
		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
		if (ret <= 0)
			break;

		nr_blocks = ret;
		ret = dmz_validate_blocks(zmd, to_zone, chunk_block, nr_blocks);
		if (ret)
			return ret;

		chunk_block += nr_blocks;
	}

	return 0;
}
/*
 * Validate all the blocks in the range [block..block+nr_blocks-1].
 */
int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
			sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int count, bit, nr_bits;
	unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
	struct dmz_mblock *mblk;
	unsigned int n = 0;

	dmz_dev_debug(zmd->dev, "=> VALIDATE zone %u, block %llu, %u blocks",
		      dmz_id(zmd, zone), (unsigned long long)chunk_block,
		      nr_blocks);

	WARN_ON(chunk_block + nr_blocks > zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Set bits */
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);

		count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
		if (count) {
			dmz_dirty_mblock(zmd, mblk);
			n += count;
		}
		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	if (likely(zone->weight + n <= zone_nr_blocks))
		zone->weight += n;
	else {
		dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be <= %u",
			     dmz_id(zmd, zone), zone->weight,
			     zone_nr_blocks - n);
		zone->weight = zone_nr_blocks;
	}

	return 0;
}
/*
 * Clear nr_bits bits in bitmap starting from bit.
 * Return the number of bits cleared.
 */
static int dmz_clear_bits(unsigned long *bitmap, int bit, int nr_bits)
{
	unsigned long *addr;
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Try to clear whole word at once */
			addr = bitmap + BIT_WORD(bit);
			if (*addr == ULONG_MAX) {
				*addr = 0;
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (test_and_clear_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}
/*
 * Invalidate all the blocks in the range [block..block+nr_blocks-1].
 */
int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int count, bit, nr_bits;
	struct dmz_mblock *mblk;
	unsigned int n = 0;

	dmz_dev_debug(zmd->dev, "=> INVALIDATE zone %u, block %llu, %u blocks",
		      dmz_id(zmd, zone), (u64)chunk_block, nr_blocks);

	WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Clear bits */
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);

		count = dmz_clear_bits((unsigned long *)mblk->data,
				       bit, nr_bits);
		if (count) {
			dmz_dirty_mblock(zmd, mblk);
			n += count;
		}
		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	if (zone->weight >= n)
		zone->weight -= n;
	else {
		dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be >= %u",
			     dmz_id(zmd, zone), zone->weight, n);
		zone->weight = 0;
	}

	return 0;
}
/*
 * Get a block bit value.
 */
static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t chunk_block)
{
	struct dmz_mblock *mblk;
	int ret;

	WARN_ON(chunk_block >= zmd->dev->zone_nr_blocks);

	/* Get bitmap block */
	mblk = dmz_get_bitmap(zmd, zone, chunk_block);
	if (IS_ERR(mblk))
		return PTR_ERR(mblk);

	/* Get offset */
	ret = test_bit(chunk_block & DMZ_BLOCK_MASK_BITS,
		       (unsigned long *) mblk->data) != 0;

	dmz_release_mblock(zmd, mblk);

	return ret;
}
/*
 * Return the number of blocks from chunk_block to the first block with a bit
 * value specified by set. Search at most nr_blocks blocks from chunk_block.
 */
static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
				 sector_t chunk_block, unsigned int nr_blocks,
				 int set)
{
	struct dmz_mblock *mblk;
	unsigned int bit, set_bit, nr_bits;
	unsigned int zone_bits = zmd->zone_bits_per_mblk;
	unsigned long *bitmap;
	int n = 0;

	WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Get offset */
		bitmap = (unsigned long *) mblk->data;
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zone_bits - bit);
		if (set)
			set_bit = find_next_bit(bitmap, zone_bits, bit);
		else
			set_bit = find_next_zero_bit(bitmap, zone_bits, bit);
		dmz_release_mblock(zmd, mblk);

		n += set_bit - bit;
		if (set_bit < zone_bits)
			break;

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	return n;
}
/*
 * Test if chunk_block is valid. If it is, the number of consecutive
 * valid blocks from chunk_block will be returned.
 */
int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
		    sector_t chunk_block)
{
	int valid;

	valid = dmz_test_block(zmd, zone, chunk_block);
	if (valid <= 0)
		return valid;

	/* The block is valid: get the number of valid blocks from block */
	return dmz_to_next_set_block(zmd, zone, chunk_block,
				     zmd->dev->zone_nr_blocks - chunk_block, 0);
}
/*
 * Find the first valid block from @chunk_block in @zone.
 * If such a block is found, its number is returned using
 * @chunk_block and the total number of valid blocks from @chunk_block
 * is returned.
 */
int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t *chunk_block)
{
	sector_t start_block = *chunk_block;
	int ret;

	ret = dmz_to_next_set_block(zmd, zone, start_block,
				    zmd->dev->zone_nr_blocks - start_block, 1);
	if (ret < 0)
		return ret;

	start_block += ret;
	*chunk_block = start_block;

	return dmz_to_next_set_block(zmd, zone, start_block,
				     zmd->dev->zone_nr_blocks - start_block, 0);
}
/*
 * Count the number of bits set starting from bit up to bit + nr_bits - 1.
 */
static int dmz_count_bits(void *bitmap, int bit, int nr_bits)
{
	unsigned long *addr;
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			addr = (unsigned long *)bitmap + BIT_WORD(bit);
			if (*addr == ULONG_MAX) {
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (test_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}
/*
 * Get a zone weight.
 */
static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	struct dmz_mblock *mblk;
	sector_t chunk_block = 0;
	unsigned int bit, nr_bits;
	unsigned int nr_blocks = zmd->dev->zone_nr_blocks;
	void *bitmap;
	int n = 0;

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk)) {
			n = 0;
			break;
		}

		/* Count bits in this block */
		bitmap = mblk->data;
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
		n += dmz_count_bits(bitmap, bit, nr_bits);

		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	zone->weight = n;
}
/*
 * Cleanup the zoned metadata resources.
 */
static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
{
	struct rb_root *root;
	struct dmz_mblock *mblk, *next;
	int i;

	/* Release zone mapping resources */
	if (zmd->map_mblk) {
		for (i = 0; i < zmd->nr_map_blocks; i++)
			dmz_release_mblock(zmd, zmd->map_mblk[i]);
		kfree(zmd->map_mblk);
		zmd->map_mblk = NULL;
	}

	/* Release super blocks */
	for (i = 0; i < 2; i++) {
		if (zmd->sb[i].mblk) {
			dmz_free_mblock(zmd, zmd->sb[i].mblk);
			zmd->sb[i].mblk = NULL;
		}
	}

	/* Free cached blocks */
	while (!list_empty(&zmd->mblk_dirty_list)) {
		mblk = list_first_entry(&zmd->mblk_dirty_list,
					struct dmz_mblock, link);
		dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
			     (u64)mblk->no, mblk->ref);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
	}

	while (!list_empty(&zmd->mblk_lru_list)) {
		mblk = list_first_entry(&zmd->mblk_lru_list,
					struct dmz_mblock, link);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
	}

	/* Sanity checks: the mblock rbtree should now be empty */
	root = &zmd->mblk_rbtree;
	rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
		dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
			     (u64)mblk->no, mblk->ref);
		RB_CLEAR_NODE(&mblk->node);
		dmz_free_mblock(zmd, mblk);
	}

	/* Free the zone descriptors */
	dmz_drop_zones(zmd);

	mutex_destroy(&zmd->mblk_flush_lock);
	mutex_destroy(&zmd->map_lock);
}
/*
 * Initialize the zoned metadata.
 */
int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
{
	struct dmz_metadata *zmd;
	unsigned int i, zid;
	struct dm_zone *zone;
	int ret;

	zmd = kzalloc(sizeof(struct dmz_metadata), GFP_KERNEL);
	if (!zmd)
		return -ENOMEM;

	zmd->dev = dev;
	zmd->mblk_rbtree = RB_ROOT;
	init_rwsem(&zmd->mblk_sem);
	mutex_init(&zmd->mblk_flush_lock);
	spin_lock_init(&zmd->mblk_lock);
	INIT_LIST_HEAD(&zmd->mblk_lru_list);
	INIT_LIST_HEAD(&zmd->mblk_dirty_list);

	mutex_init(&zmd->map_lock);
	atomic_set(&zmd->unmap_nr_rnd, 0);
	INIT_LIST_HEAD(&zmd->unmap_rnd_list);
	INIT_LIST_HEAD(&zmd->map_rnd_list);

	atomic_set(&zmd->unmap_nr_seq, 0);
	INIT_LIST_HEAD(&zmd->unmap_seq_list);
	INIT_LIST_HEAD(&zmd->map_seq_list);

	atomic_set(&zmd->nr_reserved_seq_zones, 0);
	INIT_LIST_HEAD(&zmd->reserved_seq_zones_list);

	init_waitqueue_head(&zmd->free_wq);

	/* Initialize zone descriptors */
	ret = dmz_init_zones(zmd);
	if (ret)
		goto err;

	/* Get super block */
	ret = dmz_load_sb(zmd);
	if (ret)
		goto err;

	/* Set metadata zones starting from sb_zone */
	zid = dmz_id(zmd, zmd->sb_zone);
	for (i = 0; i < zmd->nr_meta_zones << 1; i++) {
		zone = dmz_get(zmd, zid + i);
		if (!dmz_is_rnd(zone))
			goto err;
		set_bit(DMZ_META, &zone->flags);
	}

	/* Load mapping table */
	ret = dmz_load_mapping(zmd);
	if (ret)
		goto err;

	/*
	 * Cache size boundaries: allow at least 2 super blocks, the chunk map
	 * blocks and enough blocks to be able to cache the bitmap blocks of
	 * up to 16 zones when idle (min_nr_mblks). Otherwise, if busy, allow
	 * the cache to add 512 more metadata blocks.
	 */
	zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16;
	zmd->max_nr_mblks = zmd->min_nr_mblks + 512;
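	/*
	 * For example (illustrative geometry): with 8 chunk map blocks and
	 * 2 bitmap blocks per zone, min_nr_mblks = 2 + 8 + 32 = 42 cached
	 * 4 KB blocks (~168 KB) and max_nr_mblks = 554 (~2.2 MB).
	 */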
	zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count;
	zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan;
	zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;

	/* Metadata cache shrinker */
	ret = register_shrinker(&zmd->mblk_shrinker);
	if (ret) {
		dmz_dev_err(dev, "Register metadata cache shrinker failed");
		goto err;
	}

	dmz_dev_info(dev, "Host-%s zoned block device",
		     bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
		     "aware" : "managed");
	dmz_dev_info(dev, "  %llu 512-byte logical sectors",
		     (u64)dev->capacity);
	dmz_dev_info(dev, "  %u zones of %llu 512-byte logical sectors",
		     dev->nr_zones, (u64)dev->zone_nr_sectors);
	dmz_dev_info(dev, "  %u metadata zones",
		     zmd->nr_meta_zones * 2);
	dmz_dev_info(dev, "  %u data zones for %u chunks",
		     zmd->nr_data_zones, zmd->nr_chunks);
	dmz_dev_info(dev, "  %u random zones (%u unmapped)",
		     zmd->nr_rnd, atomic_read(&zmd->unmap_nr_rnd));
	dmz_dev_info(dev, "  %u sequential zones (%u unmapped)",
		     zmd->nr_seq, atomic_read(&zmd->unmap_nr_seq));
	dmz_dev_info(dev, "  %u reserved sequential data zones",
		     zmd->nr_reserved_seq);

	dmz_dev_debug(dev, "Format:");
	dmz_dev_debug(dev, "%u metadata blocks per set (%u max cache)",
		      zmd->nr_meta_blocks, zmd->max_nr_mblks);
	dmz_dev_debug(dev, "  %u data zone mapping blocks",
		      zmd->nr_map_blocks);
	dmz_dev_debug(dev, "  %u bitmap blocks",
		      zmd->nr_bitmap_blocks);

	*metadata = zmd;

	return 0;
err:
	dmz_cleanup_metadata(zmd);
	kfree(zmd);
	*metadata = NULL;

	return ret;
}
/*
 * Cleanup the zoned metadata resources.
 */
void dmz_dtr_metadata(struct dmz_metadata *zmd)
{
	unregister_shrinker(&zmd->mblk_shrinker);
	dmz_cleanup_metadata(zmd);
	kfree(zmd);
}
/*
 * Check zone information on resume.
 */
int dmz_resume_metadata(struct dmz_metadata *zmd)
{
	struct dmz_dev *dev = zmd->dev;
	struct dm_zone *zone;
	sector_t wp_block;
	unsigned int i;
	int ret;

	/* Check zones */
	for (i = 0; i < dev->nr_zones; i++) {
		zone = dmz_get(zmd, i);
		if (!zone) {
			dmz_dev_err(dev, "Unable to get zone %u", i);
			return -EIO;
		}

		wp_block = zone->wp_block;

		ret = dmz_update_zone(zmd, zone);
		if (ret) {
			dmz_dev_err(dev, "Broken zone %u", i);
			return ret;
		}

		if (dmz_is_offline(zone)) {
			dmz_dev_warn(dev, "Zone %u is offline", i);
			continue;
		}

		/* Check write pointer */
		if (!dmz_is_seq(zone))
			zone->wp_block = 0;
		else if (zone->wp_block != wp_block) {
			dmz_dev_err(dev, "Zone %u: Invalid wp (%llu / %llu)",
				    i, (u64)zone->wp_block, (u64)wp_block);
			zone->wp_block = wp_block;
			dmz_invalidate_blocks(zmd, zone, zone->wp_block,
					      dev->zone_nr_blocks - zone->wp_block);
		}
	}

	return 0;
}