// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/sched/mm.h>

#define DM_MSG_PREFIX		"zoned metadata"
#define DMZ_META_VER	1

/*
 * On-disk super block magic.
 */
#define DMZ_MAGIC	((((unsigned int)('D')) << 24) | \
			 (((unsigned int)('Z')) << 16) | \
			 (((unsigned int)('B')) << 8) | \
			 ((unsigned int)('D')))
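/*
 * Read most-significant byte first, the magic value spells out the ASCII
 * characters 'D' 'Z' 'B' 'D'. A minimal sketch of how a reader of the
 * format might check it (illustration only, not part of this driver):
 *
 *	if (le32_to_cpu(sb->magic) != DMZ_MAGIC)
 *		return -EINVAL;
 */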
/*
 * On disk super block.
 * This uses only 512 B but uses on disk a full 4KB block. This block is
 * followed on disk by the mapping table of chunks to zones and the bitmap
 * blocks indicating zone block validity.
 * The overall resulting metadata format is:
 *    (1) Super block (1 block)
 *    (2) Chunk mapping table (nr_map_blocks)
 *    (3) Bitmap blocks (nr_bitmap_blocks)
 * All metadata blocks are stored in conventional zones, starting from
 * the first conventional zone found on disk.
 */
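/*
 * Illustrative sizing (assumed example figures, not requirements): with
 * 256 MiB zones and 4 KiB metadata blocks, a zone holds 65536 blocks, so
 * its validity bitmap needs 65536 bits = 8 KiB = 2 bitmap blocks, and each
 * 4 KiB mapping block describes 512 chunks (8 bytes per entry).
 */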
struct dmz_super {
	/* Magic number */
	__le32		magic;			/*   4 */

	/* Metadata version number */
	__le32		version;		/*   8 */

	/* Generation number */
	__le64		gen;			/*  16 */

	/* This block number */
	__le64		sb_block;		/*  24 */

	/* The number of metadata blocks, including this super block */
	__le32		nr_meta_blocks;		/*  28 */

	/* The number of sequential zones reserved for reclaim */
	__le32		nr_reserved_seq;	/*  32 */

	/* The number of entries in the mapping table */
	__le32		nr_chunks;		/*  36 */

	/* The number of blocks used for the chunk mapping table */
	__le32		nr_map_blocks;		/*  40 */

	/* The number of blocks used for the block bitmaps */
	__le32		nr_bitmap_blocks;	/*  44 */

	/* Checksum */
	__le32		crc;			/*  48 */

	/* Padding to full 512B sector */
	u8		reserved[464];		/* 512 */
};
/*
 * Chunk mapping entry: entries are indexed by chunk number
 * and give the zone ID (dzone_id) mapping the chunk on disk.
 * This zone may be sequential or random. If it is a sequential
 * zone, a second zone (bzone_id) used as a write buffer may
 * also be specified. This second zone will always be a randomly
 * writeable zone.
 */
struct dmz_map {
	__le32			dzone_id;
	__le32			bzone_id;
};
/*
 * Chunk mapping table metadata: 512 8-byte entries per 4KB block.
 */
#define DMZ_MAP_ENTRIES		(DMZ_BLOCK_SIZE / sizeof(struct dmz_map))
#define DMZ_MAP_ENTRIES_SHIFT	(ilog2(DMZ_MAP_ENTRIES))
#define DMZ_MAP_ENTRIES_MASK	(DMZ_MAP_ENTRIES - 1)
#define DMZ_MAP_UNMAPPED	UINT_MAX
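/*
 * A minimal sketch (illustration only) of how a chunk number is split into
 * a mapping block index and an entry index within that block, mirroring
 * what dmz_set_chunk_mapping() does further down in this file:
 *
 *	blk_idx = chunk >> DMZ_MAP_ENTRIES_SHIFT;   (which mapping block)
 *	map_idx = chunk & DMZ_MAP_ENTRIES_MASK;     (entry in that block)
 *	dmap    = zmd->map_mblk[blk_idx]->data;
 *	dzone   = le32_to_cpu(dmap[map_idx].dzone_id);
 */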
/*
 * Meta data block descriptor (for cached metadata blocks).
 */
struct dmz_mblock {
	struct rb_node		node;
	struct list_head	link;
	sector_t		no;
	unsigned int		ref;
	unsigned long		state;
	struct page		*page;
	void			*data;
};

/*
 * Metadata block state flags.
 */
enum {
	DMZ_META_DIRTY,
	DMZ_META_READING,
	DMZ_META_WRITING,
	DMZ_META_ERROR,
};
/*
 * Super block information (one per metadata set).
 */
struct dmz_sb {
	sector_t		block;
	struct dmz_mblock	*mblk;
	struct dmz_super	*sb;
};
/*
 * In-memory metadata.
 */
struct dmz_metadata {
	struct dmz_dev		*dev;

	sector_t		zone_bitmap_size;
	unsigned int		zone_nr_bitmap_blocks;
	unsigned int		zone_bits_per_mblk;

	unsigned int		nr_bitmap_blocks;
	unsigned int		nr_map_blocks;

	unsigned int		nr_useable_zones;
	unsigned int		nr_meta_blocks;
	unsigned int		nr_meta_zones;
	unsigned int		nr_data_zones;
	unsigned int		nr_rnd_zones;
	unsigned int		nr_reserved_seq;
	unsigned int		nr_chunks;

	/* Zone information array */
	struct dm_zone		*zones;

	struct dm_zone		*sb_zone;
	struct dmz_sb		sb[2];
	unsigned int		mblk_primary;
	u64			sb_gen;
	unsigned int		min_nr_mblks;
	unsigned int		max_nr_mblks;
	atomic_t		nr_mblks;
	struct rw_semaphore	mblk_sem;
	struct mutex		mblk_flush_lock;
	spinlock_t		mblk_lock;
	struct rb_root		mblk_rbtree;
	struct list_head	mblk_lru_list;
	struct list_head	mblk_dirty_list;
	struct shrinker		mblk_shrinker;

	/* Zone allocation management */
	struct mutex		map_lock;
	struct dmz_mblock	**map_mblk;
	unsigned int		nr_rnd;
	atomic_t		unmap_nr_rnd;
	struct list_head	unmap_rnd_list;
	struct list_head	map_rnd_list;

	unsigned int		nr_seq;
	atomic_t		unmap_nr_seq;
	struct list_head	unmap_seq_list;
	struct list_head	map_seq_list;

	atomic_t		nr_reserved_seq_zones;
	struct list_head	reserved_seq_zones_list;

	wait_queue_head_t	free_wq;
};
unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	return ((unsigned int)(zone - zmd->zones));
}

sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_sectors_shift;
}
sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_blocks_shift;
}
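/*
 * For example (illustrative figures only): with 256 MiB zones and 4 KiB
 * blocks, zone_nr_blocks_shift is 16, so zone 3 starts at block
 * 3 << 16 = 196608 of the backing device.
 */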
unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
{
	return zmd->nr_chunks;
}

unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd)
{
	return zmd->nr_rnd;
}

unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd)
{
	return atomic_read(&zmd->unmap_nr_rnd);
}
/*
 * Lock/unlock mapping table.
 * The map lock also protects all the zone lists.
 */
void dmz_lock_map(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->map_lock);
}

void dmz_unlock_map(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->map_lock);
}

/*
 * Lock/unlock metadata access. This is a "read" lock on a semaphore
 * that prevents metadata flush from running while metadata are being
 * modified. The actual metadata write mutual exclusion is achieved with
 * the map lock and zone state management (active and reclaim state are
 * mutually exclusive).
 */
void dmz_lock_metadata(struct dmz_metadata *zmd)
{
	down_read(&zmd->mblk_sem);
}

void dmz_unlock_metadata(struct dmz_metadata *zmd)
{
	up_read(&zmd->mblk_sem);
}

/*
 * Lock/unlock flush: prevent concurrent executions
 * of dmz_flush_metadata as well as metadata modification in reclaim
 * while flush is being executed.
 */
void dmz_lock_flush(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->mblk_flush_lock);
}

void dmz_unlock_flush(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->mblk_flush_lock);
}
/*
 * Allocate a metadata block.
 */
static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
					   sector_t mblk_no)
{
	struct dmz_mblock *mblk = NULL;

	/* See if we can reuse cached blocks */
	if (zmd->max_nr_mblks && atomic_read(&zmd->nr_mblks) > zmd->max_nr_mblks) {
		spin_lock(&zmd->mblk_lock);
		mblk = list_first_entry_or_null(&zmd->mblk_lru_list,
						struct dmz_mblock, link);
		if (mblk) {
			list_del_init(&mblk->link);
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			mblk->no = mblk_no;
		}
		spin_unlock(&zmd->mblk_lock);
		if (mblk)
			return mblk;
	}

	/* Allocate a new block */
	mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO);
	if (!mblk)
		return NULL;

	mblk->page = alloc_page(GFP_NOIO);
	if (!mblk->page) {
		kfree(mblk);
		return NULL;
	}

	RB_CLEAR_NODE(&mblk->node);
	INIT_LIST_HEAD(&mblk->link);
	mblk->ref = 0;
	mblk->state = 0;
	mblk->no = mblk_no;
	mblk->data = page_address(mblk->page);

	atomic_inc(&zmd->nr_mblks);

	return mblk;
}

/*
 * Free a metadata block.
 */
static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	__free_pages(mblk->page, 0);
	kfree(mblk);

	atomic_dec(&zmd->nr_mblks);
}
/*
 * Insert a metadata block in the rbtree.
 */
static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	struct rb_root *root = &zmd->mblk_rbtree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct dmz_mblock *b;

	/* Figure out where to put the new node */
	while (*new) {
		b = container_of(*new, struct dmz_mblock, node);
		parent = *new;
		new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right);
	}

	/* Add new node and rebalance tree */
	rb_link_node(&mblk->node, parent, new);
	rb_insert_color(&mblk->node, root);
}
/*
 * Lookup a metadata block in the rbtree. If the block is found, increment
 * its reference count.
 */
static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
					      sector_t mblk_no)
{
	struct rb_root *root = &zmd->mblk_rbtree;
	struct rb_node *node = root->rb_node;
	struct dmz_mblock *mblk;

	while (node) {
		mblk = container_of(node, struct dmz_mblock, node);
		if (mblk->no == mblk_no) {
			/*
			 * If this is the first reference to the block,
			 * remove it from the LRU list.
			 */
			mblk->ref++;
			if (mblk->ref == 1 &&
			    !test_bit(DMZ_META_DIRTY, &mblk->state))
				list_del_init(&mblk->link);
			return mblk;
		}
		node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
	}

	return NULL;
}
/*
 * Metadata block BIO end callback.
 */
static void dmz_mblock_bio_end_io(struct bio *bio)
{
	struct dmz_mblock *mblk = bio->bi_private;
	int flag;

	if (bio->bi_status)
		set_bit(DMZ_META_ERROR, &mblk->state);

	if (bio_op(bio) == REQ_OP_WRITE)
		flag = DMZ_META_WRITING;
	else
		flag = DMZ_META_READING;

	clear_bit_unlock(flag, &mblk->state);
	smp_mb__after_atomic();
	wake_up_bit(&mblk->state, flag);

	bio_put(bio);
}
/*
 * Read an uncached metadata block from disk and add it to the cache.
 */
static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
					      sector_t mblk_no)
{
	struct dmz_mblock *mblk, *m;
	sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
	struct bio *bio;

	if (dmz_bdev_is_dying(zmd->dev))
		return ERR_PTR(-EIO);

	/* Get a new block and a BIO to read it */
	mblk = dmz_alloc_mblock(zmd, mblk_no);
	if (!mblk)
		return ERR_PTR(-ENOMEM);

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		dmz_free_mblock(zmd, mblk);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock(&zmd->mblk_lock);

	/*
	 * Make sure that another context did not start reading
	 * the block already.
	 */
	m = dmz_get_mblock_fast(zmd, mblk_no);
	if (m) {
		spin_unlock(&zmd->mblk_lock);
		dmz_free_mblock(zmd, mblk);
		bio_put(bio);
		return m;
	}

	mblk->ref++;
	set_bit(DMZ_META_READING, &mblk->state);
	dmz_insert_mblock(zmd, mblk);

	spin_unlock(&zmd->mblk_lock);

	/* Submit read BIO */
	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, zmd->dev->bdev);
	bio->bi_private = mblk;
	bio->bi_end_io = dmz_mblock_bio_end_io;
	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
	submit_bio(bio);

	return mblk;
}
/*
 * Free metadata blocks.
 */
static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd,
					     unsigned long limit)
{
	struct dmz_mblock *mblk;
	unsigned long count = 0;

	if (!zmd->max_nr_mblks)
		return 0;

	while (!list_empty(&zmd->mblk_lru_list) &&
	       atomic_read(&zmd->nr_mblks) > zmd->min_nr_mblks &&
	       count < limit) {
		mblk = list_first_entry(&zmd->mblk_lru_list,
					struct dmz_mblock, link);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
		count++;
	}

	return count;
}
/*
 * For mblock shrinker: get the number of unused metadata blocks in the cache.
 */
static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
					       struct shrink_control *sc)
{
	struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);

	return atomic_read(&zmd->nr_mblks);
}

/*
 * For mblock shrinker: scan unused metadata blocks and shrink the cache.
 */
static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink,
					      struct shrink_control *sc)
{
	struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
	unsigned long count;

	spin_lock(&zmd->mblk_lock);
	count = dmz_shrink_mblock_cache(zmd, sc->nr_to_scan);
	spin_unlock(&zmd->mblk_lock);

	return count ? count : SHRINK_STOP;
}
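/*
 * Note on the shrinker pair above: count_objects reports how many cached
 * mblocks could potentially be reclaimed, while scan_objects returns the
 * number actually freed, or SHRINK_STOP when nothing could be freed so
 * that the VM stops calling back for this pass.
 */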
/*
 * Release a metadata block.
 */
static void dmz_release_mblock(struct dmz_metadata *zmd,
			       struct dmz_mblock *mblk)
{
	if (!mblk)
		return;

	spin_lock(&zmd->mblk_lock);

	mblk->ref--;
	if (mblk->ref == 0) {
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			dmz_free_mblock(zmd, mblk);
		} else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) {
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
			dmz_shrink_mblock_cache(zmd, 1);
		}
	}

	spin_unlock(&zmd->mblk_lock);
}
/*
 * Get a metadata block from the rbtree. If the block
 * is not present, read it from disk.
 */
static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
					 sector_t mblk_no)
{
	struct dmz_mblock *mblk;

	/* Check rbtree */
	spin_lock(&zmd->mblk_lock);
	mblk = dmz_get_mblock_fast(zmd, mblk_no);
	spin_unlock(&zmd->mblk_lock);

	if (!mblk) {
		/* Cache miss: read the block from disk */
		mblk = dmz_get_mblock_slow(zmd, mblk_no);
		if (IS_ERR(mblk))
			return mblk;
	}

	/* Wait for on-going read I/O and check for error */
	wait_on_bit_io(&mblk->state, DMZ_META_READING,
		       TASK_UNINTERRUPTIBLE);
	if (test_bit(DMZ_META_ERROR, &mblk->state)) {
		dmz_release_mblock(zmd, mblk);
		dmz_check_bdev(zmd->dev);
		return ERR_PTR(-EIO);
	}

	return mblk;
}
/*
 * Mark a metadata block dirty.
 */
static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	spin_lock(&zmd->mblk_lock);
	if (!test_and_set_bit(DMZ_META_DIRTY, &mblk->state))
		list_add_tail(&mblk->link, &zmd->mblk_dirty_list);
	spin_unlock(&zmd->mblk_lock);
}
/*
 * Issue a metadata block write BIO.
 */
static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
			    unsigned int set)
{
	sector_t block = zmd->sb[set].block + mblk->no;
	struct bio *bio;

	if (dmz_bdev_is_dying(zmd->dev))
		return -EIO;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		set_bit(DMZ_META_ERROR, &mblk->state);
		return -ENOMEM;
	}

	set_bit(DMZ_META_WRITING, &mblk->state);

	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, zmd->dev->bdev);
	bio->bi_private = mblk;
	bio->bi_end_io = dmz_mblock_bio_end_io;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
	submit_bio(bio);

	return 0;
}
/*
 * Read/write a metadata block.
 */
static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
			  struct page *page)
{
	struct bio *bio;
	int ret;

	if (dmz_bdev_is_dying(zmd->dev))
		return -EIO;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, zmd->dev->bdev);
	bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
	bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
	ret = submit_bio_wait(bio);
	bio_put(bio);

	if (ret)
		dmz_check_bdev(zmd->dev);
	return ret;
}
/*
 * Write super block of the specified metadata set.
 */
static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
{
	sector_t block = zmd->sb[set].block;
	struct dmz_mblock *mblk = zmd->sb[set].mblk;
	struct dmz_super *sb = zmd->sb[set].sb;
	u64 sb_gen = zmd->sb_gen + 1;
	int ret;

	sb->magic = cpu_to_le32(DMZ_MAGIC);
	sb->version = cpu_to_le32(DMZ_META_VER);

	sb->gen = cpu_to_le64(sb_gen);

	sb->sb_block = cpu_to_le64(block);
	sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks);
	sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq);
	sb->nr_chunks = cpu_to_le32(zmd->nr_chunks);

	sb->nr_map_blocks = cpu_to_le32(zmd->nr_map_blocks);
	sb->nr_bitmap_blocks = cpu_to_le32(zmd->nr_bitmap_blocks);

	sb->crc = 0;
	sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE));

	ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
	if (ret == 0)
		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);

	return ret;
}
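/*
 * The checksum written above is computed over the whole 4 KiB block with
 * the crc field zeroed first, and is seeded with the new generation
 * number, which ties the checksum to the generation stored in the block.
 */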
/*
 * Write dirty metadata blocks to the specified set.
 */
static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
				   struct list_head *write_list,
				   unsigned int set)
{
	struct dmz_mblock *mblk;
	struct blk_plug plug;
	int ret = 0, nr_mblks_submitted = 0;

	/* Issue writes */
	blk_start_plug(&plug);
	list_for_each_entry(mblk, write_list, link) {
		ret = dmz_write_mblock(zmd, mblk, set);
		if (ret)
			break;
		nr_mblks_submitted++;
	}
	blk_finish_plug(&plug);

	/* Wait for completion */
	list_for_each_entry(mblk, write_list, link) {
		if (!nr_mblks_submitted)
			break;
		wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
			       TASK_UNINTERRUPTIBLE);
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			clear_bit(DMZ_META_ERROR, &mblk->state);
			dmz_check_bdev(zmd->dev);
			ret = -EIO;
		}
		nr_mblks_submitted--;
	}

	/* Flush drive cache (this will also sync data) */
	if (ret == 0)
		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);

	return ret;
}
/*
 * Log dirty metadata blocks.
 */
static int dmz_log_dirty_mblocks(struct dmz_metadata *zmd,
				 struct list_head *write_list)
{
	unsigned int log_set = zmd->mblk_primary ^ 0x1;
	int ret;

	/* Write dirty blocks to the log */
	ret = dmz_write_dirty_mblocks(zmd, write_list, log_set);
	if (ret)
		return ret;

	/*
	 * No error so far: now validate the log by updating the
	 * log index super block generation.
	 */
	ret = dmz_write_sb(zmd, log_set);

	return ret;
}
/*
 * Flush dirty metadata blocks.
 */
int dmz_flush_metadata(struct dmz_metadata *zmd)
{
	struct dmz_mblock *mblk;
	struct list_head write_list;
	int ret;

	INIT_LIST_HEAD(&write_list);

	/*
	 * Make sure that metadata blocks are stable before logging: take
	 * the write lock on the metadata semaphore to prevent target BIOs
	 * from modifying metadata.
	 */
	down_write(&zmd->mblk_sem);

	/*
	 * This is called from the target flush work and reclaim work.
	 * Concurrent execution is not allowed.
	 */
	dmz_lock_flush(zmd);

	if (dmz_bdev_is_dying(zmd->dev)) {
		ret = -EIO;
		goto out;
	}

	/* Get dirty blocks */
	spin_lock(&zmd->mblk_lock);
	list_splice_init(&zmd->mblk_dirty_list, &write_list);
	spin_unlock(&zmd->mblk_lock);

	/* If there are no dirty metadata blocks, just flush the device cache */
	if (list_empty(&write_list)) {
		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
		goto err;
	}

	/*
	 * The primary metadata set is still clean. Keep it this way until
	 * all updates are successful in the secondary set. That is, use
	 * the secondary set as a log.
	 */
	ret = dmz_log_dirty_mblocks(zmd, &write_list);
	if (ret)
		goto err;

	/*
	 * The log is on disk. It is now safe to update in place
	 * in the primary metadata set.
	 */
	ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary);
	if (ret)
		goto err;

	ret = dmz_write_sb(zmd, zmd->mblk_primary);
	if (ret)
		goto err;

	while (!list_empty(&write_list)) {
		mblk = list_first_entry(&write_list, struct dmz_mblock, link);
		list_del_init(&mblk->link);

		spin_lock(&zmd->mblk_lock);
		clear_bit(DMZ_META_DIRTY, &mblk->state);
		if (mblk->ref == 0)
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
		spin_unlock(&zmd->mblk_lock);
	}

	zmd->sb_gen++;
out:
	dmz_unlock_flush(zmd);
	up_write(&zmd->mblk_sem);

	return ret;

err:
	if (!list_empty(&write_list)) {
		spin_lock(&zmd->mblk_lock);
		list_splice(&write_list, &zmd->mblk_dirty_list);
		spin_unlock(&zmd->mblk_lock);
	}
	if (!dmz_check_bdev(zmd->dev))
		ret = -EIO;
	goto out;
}
/*
 * Check super block information and initialize.
 */
static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_super *sb)
{
	unsigned int nr_meta_zones, nr_data_zones;
	struct dmz_dev *dev = zmd->dev;
	u32 crc, stored_crc;
	u64 gen;

	gen = le64_to_cpu(sb->gen);
	stored_crc = le32_to_cpu(sb->crc);
	sb->crc = 0;
	crc = crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE);
	if (crc != stored_crc) {
		dmz_dev_err(dev, "Invalid checksum (needed 0x%08x, got 0x%08x)",
			    crc, stored_crc);
		return -ENXIO;
	}

	if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
		dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
			    DMZ_MAGIC, le32_to_cpu(sb->magic));
		return -ENXIO;
	}

	if (le32_to_cpu(sb->version) != DMZ_META_VER) {
		dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
			    DMZ_META_VER, le32_to_cpu(sb->version));
		return -ENXIO;
	}

	nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + dev->zone_nr_blocks - 1)
		>> dev->zone_nr_blocks_shift;
	if (!nr_meta_zones ||
	    nr_meta_zones >= zmd->nr_rnd_zones) {
		dmz_dev_err(dev, "Invalid number of metadata blocks");
		return -ENXIO;
	}

	if (!le32_to_cpu(sb->nr_reserved_seq) ||
	    le32_to_cpu(sb->nr_reserved_seq) >= (zmd->nr_useable_zones - nr_meta_zones)) {
		dmz_dev_err(dev, "Invalid number of reserved sequential zones");
		return -ENXIO;
	}

	nr_data_zones = zmd->nr_useable_zones -
		(nr_meta_zones * 2 + le32_to_cpu(sb->nr_reserved_seq));
	if (le32_to_cpu(sb->nr_chunks) > nr_data_zones) {
		dmz_dev_err(dev, "Invalid number of chunks %u / %u",
			    le32_to_cpu(sb->nr_chunks), nr_data_zones);
		return -ENXIO;
	}

	/* OK */
	zmd->nr_meta_blocks = le32_to_cpu(sb->nr_meta_blocks);
	zmd->nr_reserved_seq = le32_to_cpu(sb->nr_reserved_seq);
	zmd->nr_chunks = le32_to_cpu(sb->nr_chunks);
	zmd->nr_map_blocks = le32_to_cpu(sb->nr_map_blocks);
	zmd->nr_bitmap_blocks = le32_to_cpu(sb->nr_bitmap_blocks);
	zmd->nr_meta_zones = nr_meta_zones;
	zmd->nr_data_zones = nr_data_zones;

	return 0;
}
/*
 * Read the first or second super block from disk.
 */
static int dmz_read_sb(struct dmz_metadata *zmd, unsigned int set)
{
	return dmz_rdwr_block(zmd, REQ_OP_READ, zmd->sb[set].block,
			      zmd->sb[set].mblk->page);
}
/*
 * Determine the position of the secondary super blocks on disk.
 * This is used only if a corruption of the primary super block
 * is detected.
 */
static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
{
	unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
	struct dmz_mblock *mblk;
	int i;

	/* Allocate a block */
	mblk = dmz_alloc_mblock(zmd, 0);
	if (!mblk)
		return -ENOMEM;

	zmd->sb[1].mblk = mblk;
	zmd->sb[1].sb = mblk->data;

	/* Bad first super block: search for the second one */
	zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks;
	for (i = 0; i < zmd->nr_rnd_zones - 1; i++) {
		if (dmz_read_sb(zmd, 1) != 0)
			break;
		if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC)
			return 0;
		zmd->sb[1].block += zone_nr_blocks;
	}

	dmz_free_mblock(zmd, mblk);
	zmd->sb[1].mblk = NULL;

	return -EIO;
}
/*
 * Read the first or second super block from disk.
 */
static int dmz_get_sb(struct dmz_metadata *zmd, unsigned int set)
{
	struct dmz_mblock *mblk;
	int ret;

	/* Allocate a block */
	mblk = dmz_alloc_mblock(zmd, 0);
	if (!mblk)
		return -ENOMEM;

	zmd->sb[set].mblk = mblk;
	zmd->sb[set].sb = mblk->data;

	/* Read super block */
	ret = dmz_read_sb(zmd, set);
	if (ret) {
		dmz_free_mblock(zmd, mblk);
		zmd->sb[set].mblk = NULL;
	}

	return ret;
}
/*
 * Recover a metadata set.
 */
static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
{
	unsigned int src_set = dst_set ^ 0x1;
	struct page *page;
	int i, ret;

	dmz_dev_warn(zmd->dev, "Metadata set %u invalid: recovering", dst_set);

	if (dst_set == 0)
		zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
	else
		zmd->sb[1].block = zmd->sb[0].block +
			(zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);

	page = alloc_page(GFP_NOIO);
	if (!page)
		return -ENOMEM;

	/* Copy metadata blocks */
	for (i = 1; i < zmd->nr_meta_blocks; i++) {
		ret = dmz_rdwr_block(zmd, REQ_OP_READ,
				     zmd->sb[src_set].block + i, page);
		if (ret)
			goto out;
		ret = dmz_rdwr_block(zmd, REQ_OP_WRITE,
				     zmd->sb[dst_set].block + i, page);
		if (ret)
			goto out;
	}

	/* Finalize with the super block */
	if (!zmd->sb[dst_set].mblk) {
		zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0);
		if (!zmd->sb[dst_set].mblk) {
			ret = -ENOMEM;
			goto out;
		}
		zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data;
	}

	ret = dmz_write_sb(zmd, dst_set);
out:
	__free_pages(page, 0);

	return ret;
}
/*
 * Get super block from disk.
 */
static int dmz_load_sb(struct dmz_metadata *zmd)
{
	bool sb_good[2] = {false, false};
	u64 sb_gen[2] = {0, 0};
	int ret;

	/* Read and check the primary super block */
	zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
	ret = dmz_get_sb(zmd, 0);
	if (ret) {
		dmz_dev_err(zmd->dev, "Read primary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, zmd->sb[0].sb);

	/* Read and check secondary super block */
	if (ret == 0) {
		sb_good[0] = true;
		zmd->sb[1].block = zmd->sb[0].block +
			(zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
		ret = dmz_get_sb(zmd, 1);
	} else
		ret = dmz_lookup_secondary_sb(zmd);

	if (ret) {
		dmz_dev_err(zmd->dev, "Read secondary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, zmd->sb[1].sb);
	if (ret == 0)
		sb_good[1] = true;

	/* Use highest generation sb first */
	if (!sb_good[0] && !sb_good[1]) {
		dmz_dev_err(zmd->dev, "No valid super block found");
		return -EIO;
	}

	if (sb_good[0])
		sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen);
	else
		ret = dmz_recover_mblocks(zmd, 0);

	if (sb_good[1])
		sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen);
	else
		ret = dmz_recover_mblocks(zmd, 1);

	if (ret) {
		dmz_dev_err(zmd->dev, "Recovery failed");
		return -EIO;
	}

	if (sb_gen[0] >= sb_gen[1]) {
		zmd->sb_gen = sb_gen[0];
		zmd->mblk_primary = 0;
	} else {
		zmd->sb_gen = sb_gen[1];
		zmd->mblk_primary = 1;
	}

	dmz_dev_debug(zmd->dev, "Using super block %u (gen %llu)",
		      zmd->mblk_primary, zmd->sb_gen);

	return 0;
}
/*
 * Initialize a zone descriptor.
 */
static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data)
{
	struct dmz_metadata *zmd = data;
	struct dm_zone *zone = &zmd->zones[idx];
	struct dmz_dev *dev = zmd->dev;

	/* Ignore the eventual last runt (smaller) zone */
	if (blkz->len != dev->zone_nr_sectors) {
		if (blkz->start + blkz->len == dev->capacity)
			return 0;
		return -ENXIO;
	}

	INIT_LIST_HEAD(&zone->link);
	atomic_set(&zone->refcount, 0);
	zone->chunk = DMZ_MAP_UNMAPPED;

	switch (blkz->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		set_bit(DMZ_RND, &zone->flags);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		set_bit(DMZ_SEQ, &zone->flags);
		break;
	default:
		return -ENXIO;
	}

	if (dmz_is_rnd(zone))
		zone->wp_block = 0;
	else
		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);

	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
		set_bit(DMZ_OFFLINE, &zone->flags);
	else if (blkz->cond == BLK_ZONE_COND_READONLY)
		set_bit(DMZ_READ_ONLY, &zone->flags);
	else {
		zmd->nr_useable_zones++;
		if (dmz_is_rnd(zone)) {
			zmd->nr_rnd_zones++;
			if (!zmd->sb_zone) {
				/* Super block zone */
				zmd->sb_zone = zone;
			}
		}
	}

	return 0;
}
/*
 * Free zones descriptors.
 */
static void dmz_drop_zones(struct dmz_metadata *zmd)
{
	kfree(zmd->zones);
	zmd->zones = NULL;
}
/*
 * Allocate and initialize zone descriptors using the zone
 * information from disk.
 */
static int dmz_init_zones(struct dmz_metadata *zmd)
{
	struct dmz_dev *dev = zmd->dev;
	int ret;

	/* Init */
	zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
	zmd->zone_nr_bitmap_blocks =
		max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
	zmd->zone_bits_per_mblk = min_t(sector_t, dev->zone_nr_blocks,
					DMZ_BLOCK_SIZE_BITS);

	/* Allocate zone array */
	zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL);
	if (!zmd->zones)
		return -ENOMEM;

	dmz_dev_info(dev, "Using %zu B for zone information",
		     sizeof(struct dm_zone) * dev->nr_zones);

	/*
	 * Get zone information and initialize zone descriptors. At the same
	 * time, determine where the super block should be: first block of the
	 * first randomly writable zone.
	 */
	ret = blkdev_report_zones(dev->bdev, 0, BLK_ALL_ZONES, dmz_init_zone,
				  zmd);
	if (ret < 0) {
		dmz_drop_zones(zmd);
		return ret;
	}

	return 0;
}
static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx,
			      void *data)
{
	struct dm_zone *zone = data;

	clear_bit(DMZ_OFFLINE, &zone->flags);
	clear_bit(DMZ_READ_ONLY, &zone->flags);
	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
		set_bit(DMZ_OFFLINE, &zone->flags);
	else if (blkz->cond == BLK_ZONE_COND_READONLY)
		set_bit(DMZ_READ_ONLY, &zone->flags);

	if (dmz_is_seq(zone))
		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
	else
		zone->wp_block = 0;
	return 0;
}
/*
 * Update a zone's information.
 */
static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int noio_flag;
	int ret;

	/*
	 * Get zone information from disk. Since blkdev_report_zones() uses
	 * GFP_KERNEL by default for memory allocations, set the per-task
	 * PF_MEMALLOC_NOIO flag so that all allocations are done as if
	 * GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone), 1,
				  dmz_update_zone_cb, zone);
	memalloc_noio_restore(noio_flag);

	if (ret == 0)
		ret = -EIO;
	if (ret < 0) {
		dmz_dev_err(zmd->dev, "Get zone %u report failed",
			    dmz_id(zmd, zone));
		dmz_check_bdev(zmd->dev);
		return ret;
	}

	return 0;
}
/*
 * Check a zone write pointer position when the zone is marked
 * with the sequential write error flag.
 */
static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
				    struct dm_zone *zone)
{
	unsigned int wp = 0;
	int ret;

	wp = zone->wp_block;
	ret = dmz_update_zone(zmd, zone);
	if (ret)
		return ret;

	dmz_dev_warn(zmd->dev, "Processing zone %u write error (zone wp %u/%u)",
		     dmz_id(zmd, zone), zone->wp_block, wp);

	if (zone->wp_block < wp) {
		dmz_invalidate_blocks(zmd, zone, zone->wp_block,
				      wp - zone->wp_block);
	}

	return 0;
}
static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
{
	return &zmd->zones[zone_id];
}
/*
 * Reset a zone write pointer.
 */
static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	int ret;

	/*
	 * Ignore offline zones, read only zones,
	 * and conventional zones.
	 */
	if (dmz_is_offline(zone) ||
	    dmz_is_readonly(zone) ||
	    dmz_is_rnd(zone))
		return 0;

	if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
		struct dmz_dev *dev = zmd->dev;

		ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
				       dmz_start_sect(zmd, zone),
				       dev->zone_nr_sectors, GFP_NOIO);
		if (ret) {
			dmz_dev_err(dev, "Reset zone %u failed %d",
				    dmz_id(zmd, zone), ret);
			return ret;
		}
	}

	/* Clear write error bit and rewind write pointer position */
	clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
	zone->wp_block = 0;

	return 0;
}
static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);
/*
 * Initialize chunk mapping.
 */
static int dmz_load_mapping(struct dmz_metadata *zmd)
{
	struct dmz_dev *dev = zmd->dev;
	struct dm_zone *dzone, *bzone;
	struct dmz_mblock *dmap_mblk = NULL;
	struct dmz_map *dmap;
	unsigned int i = 0, e = 0, chunk = 0;
	unsigned int dzone_id;
	unsigned int bzone_id;

	/* Metadata block array for the chunk mapping table */
	zmd->map_mblk = kcalloc(zmd->nr_map_blocks,
				sizeof(struct dmz_mblk *), GFP_KERNEL);
	if (!zmd->map_mblk)
		return -ENOMEM;

	/* Get chunk mapping table blocks and initialize zone mapping */
	while (chunk < zmd->nr_chunks) {
		if (!dmap_mblk) {
			/* Get mapping block */
			dmap_mblk = dmz_get_mblock(zmd, i + 1);
			if (IS_ERR(dmap_mblk))
				return PTR_ERR(dmap_mblk);
			zmd->map_mblk[i] = dmap_mblk;
			dmap = (struct dmz_map *) dmap_mblk->data;
			i++;
			e = 0;
		}

		/* Check data zone */
		dzone_id = le32_to_cpu(dmap[e].dzone_id);
		if (dzone_id == DMZ_MAP_UNMAPPED)
			goto next;

		if (dzone_id >= dev->nr_zones) {
			dmz_dev_err(dev, "Chunk %u mapping: invalid data zone ID %u",
				    chunk, dzone_id);
			return -EIO;
		}

		dzone = dmz_get(zmd, dzone_id);
		set_bit(DMZ_DATA, &dzone->flags);
		dzone->chunk = chunk;
		dmz_get_zone_weight(zmd, dzone);

		if (dmz_is_rnd(dzone))
			list_add_tail(&dzone->link, &zmd->map_rnd_list);
		else
			list_add_tail(&dzone->link, &zmd->map_seq_list);

		/* Check buffer zone */
		bzone_id = le32_to_cpu(dmap[e].bzone_id);
		if (bzone_id == DMZ_MAP_UNMAPPED)
			goto next;

		if (bzone_id >= dev->nr_zones) {
			dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone ID %u",
				    chunk, bzone_id);
			return -EIO;
		}

		bzone = dmz_get(zmd, bzone_id);
		if (!dmz_is_rnd(bzone)) {
			dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone %u",
				    chunk, bzone_id);
			return -EIO;
		}

		set_bit(DMZ_DATA, &bzone->flags);
		set_bit(DMZ_BUF, &bzone->flags);
		bzone->chunk = chunk;
		bzone->bzone = dzone;
		dzone->bzone = bzone;
		dmz_get_zone_weight(zmd, bzone);
		list_add_tail(&bzone->link, &zmd->map_rnd_list);
next:
		chunk++;
		e++;
		if (e >= DMZ_MAP_ENTRIES)
			dmap_mblk = NULL;
	}

	/*
	 * At this point, only meta zones and mapped data zones were
	 * fully initialized. All remaining zones are unmapped data
	 * zones. Finish initializing those here.
	 */
	for (i = 0; i < dev->nr_zones; i++) {
		dzone = dmz_get(zmd, i);
		if (dmz_is_meta(dzone))
			continue;

		if (dmz_is_rnd(dzone))
			zmd->nr_rnd++;
		else
			zmd->nr_seq++;

		if (dmz_is_data(dzone)) {
			/* Already initialized */
			continue;
		}

		/* Unmapped data zone */
		set_bit(DMZ_DATA, &dzone->flags);
		dzone->chunk = DMZ_MAP_UNMAPPED;
		if (dmz_is_rnd(dzone)) {
			list_add_tail(&dzone->link, &zmd->unmap_rnd_list);
			atomic_inc(&zmd->unmap_nr_rnd);
		} else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) {
			list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list);
			atomic_inc(&zmd->nr_reserved_seq_zones);
		} else {
			list_add_tail(&dzone->link, &zmd->unmap_seq_list);
			atomic_inc(&zmd->unmap_nr_seq);
		}
	}

	return 0;
}
/*
 * Set a data chunk mapping.
 */
static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk,
				  unsigned int dzone_id, unsigned int bzone_id)
{
	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
	struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
	int map_idx = chunk & DMZ_MAP_ENTRIES_MASK;

	dmap[map_idx].dzone_id = cpu_to_le32(dzone_id);
	dmap[map_idx].bzone_id = cpu_to_le32(bzone_id);
	dmz_dirty_mblock(zmd, dmap_mblk);
}
/*
 * The list of mapped zones is maintained in LRU order.
 * This rotates a zone at the end of its map list.
 */
static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	if (list_empty(&zone->link))
		return;

	list_del_init(&zone->link);
	if (dmz_is_seq(zone)) {
		/* LRU rotate sequential zone */
		list_add_tail(&zone->link, &zmd->map_seq_list);
	} else {
		/* LRU rotate random zone */
		list_add_tail(&zone->link, &zmd->map_rnd_list);
	}
}

/*
 * The list of mapped random zones is maintained
 * in LRU order. This rotates a zone at the end of the list.
 */
static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	__dmz_lru_zone(zmd, zone);
	if (zone->bzone)
		__dmz_lru_zone(zmd, zone->bzone);
}
/*
 * Wait for any zone to be freed.
 */
static void dmz_wait_for_free_zones(struct dmz_metadata *zmd)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&zmd->free_wq, &wait, TASK_UNINTERRUPTIBLE);
	dmz_unlock_map(zmd);
	dmz_unlock_metadata(zmd);

	io_schedule_timeout(HZ);

	dmz_lock_metadata(zmd);
	dmz_lock_map(zmd);
	finish_wait(&zmd->free_wq, &wait);
}
/*
 * Lock a zone for reclaim (set the zone RECLAIM bit).
 * Returns false if the zone cannot be locked or if it is already locked
 * and true otherwise.
 */
int dmz_lock_zone_reclaim(struct dm_zone *zone)
{
	/* Active zones cannot be reclaimed */
	if (dmz_is_active(zone))
		return 0;

	return !test_and_set_bit(DMZ_RECLAIM, &zone->flags);
}

/*
 * Clear a zone reclaim flag.
 */
void dmz_unlock_zone_reclaim(struct dm_zone *zone)
{
	WARN_ON(dmz_is_active(zone));
	WARN_ON(!dmz_in_reclaim(zone));

	clear_bit_unlock(DMZ_RECLAIM, &zone->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zone->flags, DMZ_RECLAIM);
}
/*
 * Wait for a zone reclaim to complete.
 */
static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	dmz_unlock_map(zmd);
	dmz_unlock_metadata(zmd);
	wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
	dmz_lock_metadata(zmd);
	dmz_lock_map(zmd);
}
/*
 * Select a random write zone for reclaim.
 */
static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
{
	struct dm_zone *dzone = NULL;
	struct dm_zone *zone;

	if (list_empty(&zmd->map_rnd_list))
		return ERR_PTR(-EBUSY);

	list_for_each_entry(zone, &zmd->map_rnd_list, link) {
		if (dmz_is_buf(zone))
			dzone = zone->bzone;
		else
			dzone = zone;
		if (dmz_lock_zone_reclaim(dzone))
			return dzone;
	}

	return ERR_PTR(-EBUSY);
}

/*
 * Select a buffered sequential zone for reclaim.
 */
static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
{
	struct dm_zone *zone;

	if (list_empty(&zmd->map_seq_list))
		return ERR_PTR(-EBUSY);

	list_for_each_entry(zone, &zmd->map_seq_list, link) {
		if (!zone->bzone)
			continue;
		if (dmz_lock_zone_reclaim(zone))
			return zone;
	}

	return ERR_PTR(-EBUSY);
}
/*
 * Select a zone for reclaim.
 */
struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd)
{
	struct dm_zone *zone;

	/*
	 * Search for a zone candidate to reclaim: 2 cases are possible.
	 * (1) There is no free sequential zones. Then a random data zone
	 *     cannot be reclaimed. So choose a sequential zone to reclaim so
	 *     that afterward a random zone can be reclaimed.
	 * (2) At least one free sequential zone is available, then choose
	 *     the oldest random zone (data or buffer) that can be locked.
	 */
	dmz_lock_map(zmd);
	if (list_empty(&zmd->reserved_seq_zones_list))
		zone = dmz_get_seq_zone_for_reclaim(zmd);
	else
		zone = dmz_get_rnd_zone_for_reclaim(zmd);
	dmz_unlock_map(zmd);

	return zone;
}
/*
 * Get the zone mapping a chunk, if the chunk is mapped already.
 * If no mapping exist and the operation is WRITE, a zone is
 * allocated and used to map the chunk.
 * The zone returned will be set to the active state.
 */
struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, int op)
{
	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
	struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
	int dmap_idx = chunk & DMZ_MAP_ENTRIES_MASK;
	unsigned int dzone_id;
	struct dm_zone *dzone = NULL;
	int ret = 0;

	dmz_lock_map(zmd);
again:
	/* Get the chunk mapping */
	dzone_id = le32_to_cpu(dmap[dmap_idx].dzone_id);
	if (dzone_id == DMZ_MAP_UNMAPPED) {
		/*
		 * Read or discard in unmapped chunks are fine. But for
		 * writes, we need a mapping, so get one.
		 */
		if (op != REQ_OP_WRITE)
			goto out;

		/* Allocate a random zone */
		dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
		if (!dzone) {
			if (dmz_bdev_is_dying(zmd->dev)) {
				dzone = ERR_PTR(-EIO);
				goto out;
			}
			dmz_wait_for_free_zones(zmd);
			goto again;
		}

		dmz_map_zone(zmd, dzone, chunk);

	} else {
		/* The chunk is already mapped: get the mapping zone */
		dzone = dmz_get(zmd, dzone_id);
		if (dzone->chunk != chunk) {
			dzone = ERR_PTR(-EIO);
			goto out;
		}

		/* Repair write pointer if the sequential dzone has error */
		if (dmz_seq_write_err(dzone)) {
			ret = dmz_handle_seq_write_err(zmd, dzone);
			if (ret) {
				dzone = ERR_PTR(-EIO);
				goto out;
			}
			clear_bit(DMZ_SEQ_WRITE_ERR, &dzone->flags);
		}
	}

	/*
	 * If the zone is being reclaimed, the chunk mapping may change
	 * to a different zone. So wait for reclaim and retry. Otherwise,
	 * activate the zone (this will prevent reclaim from touching it).
	 */
	if (dmz_in_reclaim(dzone)) {
		dmz_wait_for_reclaim(zmd, dzone);
		goto again;
	}
	dmz_activate_zone(dzone);
	dmz_lru_zone(zmd, dzone);
out:
	dmz_unlock_map(zmd);

	return dzone;
}
/*
 * Write and discard change the block validity of data zones and their buffer
 * zones. Check here that valid blocks are still present. If all blocks are
 * invalid, the zones can be unmapped on the fly without waiting for reclaim
 * to do it.
 */
void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *dzone)
{
	struct dm_zone *bzone;

	dmz_lock_map(zmd);

	bzone = dzone->bzone;
	if (bzone) {
		if (dmz_weight(bzone))
			dmz_lru_zone(zmd, bzone);
		else {
			/* Empty buffer zone: reclaim it */
			dmz_unmap_zone(zmd, bzone);
			dmz_free_zone(zmd, bzone);
			bzone = NULL;
		}
	}

	/* Deactivate the data zone */
	dmz_deactivate_zone(dzone);
	if (dmz_is_active(dzone) || bzone || dmz_weight(dzone))
		dmz_lru_zone(zmd, dzone);
	else {
		/* Unbuffered inactive empty data zone: reclaim it */
		dmz_unmap_zone(zmd, dzone);
		dmz_free_zone(zmd, dzone);
	}

	dmz_unlock_map(zmd);
}
/*
 * Allocate and map a random zone to buffer a chunk
 * already mapped to a sequential zone.
 */
struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
				     struct dm_zone *dzone)
{
	struct dm_zone *bzone;

	dmz_lock_map(zmd);
again:
	bzone = dzone->bzone;
	if (bzone)
		goto out;

	/* Allocate a random zone */
	bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
	if (!bzone) {
		if (dmz_bdev_is_dying(zmd->dev)) {
			bzone = ERR_PTR(-EIO);
			goto out;
		}
		dmz_wait_for_free_zones(zmd);
		goto again;
	}

	/* Update the chunk mapping */
	dmz_set_chunk_mapping(zmd, dzone->chunk, dmz_id(zmd, dzone),
			      dmz_id(zmd, bzone));

	set_bit(DMZ_BUF, &bzone->flags);
	bzone->chunk = dzone->chunk;
	bzone->bzone = dzone;
	dzone->bzone = bzone;
	list_add_tail(&bzone->link, &zmd->map_rnd_list);
out:
	dmz_unlock_map(zmd);

	return bzone;
}
/*
 * Get an unmapped (free) zone.
 * This must be called with the mapping lock held.
 */
struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags)
{
	struct list_head *list;
	struct dm_zone *zone;

	if (flags & DMZ_ALLOC_RND)
		list = &zmd->unmap_rnd_list;
	else
		list = &zmd->unmap_seq_list;
again:
	if (list_empty(list)) {
		/*
		 * No free zone: if this is for reclaim, allow using the
		 * reserved sequential zones.
		 */
		if (!(flags & DMZ_ALLOC_RECLAIM) ||
		    list_empty(&zmd->reserved_seq_zones_list))
			return NULL;

		zone = list_first_entry(&zmd->reserved_seq_zones_list,
					struct dm_zone, link);
		list_del_init(&zone->link);
		atomic_dec(&zmd->nr_reserved_seq_zones);
		return zone;
	}

	zone = list_first_entry(list, struct dm_zone, link);
	list_del_init(&zone->link);

	if (dmz_is_rnd(zone))
		atomic_dec(&zmd->unmap_nr_rnd);
	else
		atomic_dec(&zmd->unmap_nr_seq);

	if (dmz_is_offline(zone)) {
		dmz_dev_warn(zmd->dev, "Zone %u is offline", dmz_id(zmd, zone));
		zone = NULL;
		goto again;
	}

	return zone;
}
/*
 * Free a zone.
 * This must be called with the mapping lock held.
 */
void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	/* If this is a sequential zone, reset it */
	if (dmz_is_seq(zone))
		dmz_reset_zone(zmd, zone);

	/* Return the zone to its type unmap list */
	if (dmz_is_rnd(zone)) {
		list_add_tail(&zone->link, &zmd->unmap_rnd_list);
		atomic_inc(&zmd->unmap_nr_rnd);
	} else if (atomic_read(&zmd->nr_reserved_seq_zones) <
		   zmd->nr_reserved_seq) {
		list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
		atomic_inc(&zmd->nr_reserved_seq_zones);
	} else {
		list_add_tail(&zone->link, &zmd->unmap_seq_list);
		atomic_inc(&zmd->unmap_nr_seq);
	}

	wake_up_all(&zmd->free_wq);
}
/*
 * Map a chunk to a zone.
 * This must be called with the mapping lock held.
 */
void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
		  unsigned int chunk)
{
	/* Set the chunk mapping */
	dmz_set_chunk_mapping(zmd, chunk, dmz_id(zmd, dzone),
			      DMZ_MAP_UNMAPPED);
	dzone->chunk = chunk;
	if (dmz_is_rnd(dzone))
		list_add_tail(&dzone->link, &zmd->map_rnd_list);
	else
		list_add_tail(&dzone->link, &zmd->map_seq_list);
}
/*
 * Unmap a zone.
 * This must be called with the mapping lock held.
 */
void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int chunk = zone->chunk;
	unsigned int dzone_id;

	if (chunk == DMZ_MAP_UNMAPPED) {
		/* Already unmapped */
		return;
	}

	if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
		/*
		 * Unmapping the chunk buffer zone: clear only
		 * the chunk buffer mapping
		 */
		dzone_id = dmz_id(zmd, zone->bzone);
		zone->bzone->bzone = NULL;
		zone->bzone = NULL;
	} else {
		/*
		 * Unmapping the chunk data zone: the zone must
		 * not be buffered.
		 */
		if (WARN_ON(zone->bzone)) {
			zone->bzone->bzone = NULL;
			zone->bzone = NULL;
		}
		dzone_id = DMZ_MAP_UNMAPPED;
	}

	dmz_set_chunk_mapping(zmd, chunk, dzone_id, DMZ_MAP_UNMAPPED);

	zone->chunk = DMZ_MAP_UNMAPPED;
	list_del_init(&zone->link);
}
/*
 * Set @nr_bits bits in @bitmap starting from @bit.
 * Return the number of bits changed from 0 to 1.
 */
static unsigned int dmz_set_bits(unsigned long *bitmap,
				 unsigned int bit, unsigned int nr_bits)
{
	unsigned long *addr;
	unsigned int end = bit + nr_bits;
	unsigned int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Try to set the whole word at once */
			addr = bitmap + BIT_WORD(bit);
			if (*addr == 0) {
				*addr = ULONG_MAX;
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (!test_and_set_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}
/*
 * Get the bitmap block storing the bit for chunk_block in zone.
 */
static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
					 struct dm_zone *zone,
					 sector_t chunk_block)
{
	sector_t bitmap_block = 1 + zmd->nr_map_blocks +
		(sector_t)(dmz_id(zmd, zone) * zmd->zone_nr_bitmap_blocks) +
		(chunk_block >> DMZ_BLOCK_SHIFT_BITS);

	return dmz_get_mblock(zmd, bitmap_block);
}
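/*
 * Illustrative index computation (assumed example sizes): with 10 chunk
 * mapping blocks and 2 bitmap blocks per zone, the bitmap block covering
 * block 0 of zone 5 is metadata block 1 + 10 + 5 * 2 + 0 = 21 of the set.
 */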
/*
 * Copy the valid blocks bitmap of from_zone to the bitmap of to_zone.
 */
int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
			  struct dm_zone *to_zone)
{
	struct dmz_mblock *from_mblk, *to_mblk;
	sector_t chunk_block = 0;

	/* Get the zones bitmap blocks */
	while (chunk_block < zmd->dev->zone_nr_blocks) {
		from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
		if (IS_ERR(from_mblk))
			return PTR_ERR(from_mblk);
		to_mblk = dmz_get_bitmap(zmd, to_zone, chunk_block);
		if (IS_ERR(to_mblk)) {
			dmz_release_mblock(zmd, from_mblk);
			return PTR_ERR(to_mblk);
		}

		memcpy(to_mblk->data, from_mblk->data, DMZ_BLOCK_SIZE);
		dmz_dirty_mblock(zmd, to_mblk);

		dmz_release_mblock(zmd, to_mblk);
		dmz_release_mblock(zmd, from_mblk);

		chunk_block += zmd->zone_bits_per_mblk;
	}

	to_zone->weight = from_zone->weight;

	return 0;
}
/*
 * Merge the valid blocks bitmap of from_zone into the bitmap of to_zone,
 * starting from chunk_block.
 */
int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
			   struct dm_zone *to_zone, sector_t chunk_block)
{
	unsigned int nr_blocks;
	int ret;

	/* Get the zones bitmap blocks */
	while (chunk_block < zmd->dev->zone_nr_blocks) {
		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
		if (ret <= 0)
			break;

		nr_blocks = ret;
		ret = dmz_validate_blocks(zmd, to_zone, chunk_block, nr_blocks);
		if (ret)
			return ret;

		chunk_block += nr_blocks;
	}

	return 0;
}
2001 int dmz_validate_blocks(struct dmz_metadata
*zmd
, struct dm_zone
*zone
,
2002 sector_t chunk_block
, unsigned int nr_blocks
)
2004 unsigned int count
, bit
, nr_bits
;
2005 unsigned int zone_nr_blocks
= zmd
->dev
->zone_nr_blocks
;
2006 struct dmz_mblock
*mblk
;
2009 dmz_dev_debug(zmd
->dev
, "=> VALIDATE zone %u, block %llu, %u blocks",
2010 dmz_id(zmd
, zone
), (unsigned long long)chunk_block
,
2013 WARN_ON(chunk_block
+ nr_blocks
> zone_nr_blocks
);
2016 /* Get bitmap block */
2017 mblk
= dmz_get_bitmap(zmd
, zone
, chunk_block
);
2019 return PTR_ERR(mblk
);
2022 bit
= chunk_block
& DMZ_BLOCK_MASK_BITS
;
2023 nr_bits
= min(nr_blocks
, zmd
->zone_bits_per_mblk
- bit
);
2025 count
= dmz_set_bits((unsigned long *)mblk
->data
, bit
, nr_bits
);
2027 dmz_dirty_mblock(zmd
, mblk
);
2030 dmz_release_mblock(zmd
, mblk
);
2032 nr_blocks
-= nr_bits
;
2033 chunk_block
+= nr_bits
;
2036 if (likely(zone
->weight
+ n
<= zone_nr_blocks
))
2039 dmz_dev_warn(zmd
->dev
, "Zone %u: weight %u should be <= %u",
2040 dmz_id(zmd
, zone
), zone
->weight
,
2041 zone_nr_blocks
- n
);
2042 zone
->weight
= zone_nr_blocks
;
/*
 * Clear nr_bits bits in bitmap starting from bit.
 * Return the number of bits cleared.
 */
static int dmz_clear_bits(unsigned long *bitmap, int bit, int nr_bits)
{
	unsigned long *addr;
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Try to clear whole word at once */
			addr = bitmap + BIT_WORD(bit);
			if (*addr == ULONG_MAX) {
				*addr = 0;
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (test_and_clear_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}
/*
 * Invalidate all the blocks in the range [block..block+nr_blocks-1].
 */
int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int count, bit, nr_bits;
	struct dmz_mblock *mblk;
	unsigned int n = 0;

	dmz_dev_debug(zmd->dev, "=> INVALIDATE zone %u, block %llu, %u blocks",
		      dmz_id(zmd, zone), (u64)chunk_block, nr_blocks);

	WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Clear bits */
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);

		count = dmz_clear_bits((unsigned long *)mblk->data,
				       bit, nr_bits);
		if (count) {
			dmz_dirty_mblock(zmd, mblk);
			n += count;
		}
		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	if (zone->weight >= n)
		zone->weight -= n;
	else {
		dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be >= %u",
			     dmz_id(zmd, zone), zone->weight, n);
		zone->weight = 0;
	}

	return 0;
}
/*
 * Get a block bit value.
 */
static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t chunk_block)
{
	struct dmz_mblock *mblk;
	int ret;

	WARN_ON(chunk_block >= zmd->dev->zone_nr_blocks);

	/* Get bitmap block */
	mblk = dmz_get_bitmap(zmd, zone, chunk_block);
	if (IS_ERR(mblk))
		return PTR_ERR(mblk);

	/* Get offset */
	ret = test_bit(chunk_block & DMZ_BLOCK_MASK_BITS,
		       (unsigned long *) mblk->data) != 0;

	dmz_release_mblock(zmd, mblk);

	return ret;
}
/*
 * Return the number of blocks from chunk_block to the first block with a bit
 * value specified by set. Search at most nr_blocks blocks from chunk_block.
 */
static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
				 sector_t chunk_block, unsigned int nr_blocks,
				 int set)
{
	struct dmz_mblock *mblk;
	unsigned int bit, set_bit, nr_bits;
	unsigned int zone_bits = zmd->zone_bits_per_mblk;
	unsigned long *bitmap;
	int n = 0;

	WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Get offset */
		bitmap = (unsigned long *) mblk->data;
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zone_bits - bit);
		if (set)
			set_bit = find_next_bit(bitmap, zone_bits, bit);
		else
			set_bit = find_next_zero_bit(bitmap, zone_bits, bit);
		dmz_release_mblock(zmd, mblk);

		n += set_bit - bit;
		if (set_bit < zone_bits)
			break;

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	return n;
}
/*
 * Test if chunk_block is valid. If it is, the number of consecutive
 * valid blocks from chunk_block will be returned.
 */
int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
		    sector_t chunk_block)
{
	int valid;

	valid = dmz_test_block(zmd, zone, chunk_block);
	if (valid <= 0)
		return valid;

	/* The block is valid: get the number of valid blocks from block */
	return dmz_to_next_set_block(zmd, zone, chunk_block,
				     zmd->dev->zone_nr_blocks - chunk_block, 0);
}
/*
 * Find the first valid block from @chunk_block in @zone.
 * If such a block is found, its number is returned using
 * @chunk_block and the total number of valid blocks from @chunk_block
 * is returned.
 */
int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t *chunk_block)
{
	sector_t start_block = *chunk_block;
	int ret;

	ret = dmz_to_next_set_block(zmd, zone, start_block,
				    zmd->dev->zone_nr_blocks - start_block, 1);
	if (ret < 0)
		return ret;

	start_block += ret;
	*chunk_block = start_block;

	return dmz_to_next_set_block(zmd, zone, start_block,
				     zmd->dev->zone_nr_blocks - start_block, 0);
}
/*
 * Count the number of bits set starting from bit up to bit + nr_bits - 1.
 */
static int dmz_count_bits(void *bitmap, int bit, int nr_bits)
{
	unsigned long *addr;
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			addr = (unsigned long *)bitmap + BIT_WORD(bit);
			if (*addr == ULONG_MAX) {
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (test_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}
/*
 * Get a zone weight.
 */
static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	struct dmz_mblock *mblk;
	sector_t chunk_block = 0;
	unsigned int bit, nr_bits;
	unsigned int nr_blocks = zmd->dev->zone_nr_blocks;
	void *bitmap;
	int n = 0;

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk)) {
			n = 0;
			break;
		}

		/* Count bits in this block */
		bitmap = mblk->data;
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
		n += dmz_count_bits(bitmap, bit, nr_bits);

		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	zone->weight = n;
}
/*
 * Cleanup the zoned metadata resources.
 */
static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
{
	struct rb_root *root;
	struct dmz_mblock *mblk, *next;
	int i;

	/* Release zone mapping resources */
	if (zmd->map_mblk) {
		for (i = 0; i < zmd->nr_map_blocks; i++)
			dmz_release_mblock(zmd, zmd->map_mblk[i]);
		kfree(zmd->map_mblk);
		zmd->map_mblk = NULL;
	}

	/* Release super blocks */
	for (i = 0; i < 2; i++) {
		if (zmd->sb[i].mblk) {
			dmz_free_mblock(zmd, zmd->sb[i].mblk);
			zmd->sb[i].mblk = NULL;
		}
	}

	/* Free cached blocks */
	while (!list_empty(&zmd->mblk_dirty_list)) {
		mblk = list_first_entry(&zmd->mblk_dirty_list,
					struct dmz_mblock, link);
		dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
			     (u64)mblk->no, mblk->ref);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
	}

	while (!list_empty(&zmd->mblk_lru_list)) {
		mblk = list_first_entry(&zmd->mblk_lru_list,
					struct dmz_mblock, link);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
	}

	/* Sanity checks: the mblock rbtree should now be empty */
	root = &zmd->mblk_rbtree;
	rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
		dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
			     (u64)mblk->no, mblk->ref);
		mblk->ref = 0;
		dmz_free_mblock(zmd, mblk);
	}

	/* Free the zone descriptors */
	dmz_drop_zones(zmd);

	mutex_destroy(&zmd->mblk_flush_lock);
	mutex_destroy(&zmd->map_lock);
}
/*
 * Initialize the zoned metadata.
 */
int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
{
	struct dmz_metadata *zmd;
	unsigned int i, zid;
	struct dm_zone *zone;
	int ret;

	zmd = kzalloc(sizeof(struct dmz_metadata), GFP_KERNEL);
	if (!zmd)
		return -ENOMEM;

	zmd->dev = dev;
	zmd->mblk_rbtree = RB_ROOT;
	init_rwsem(&zmd->mblk_sem);
	mutex_init(&zmd->mblk_flush_lock);
	spin_lock_init(&zmd->mblk_lock);
	INIT_LIST_HEAD(&zmd->mblk_lru_list);
	INIT_LIST_HEAD(&zmd->mblk_dirty_list);

	mutex_init(&zmd->map_lock);
	atomic_set(&zmd->unmap_nr_rnd, 0);
	INIT_LIST_HEAD(&zmd->unmap_rnd_list);
	INIT_LIST_HEAD(&zmd->map_rnd_list);

	atomic_set(&zmd->unmap_nr_seq, 0);
	INIT_LIST_HEAD(&zmd->unmap_seq_list);
	INIT_LIST_HEAD(&zmd->map_seq_list);

	atomic_set(&zmd->nr_reserved_seq_zones, 0);
	INIT_LIST_HEAD(&zmd->reserved_seq_zones_list);

	init_waitqueue_head(&zmd->free_wq);

	/* Initialize zone descriptors */
	ret = dmz_init_zones(zmd);
	if (ret)
		goto err;

	/* Get super block */
	ret = dmz_load_sb(zmd);
	if (ret)
		goto err;

	/* Set metadata zones starting from sb_zone */
	zid = dmz_id(zmd, zmd->sb_zone);
	for (i = 0; i < zmd->nr_meta_zones << 1; i++) {
		zone = dmz_get(zmd, zid + i);
		if (!dmz_is_rnd(zone))
			goto err;
		set_bit(DMZ_META, &zone->flags);
	}

	/* Load mapping table */
	ret = dmz_load_mapping(zmd);
	if (ret)
		goto err;

	/*
	 * Cache size boundaries: allow at least 2 super blocks, the chunk map
	 * blocks and enough blocks to be able to cache the bitmap blocks of
	 * up to 16 zones when idle (min_nr_mblks). Otherwise, if busy, allow
	 * the cache to add 512 more metadata blocks.
	 */
	zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16;
	zmd->max_nr_mblks = zmd->min_nr_mblks + 512;
	zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count;
	zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan;
	zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;

	/* Metadata cache shrinker */
	ret = register_shrinker(&zmd->mblk_shrinker);
	if (ret) {
		dmz_dev_err(dev, "Register metadata cache shrinker failed");
		goto err;
	}

	dmz_dev_info(dev, "Host-%s zoned block device",
		     bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
		     "aware" : "managed");
	dmz_dev_info(dev, "  %llu 512-byte logical sectors",
		     (u64)dev->capacity);
	dmz_dev_info(dev, "  %u zones of %llu 512-byte logical sectors",
		     dev->nr_zones, (u64)dev->zone_nr_sectors);
	dmz_dev_info(dev, "  %u metadata zones",
		     zmd->nr_meta_zones * 2);
	dmz_dev_info(dev, "  %u data zones for %u chunks",
		     zmd->nr_data_zones, zmd->nr_chunks);
	dmz_dev_info(dev, "  %u random zones (%u unmapped)",
		     zmd->nr_rnd, atomic_read(&zmd->unmap_nr_rnd));
	dmz_dev_info(dev, "  %u sequential zones (%u unmapped)",
		     zmd->nr_seq, atomic_read(&zmd->unmap_nr_seq));
	dmz_dev_info(dev, "  %u reserved sequential data zones",
		     zmd->nr_reserved_seq);

	dmz_dev_debug(dev, "Format:");
	dmz_dev_debug(dev, "%u metadata blocks per set (%u max cache)",
		      zmd->nr_meta_blocks, zmd->max_nr_mblks);
	dmz_dev_debug(dev, "  %u data zone mapping blocks",
		      zmd->nr_map_blocks);
	dmz_dev_debug(dev, "  %u bitmap blocks",
		      zmd->nr_bitmap_blocks);

	*metadata = zmd;

	return 0;
err:
	dmz_cleanup_metadata(zmd);
	kfree(zmd);
	*metadata = NULL;

	return ret;
}
/*
 * Cleanup the zoned metadata resources.
 */
void dmz_dtr_metadata(struct dmz_metadata *zmd)
{
	unregister_shrinker(&zmd->mblk_shrinker);
	dmz_cleanup_metadata(zmd);
	kfree(zmd);
}
/*
 * Check zone information on resume.
 */
int dmz_resume_metadata(struct dmz_metadata *zmd)
{
	struct dmz_dev *dev = zmd->dev;
	struct dm_zone *zone;
	sector_t wp_block;
	unsigned int i;
	int ret;

	/* Check zones */
	for (i = 0; i < dev->nr_zones; i++) {
		zone = dmz_get(zmd, i);
		if (!zone) {
			dmz_dev_err(dev, "Unable to get zone %u", i);
			return -EIO;
		}

		wp_block = zone->wp_block;

		ret = dmz_update_zone(zmd, zone);
		if (ret) {
			dmz_dev_err(dev, "Broken zone %u", i);
			return ret;
		}

		if (dmz_is_offline(zone)) {
			dmz_dev_warn(dev, "Zone %u is offline", i);
			continue;
		}

		/* Check write pointer */
		if (!dmz_is_seq(zone))
			zone->wp_block = 0;
		else if (zone->wp_block != wp_block) {
			dmz_dev_err(dev, "Zone %u: Invalid wp (%llu / %llu)",
				    i, (u64)zone->wp_block, (u64)wp_block);
			zone->wp_block = wp_block;
			dmz_invalidate_blocks(zmd, zone, zone->wp_block,
					      dev->zone_nr_blocks - zone->wp_block);
		}
	}

	return 0;
}