/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>
#include <linux/crc32.h>

#define	DM_MSG_PREFIX		"zoned metadata"

/*
 * Metadata version.
 */
#define DMZ_META_VER	1

/*
 * On-disk super block magic.
 */
#define DMZ_MAGIC	((((unsigned int)('D')) << 24) | \
			 (((unsigned int)('Z')) << 16) | \
			 (((unsigned int)('B')) <<  8) | \
			 ((unsigned int)('D')))

/*
 * On-disk super block.
 * This uses only 512 B but occupies a full 4KB block on disk. This block is
 * followed on disk by the mapping table of chunks to zones and the bitmap
 * blocks indicating zone block validity.
 * The overall resulting metadata format is:
 *    (1) Super block (1 block)
 *    (2) Chunk mapping table (nr_map_blocks)
 *    (3) Bitmap blocks (nr_bitmap_blocks)
 * All metadata blocks are stored in conventional zones, starting from
 * the first conventional zone found on disk.
 */
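/*
 * Illustrative sizing example (not part of the format definition): with
 * 4 KB metadata blocks and, say, 256 MB zones (65536 blocks per zone),
 * each zone needs 65536 / 8 = 8 KB of validity bitmap, i.e. 2 bitmap
 * blocks per zone, and one 4 KB mapping block describes 512 chunks.
 * The exact numbers depend on the device zone size; they are computed
 * at format time and stored in the super block fields below.
 */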
struct dmz_super {
	/* Magic number */
	__le32		magic;			/*   4 */

	/* Metadata version number */
	__le32		version;		/*   8 */

	/* Generation number */
	__le64		gen;			/*  16 */

	/* This block number */
	__le64		sb_block;		/*  24 */

	/* The number of metadata blocks, including this super block */
	__le32		nr_meta_blocks;		/*  28 */

	/* The number of sequential zones reserved for reclaim */
	__le32		nr_reserved_seq;	/*  32 */

	/* The number of entries in the mapping table */
	__le32		nr_chunks;		/*  36 */

	/* The number of blocks used for the chunk mapping table */
	__le32		nr_map_blocks;		/*  40 */

	/* The number of blocks used for the block bitmaps */
	__le32		nr_bitmap_blocks;	/*  44 */

	/* Checksum */
	__le32		crc;			/*  48 */

	/* Padding to full 512B sector */
	u8		reserved[464];		/* 512 */
};
/*
 * Chunk mapping entry: entries are indexed by chunk number
 * and give the zone ID (dzone_id) mapping the chunk on disk.
 * This zone may be sequential or random. If it is a sequential
 * zone, a second zone (bzone_id) used as a write buffer may
 * also be specified. This second zone will always be a randomly
 * writeable zone.
 */
struct dmz_map {
	__le32			dzone_id;
	__le32			bzone_id;
};

/*
 * Chunk mapping table metadata: 512 8-byte entries per 4KB block.
 */
#define DMZ_MAP_ENTRIES		(DMZ_BLOCK_SIZE / sizeof(struct dmz_map))
#define DMZ_MAP_ENTRIES_SHIFT	(ilog2(DMZ_MAP_ENTRIES))
#define DMZ_MAP_ENTRIES_MASK	(DMZ_MAP_ENTRIES - 1)
#define DMZ_MAP_UNMAPPED	UINT_MAX
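/*
 * Example of how these macros are used (illustrative only): for chunk
 * number c, the mapping entry lives in chunk map block
 * (c >> DMZ_MAP_ENTRIES_SHIFT) at index (c & DMZ_MAP_ENTRIES_MASK).
 * With 4 KB blocks and 8-byte entries, DMZ_MAP_ENTRIES is 512, so e.g.
 * chunk 1000 is entry 488 of the second mapping block. This is the
 * lookup done by dmz_set_chunk_mapping() and dmz_get_chunk_mapping()
 * below.
 */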
/*
 * Metadata block descriptor (for cached metadata blocks).
 */
struct dmz_mblock {
	struct rb_node		node;
	struct list_head	link;
	sector_t		no;
	atomic_t		ref;
	unsigned long		state;
	struct page		*page;
	void			*data;
};

/*
 * Metadata block state flags.
 */
enum {
	DMZ_META_DIRTY,
	DMZ_META_READING,
	DMZ_META_WRITING,
	DMZ_META_ERROR,
};

/*
 * Super block information (one per metadata set).
 */
struct dmz_sb {
	sector_t		block;
	struct dmz_mblock	*mblk;
	struct dmz_super	*sb;
};
/*
 * In-memory metadata.
 */
struct dmz_metadata {
	struct dmz_dev		*dev;

	sector_t		zone_bitmap_size;
	unsigned int		zone_nr_bitmap_blocks;

	unsigned int		nr_bitmap_blocks;
	unsigned int		nr_map_blocks;

	unsigned int		nr_useable_zones;
	unsigned int		nr_meta_blocks;
	unsigned int		nr_meta_zones;
	unsigned int		nr_data_zones;
	unsigned int		nr_rnd_zones;
	unsigned int		nr_reserved_seq;
	unsigned int		nr_chunks;

	/* Zone information array */
	struct dm_zone		*zones;

	struct dm_zone		*sb_zone;
	struct dmz_sb		sb[2];
	unsigned int		mblk_primary;
	u64			sb_gen;
	unsigned int		min_nr_mblks;
	unsigned int		max_nr_mblks;
	atomic_t		nr_mblks;
	struct rw_semaphore	mblk_sem;
	struct mutex		mblk_flush_lock;
	spinlock_t		mblk_lock;
	struct rb_root		mblk_rbtree;
	struct list_head	mblk_lru_list;
	struct list_head	mblk_dirty_list;
	struct shrinker		mblk_shrinker;

	/* Zone allocation management */
	struct mutex		map_lock;
	struct dmz_mblock	**map_mblk;
	unsigned int		nr_rnd;
	atomic_t		unmap_nr_rnd;
	struct list_head	unmap_rnd_list;
	struct list_head	map_rnd_list;

	unsigned int		nr_seq;
	atomic_t		unmap_nr_seq;
	struct list_head	unmap_seq_list;
	struct list_head	map_seq_list;

	atomic_t		nr_reserved_seq_zones;
	struct list_head	reserved_seq_zones_list;

	wait_queue_head_t	free_wq;
};
/*
 * Various accessors
 */
unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	return ((unsigned int)(zone - zmd->zones));
}

sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_sectors_shift;
}

sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_blocks_shift;
}

unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
{
	return zmd->nr_chunks;
}

unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd)
{
	return zmd->nr_rnd;
}

unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd)
{
	return atomic_read(&zmd->unmap_nr_rnd);
}
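/*
 * Illustrative note: because zones are stored in a flat array, a zone's
 * ID is simply its index, and its position on disk follows directly from
 * it. For example, assuming 524288-sector zones (zone_nr_sectors_shift
 * of 19) and 4 KB blocks (zone_nr_blocks_shift of 16), zone 10 starts at
 * sector 10 << 19 = 5242880 and at block 10 << 16 = 655360.
 */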
/*
 * Lock/unlock mapping table.
 * The map lock also protects all the zone lists.
 */
void dmz_lock_map(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->map_lock);
}

void dmz_unlock_map(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->map_lock);
}

/*
 * Lock/unlock metadata access. This is a "read" lock on a semaphore
 * that prevents metadata flush from running while metadata are being
 * modified. The actual metadata write mutual exclusion is achieved with
 * the map lock and zone state management (active and reclaim state are
 * mutually exclusive).
 */
void dmz_lock_metadata(struct dmz_metadata *zmd)
{
	down_read(&zmd->mblk_sem);
}

void dmz_unlock_metadata(struct dmz_metadata *zmd)
{
	up_read(&zmd->mblk_sem);
}

/*
 * Lock/unlock flush: prevent concurrent executions
 * of dmz_flush_metadata as well as metadata modification in reclaim
 * while flush is being executed.
 */
void dmz_lock_flush(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->mblk_flush_lock);
}

void dmz_unlock_flush(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->mblk_flush_lock);
}
/*
 * Allocate a metadata block.
 */
static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
					   sector_t mblk_no)
{
	struct dmz_mblock *mblk = NULL;

	/* See if we can reuse cached blocks */
	if (zmd->max_nr_mblks && atomic_read(&zmd->nr_mblks) > zmd->max_nr_mblks) {
		spin_lock(&zmd->mblk_lock);
		mblk = list_first_entry_or_null(&zmd->mblk_lru_list,
						struct dmz_mblock, link);
		if (mblk) {
			list_del_init(&mblk->link);
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			mblk->no = mblk_no;
		}
		spin_unlock(&zmd->mblk_lock);
		if (mblk)
			return mblk;
	}

	/* Allocate a new block */
	mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO);
	if (!mblk)
		return NULL;

	mblk->page = alloc_page(GFP_NOIO);
	if (!mblk->page) {
		kfree(mblk);
		return NULL;
	}

	RB_CLEAR_NODE(&mblk->node);
	INIT_LIST_HEAD(&mblk->link);
	atomic_set(&mblk->ref, 0);
	mblk->state = 0;
	mblk->no = mblk_no;
	mblk->data = page_address(mblk->page);

	atomic_inc(&zmd->nr_mblks);

	return mblk;
}
/*
 * Free a metadata block.
 */
static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	__free_pages(mblk->page, 0);
	kfree(mblk);

	atomic_dec(&zmd->nr_mblks);
}
/*
 * Insert a metadata block in the rbtree.
 */
static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	struct rb_root *root = &zmd->mblk_rbtree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct dmz_mblock *b;

	/* Figure out where to put the new node */
	while (*new) {
		b = container_of(*new, struct dmz_mblock, node);
		parent = *new;
		new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right);
	}

	/* Add new node and rebalance tree */
	rb_link_node(&mblk->node, parent, new);
	rb_insert_color(&mblk->node, root);
}
/*
 * Lookup a metadata block in the rbtree.
 */
static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd,
					    sector_t mblk_no)
{
	struct rb_root *root = &zmd->mblk_rbtree;
	struct rb_node *node = root->rb_node;
	struct dmz_mblock *mblk;

	while (node) {
		mblk = container_of(node, struct dmz_mblock, node);
		if (mblk->no == mblk_no)
			return mblk;
		node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
	}

	return NULL;
}
/*
 * Metadata block BIO end callback.
 */
static void dmz_mblock_bio_end_io(struct bio *bio)
{
	struct dmz_mblock *mblk = bio->bi_private;
	int flag;

	if (bio->bi_status)
		set_bit(DMZ_META_ERROR, &mblk->state);

	if (bio_op(bio) == REQ_OP_WRITE)
		flag = DMZ_META_WRITING;
	else
		flag = DMZ_META_READING;

	clear_bit_unlock(flag, &mblk->state);
	smp_mb__after_atomic();
	wake_up_bit(&mblk->state, flag);

	bio_put(bio);
}
/*
 * Read a metadata block from disk.
 */
static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd,
					   sector_t mblk_no)
{
	struct dmz_mblock *mblk;
	sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
	struct bio *bio;

	/* Get block and insert it */
	mblk = dmz_alloc_mblock(zmd, mblk_no);
	if (!mblk)
		return NULL;

	spin_lock(&zmd->mblk_lock);
	atomic_inc(&mblk->ref);
	set_bit(DMZ_META_READING, &mblk->state);
	dmz_insert_mblock(zmd, mblk);
	spin_unlock(&zmd->mblk_lock);

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		dmz_free_mblock(zmd, mblk);
		return NULL;
	}

	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, zmd->dev->bdev);
	bio->bi_private = mblk;
	bio->bi_end_io = dmz_mblock_bio_end_io;
	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
	submit_bio(bio);

	return mblk;
}
/*
 * Free metadata blocks.
 */
static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd,
					     unsigned long limit)
{
	struct dmz_mblock *mblk;
	unsigned long count = 0;

	if (!zmd->max_nr_mblks)
		return 0;

	while (!list_empty(&zmd->mblk_lru_list) &&
	       atomic_read(&zmd->nr_mblks) > zmd->min_nr_mblks &&
	       count < limit) {
		mblk = list_first_entry(&zmd->mblk_lru_list,
					struct dmz_mblock, link);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
		count++;
	}

	return count;
}
/*
 * For mblock shrinker: get the number of unused metadata blocks in the cache.
 */
static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
					       struct shrink_control *sc)
{
	struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);

	return atomic_read(&zmd->nr_mblks);
}

/*
 * For mblock shrinker: scan unused metadata blocks and shrink the cache.
 */
static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink,
					      struct shrink_control *sc)
{
	struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
	unsigned long count;

	spin_lock(&zmd->mblk_lock);
	count = dmz_shrink_mblock_cache(zmd, sc->nr_to_scan);
	spin_unlock(&zmd->mblk_lock);

	return count ? count : SHRINK_STOP;
}
/*
 * Release a metadata block.
 */
static void dmz_release_mblock(struct dmz_metadata *zmd,
			       struct dmz_mblock *mblk)
{

	if (!mblk)
		return;

	spin_lock(&zmd->mblk_lock);

	if (atomic_dec_and_test(&mblk->ref)) {
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			dmz_free_mblock(zmd, mblk);
		} else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) {
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
			dmz_shrink_mblock_cache(zmd, 1);
		}
	}

	spin_unlock(&zmd->mblk_lock);
}
/*
 * Get a metadata block from the rbtree. If the block
 * is not present, read it from disk.
 */
static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
					 sector_t mblk_no)
{
	struct dmz_mblock *mblk;

	/* Check rbtree */
	spin_lock(&zmd->mblk_lock);
	mblk = dmz_lookup_mblock(zmd, mblk_no);
	if (mblk) {
		/* Cache hit: remove block from LRU list */
		if (atomic_inc_return(&mblk->ref) == 1 &&
		    !test_bit(DMZ_META_DIRTY, &mblk->state))
			list_del_init(&mblk->link);
	}
	spin_unlock(&zmd->mblk_lock);

	if (!mblk) {
		/* Cache miss: read the block from disk */
		mblk = dmz_fetch_mblock(zmd, mblk_no);
		if (!mblk)
			return ERR_PTR(-ENOMEM);
	}

	/* Wait for on-going read I/O and check for error */
	wait_on_bit_io(&mblk->state, DMZ_META_READING,
		       TASK_UNINTERRUPTIBLE);
	if (test_bit(DMZ_META_ERROR, &mblk->state)) {
		dmz_release_mblock(zmd, mblk);
		return ERR_PTR(-EIO);
	}

	return mblk;
}
/*
 * Mark a metadata block dirty.
 */
static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	spin_lock(&zmd->mblk_lock);
	if (!test_and_set_bit(DMZ_META_DIRTY, &mblk->state))
		list_add_tail(&mblk->link, &zmd->mblk_dirty_list);
	spin_unlock(&zmd->mblk_lock);
}
/*
 * Issue a metadata block write BIO.
 */
static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
			     unsigned int set)
{
	sector_t block = zmd->sb[set].block + mblk->no;
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		set_bit(DMZ_META_ERROR, &mblk->state);
		return;
	}

	set_bit(DMZ_META_WRITING, &mblk->state);

	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, zmd->dev->bdev);
	bio->bi_private = mblk;
	bio->bi_end_io = dmz_mblock_bio_end_io;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
	submit_bio(bio);
}
/*
 * Read/write a metadata block.
 */
static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
			  struct page *page)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, zmd->dev->bdev);
	bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
	bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
/*
 * Write super block of the specified metadata set.
 */
static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
{
	sector_t block = zmd->sb[set].block;
	struct dmz_mblock *mblk = zmd->sb[set].mblk;
	struct dmz_super *sb = zmd->sb[set].sb;
	u64 sb_gen = zmd->sb_gen + 1;
	int ret;

	sb->magic = cpu_to_le32(DMZ_MAGIC);
	sb->version = cpu_to_le32(DMZ_META_VER);

	sb->gen = cpu_to_le64(sb_gen);

	sb->sb_block = cpu_to_le64(block);
	sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks);
	sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq);
	sb->nr_chunks = cpu_to_le32(zmd->nr_chunks);

	sb->nr_map_blocks = cpu_to_le32(zmd->nr_map_blocks);
	sb->nr_bitmap_blocks = cpu_to_le32(zmd->nr_bitmap_blocks);

	sb->crc = 0;
	sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE));

	ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
	if (ret == 0)
		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);

	return ret;
}
/*
 * Write dirty metadata blocks to the specified set.
 */
static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
				   struct list_head *write_list,
				   unsigned int set)
{
	struct dmz_mblock *mblk;
	struct blk_plug plug;
	int ret = 0;

	/* Issue writes */
	blk_start_plug(&plug);
	list_for_each_entry(mblk, write_list, link)
		dmz_write_mblock(zmd, mblk, set);
	blk_finish_plug(&plug);

	/* Wait for completion */
	list_for_each_entry(mblk, write_list, link) {
		wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
			       TASK_UNINTERRUPTIBLE);
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			clear_bit(DMZ_META_ERROR, &mblk->state);
			ret = -EIO;
		}
	}

	/* Flush drive cache (this will also sync data) */
	if (ret == 0)
		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);

	return ret;
}
/*
 * Log dirty metadata blocks.
 */
static int dmz_log_dirty_mblocks(struct dmz_metadata *zmd,
				 struct list_head *write_list)
{
	unsigned int log_set = zmd->mblk_primary ^ 0x1;
	int ret;

	/* Write dirty blocks to the log */
	ret = dmz_write_dirty_mblocks(zmd, write_list, log_set);
	if (ret)
		return ret;

	/*
	 * No error so far: now validate the log by updating the
	 * log index super block generation.
	 */
	ret = dmz_write_sb(zmd, log_set);
	if (ret)
		return ret;

	return 0;
}
/*
 * Flush dirty metadata blocks.
 */
int dmz_flush_metadata(struct dmz_metadata *zmd)
{
	struct dmz_mblock *mblk;
	struct list_head write_list;
	int ret;

	INIT_LIST_HEAD(&write_list);

	/*
	 * Make sure that metadata blocks are stable before logging: take
	 * the write lock on the metadata semaphore to prevent target BIOs
	 * from modifying metadata.
	 */
	down_write(&zmd->mblk_sem);

	/*
	 * This is called from the target flush work and reclaim work.
	 * Concurrent execution is not allowed.
	 */
	dmz_lock_flush(zmd);

	/* Get dirty blocks */
	spin_lock(&zmd->mblk_lock);
	list_splice_init(&zmd->mblk_dirty_list, &write_list);
	spin_unlock(&zmd->mblk_lock);

	/* If there are no dirty metadata blocks, just flush the device cache */
	if (list_empty(&write_list)) {
		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
		goto out;
	}

	/*
	 * The primary metadata set is still clean. Keep it this way until
	 * all updates are successful in the secondary set. That is, use
	 * the secondary set as a log.
	 */
	ret = dmz_log_dirty_mblocks(zmd, &write_list);
	if (ret)
		goto out;

	/*
	 * The log is on disk. It is now safe to update in place
	 * in the primary metadata set.
	 */
	ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary);
	if (ret)
		goto out;

	ret = dmz_write_sb(zmd, zmd->mblk_primary);
	if (ret)
		goto out;

	while (!list_empty(&write_list)) {
		mblk = list_first_entry(&write_list, struct dmz_mblock, link);
		list_del_init(&mblk->link);

		spin_lock(&zmd->mblk_lock);
		clear_bit(DMZ_META_DIRTY, &mblk->state);
		if (atomic_read(&mblk->ref) == 0)
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
		spin_unlock(&zmd->mblk_lock);
	}

	zmd->sb_gen++;
out:
	if (ret && !list_empty(&write_list)) {
		spin_lock(&zmd->mblk_lock);
		list_splice(&write_list, &zmd->mblk_dirty_list);
		spin_unlock(&zmd->mblk_lock);
	}

	dmz_unlock_flush(zmd);
	up_write(&zmd->mblk_sem);

	return ret;
}
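/*
 * Crash-consistency note (informational): while a flush is in progress,
 * the secondary (log) set temporarily carries generation sb_gen + 1
 * while the primary still carries sb_gen. If the system stops between
 * the two dmz_write_sb() calls, dmz_load_sb() below picks the set with
 * the higher generation as the primary; the stale set is either simply
 * overwritten by the next flush or, if its super block fails the
 * checksum, recovered by dmz_recover_mblocks().
 */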
/*
 * Check super block information validity.
 */
static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_super *sb)
{
	unsigned int nr_meta_zones, nr_data_zones;
	struct dmz_dev *dev = zmd->dev;
	u32 crc, stored_crc;
	u64 gen;

	gen = le64_to_cpu(sb->gen);
	stored_crc = le32_to_cpu(sb->crc);
	sb->crc = 0;
	crc = crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE);
	if (crc != stored_crc) {
		dmz_dev_err(dev, "Invalid checksum (needed 0x%08x, got 0x%08x)",
			    crc, stored_crc);
		return -ENXIO;
	}

	if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
		dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
			    DMZ_MAGIC, le32_to_cpu(sb->magic));
		return -ENXIO;
	}

	if (le32_to_cpu(sb->version) != DMZ_META_VER) {
		dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
			    DMZ_META_VER, le32_to_cpu(sb->version));
		return -ENXIO;
	}

	nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + dev->zone_nr_blocks - 1)
		>> dev->zone_nr_blocks_shift;
	if (!nr_meta_zones ||
	    nr_meta_zones >= zmd->nr_rnd_zones) {
		dmz_dev_err(dev, "Invalid number of metadata blocks");
		return -ENXIO;
	}

	if (!le32_to_cpu(sb->nr_reserved_seq) ||
	    le32_to_cpu(sb->nr_reserved_seq) >= (zmd->nr_useable_zones - nr_meta_zones)) {
		dmz_dev_err(dev, "Invalid number of reserved sequential zones");
		return -ENXIO;
	}

	nr_data_zones = zmd->nr_useable_zones -
		(nr_meta_zones * 2 + le32_to_cpu(sb->nr_reserved_seq));
	if (le32_to_cpu(sb->nr_chunks) > nr_data_zones) {
		dmz_dev_err(dev, "Invalid number of chunks %u / %u",
			    le32_to_cpu(sb->nr_chunks), nr_data_zones);
		return -ENXIO;
	}

	/* OK */
	zmd->nr_meta_blocks = le32_to_cpu(sb->nr_meta_blocks);
	zmd->nr_reserved_seq = le32_to_cpu(sb->nr_reserved_seq);
	zmd->nr_chunks = le32_to_cpu(sb->nr_chunks);
	zmd->nr_map_blocks = le32_to_cpu(sb->nr_map_blocks);
	zmd->nr_bitmap_blocks = le32_to_cpu(sb->nr_bitmap_blocks);
	zmd->nr_meta_zones = nr_meta_zones;
	zmd->nr_data_zones = nr_data_zones;

	return 0;
}
/*
 * Read the first or second super block from disk.
 */
static int dmz_read_sb(struct dmz_metadata *zmd, unsigned int set)
{
	return dmz_rdwr_block(zmd, REQ_OP_READ, zmd->sb[set].block,
			      zmd->sb[set].mblk->page);
}
/*
 * Determine the position of the secondary super blocks on disk.
 * This is used only if a corruption of the primary super block
 * is detected.
 */
static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
{
	unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
	struct dmz_mblock *mblk;
	int i;

	/* Allocate a block */
	mblk = dmz_alloc_mblock(zmd, 0);
	if (!mblk)
		return -ENOMEM;

	zmd->sb[1].mblk = mblk;
	zmd->sb[1].sb = mblk->data;

	/* Bad first super block: search for the second one */
	zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks;
	for (i = 0; i < zmd->nr_rnd_zones - 1; i++) {
		if (dmz_read_sb(zmd, 1) != 0)
			break;
		if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC)
			return 0;
		zmd->sb[1].block += zone_nr_blocks;
	}

	dmz_free_mblock(zmd, mblk);
	zmd->sb[1].mblk = NULL;

	return -EIO;
}
/*
 * Read the first or second super block from disk.
 */
static int dmz_get_sb(struct dmz_metadata *zmd, unsigned int set)
{
	struct dmz_mblock *mblk;
	int ret;

	/* Allocate a block */
	mblk = dmz_alloc_mblock(zmd, 0);
	if (!mblk)
		return -ENOMEM;

	zmd->sb[set].mblk = mblk;
	zmd->sb[set].sb = mblk->data;

	/* Read super block */
	ret = dmz_read_sb(zmd, set);
	if (ret) {
		dmz_free_mblock(zmd, mblk);
		zmd->sb[set].mblk = NULL;
	}

	return ret;
}
/*
 * Recover a metadata set.
 */
static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
{
	unsigned int src_set = dst_set ^ 0x1;
	struct page *page;
	int i, ret;

	dmz_dev_warn(zmd->dev, "Metadata set %u invalid: recovering", dst_set);

	if (dst_set == 0)
		zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
	else
		zmd->sb[1].block = zmd->sb[0].block +
			(zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);

	page = alloc_page(GFP_NOIO);
	if (!page)
		return -ENOMEM;

	/* Copy metadata blocks */
	for (i = 1; i < zmd->nr_meta_blocks; i++) {
		ret = dmz_rdwr_block(zmd, REQ_OP_READ,
				     zmd->sb[src_set].block + i, page);
		if (ret)
			goto out;
		ret = dmz_rdwr_block(zmd, REQ_OP_WRITE,
				     zmd->sb[dst_set].block + i, page);
		if (ret)
			goto out;
	}

	/* Finalize with the super block */
	if (!zmd->sb[dst_set].mblk) {
		zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0);
		if (!zmd->sb[dst_set].mblk) {
			ret = -ENOMEM;
			goto out;
		}
		zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data;
	}

	ret = dmz_write_sb(zmd, dst_set);
out:
	__free_pages(page, 0);

	return ret;
}
/*
 * Get super block from disk.
 */
static int dmz_load_sb(struct dmz_metadata *zmd)
{
	bool sb_good[2] = {false, false};
	u64 sb_gen[2] = {0, 0};
	int ret;

	/* Read and check the primary super block */
	zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
	ret = dmz_get_sb(zmd, 0);
	if (ret) {
		dmz_dev_err(zmd->dev, "Read primary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, zmd->sb[0].sb);

	/* Read and check secondary super block */
	if (ret == 0) {
		sb_good[0] = true;
		zmd->sb[1].block = zmd->sb[0].block +
			(zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
		ret = dmz_get_sb(zmd, 1);
	} else
		ret = dmz_lookup_secondary_sb(zmd);

	if (ret) {
		dmz_dev_err(zmd->dev, "Read secondary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, zmd->sb[1].sb);
	if (ret == 0)
		sb_good[1] = true;

	/* Use highest generation sb first */
	if (!sb_good[0] && !sb_good[1]) {
		dmz_dev_err(zmd->dev, "No valid super block found");
		return -EIO;
	}

	if (sb_good[0])
		sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen);
	else
		ret = dmz_recover_mblocks(zmd, 0);

	if (sb_good[1])
		sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen);
	else
		ret = dmz_recover_mblocks(zmd, 1);

	if (ret) {
		dmz_dev_err(zmd->dev, "Recovery failed");
		return -EIO;
	}

	if (sb_gen[0] >= sb_gen[1]) {
		zmd->sb_gen = sb_gen[0];
		zmd->mblk_primary = 0;
	} else {
		zmd->sb_gen = sb_gen[1];
		zmd->mblk_primary = 1;
	}

	dmz_dev_debug(zmd->dev, "Using super block %u (gen %llu)",
		      zmd->mblk_primary, zmd->sb_gen);

	return 0;
}
/*
 * Initialize a zone descriptor.
 */
static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
			 struct blk_zone *blkz)
{
	struct dmz_dev *dev = zmd->dev;

	/* Ignore the eventual last runt (smaller) zone */
	if (blkz->len != dev->zone_nr_sectors) {
		if (blkz->start + blkz->len == dev->capacity)
			return 0;
		return -ENXIO;
	}

	INIT_LIST_HEAD(&zone->link);
	atomic_set(&zone->refcount, 0);
	zone->chunk = DMZ_MAP_UNMAPPED;

	if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL) {
		set_bit(DMZ_RND, &zone->flags);
		zmd->nr_rnd_zones++;
	} else if (blkz->type == BLK_ZONE_TYPE_SEQWRITE_REQ ||
		   blkz->type == BLK_ZONE_TYPE_SEQWRITE_PREF) {
		set_bit(DMZ_SEQ, &zone->flags);
	} else
		return -ENXIO;

	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
		set_bit(DMZ_OFFLINE, &zone->flags);
	else if (blkz->cond == BLK_ZONE_COND_READONLY)
		set_bit(DMZ_READ_ONLY, &zone->flags);

	if (dmz_is_rnd(zone))
		zone->wp_block = 0;
	else
		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);

	if (!dmz_is_offline(zone) && !dmz_is_readonly(zone)) {
		zmd->nr_useable_zones++;
		if (dmz_is_rnd(zone)) {
			zmd->nr_rnd_zones++;
			if (!zmd->sb_zone) {
				/* Super block zone */
				zmd->sb_zone = zone;
			}
		}
	}

	return 0;
}
/*
 * Free zone descriptors.
 */
static void dmz_drop_zones(struct dmz_metadata *zmd)
{
	kfree(zmd->zones);
	zmd->zones = NULL;
}

/*
 * The size of a zone report in number of zones.
 * This results in 4096*64B=256KB report zones commands.
 */
#define DMZ_REPORT_NR_ZONES	4096

/*
 * Allocate and initialize zone descriptors using the zone
 * information from disk.
 */
static int dmz_init_zones(struct dmz_metadata *zmd)
{
	struct dmz_dev *dev = zmd->dev;
	struct dm_zone *zone;
	struct blk_zone *blkz;
	unsigned int nr_blkz;
	sector_t sector = 0;
	int i, ret = 0;

	/* Init */
	zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
	zmd->zone_nr_bitmap_blocks = zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT;

	/* Allocate zone array */
	zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL);
	if (!zmd->zones)
		return -ENOMEM;

	dmz_dev_info(dev, "Using %zu B for zone information",
		     sizeof(struct dm_zone) * dev->nr_zones);

	/* Get zone information */
	nr_blkz = DMZ_REPORT_NR_ZONES;
	blkz = kcalloc(nr_blkz, sizeof(struct blk_zone), GFP_KERNEL);
	if (!blkz) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Get zone information and initialize zone descriptors.
	 * At the same time, determine where the super block
	 * should be: first block of the first randomly writable
	 * zone.
	 */
	zone = zmd->zones;
	while (sector < dev->capacity) {
		/* Get zone information */
		nr_blkz = DMZ_REPORT_NR_ZONES;
		ret = blkdev_report_zones(dev->bdev, sector, blkz,
					  &nr_blkz, GFP_KERNEL);
		if (ret) {
			dmz_dev_err(dev, "Report zones failed %d", ret);
			goto out;
		}

		/* Process report */
		for (i = 0; i < nr_blkz; i++) {
			ret = dmz_init_zone(zmd, zone, &blkz[i]);
			if (ret)
				goto out;
			sector += dev->zone_nr_sectors;
			zone++;
		}
	}

	/* The entire zone configuration of the disk should now be known */
	if (sector < dev->capacity) {
		dmz_dev_err(dev, "Failed to get correct zone information");
		ret = -ENXIO;
	}
out:
	kfree(blkz);
	if (ret)
		dmz_drop_zones(zmd);

	return ret;
}
/*
 * Update zone information.
 */
static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int nr_blkz = 1;
	struct blk_zone blkz;
	int ret;

	/* Get zone information from disk */
	ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
				  &blkz, &nr_blkz, GFP_NOIO);
	if (ret) {
		dmz_dev_err(zmd->dev, "Get zone %u report failed",
			    dmz_id(zmd, zone));
		return ret;
	}

	clear_bit(DMZ_OFFLINE, &zone->flags);
	clear_bit(DMZ_READ_ONLY, &zone->flags);
	if (blkz.cond == BLK_ZONE_COND_OFFLINE)
		set_bit(DMZ_OFFLINE, &zone->flags);
	else if (blkz.cond == BLK_ZONE_COND_READONLY)
		set_bit(DMZ_READ_ONLY, &zone->flags);

	if (dmz_is_seq(zone))
		zone->wp_block = dmz_sect2blk(blkz.wp - blkz.start);
	else
		zone->wp_block = 0;

	return 0;
}
/*
 * Check a zone write pointer position when the zone is marked
 * with the sequential write error flag.
 */
static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
				    struct dm_zone *zone)
{
	unsigned int wp = 0;
	int ret;

	wp = zone->wp_block;
	ret = dmz_update_zone(zmd, zone);
	if (ret)
		return ret;

	dmz_dev_warn(zmd->dev, "Processing zone %u write error (zone wp %u/%u)",
		     dmz_id(zmd, zone), zone->wp_block, wp);

	if (zone->wp_block < wp) {
		dmz_invalidate_blocks(zmd, zone, zone->wp_block,
				      wp - zone->wp_block);
	}

	return 0;
}

static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
{
	return &zmd->zones[zone_id];
}
/*
 * Reset a zone write pointer.
 */
static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	int ret;

	/*
	 * Ignore offline zones, read only zones,
	 * and conventional zones.
	 */
	if (dmz_is_offline(zone) ||
	    dmz_is_readonly(zone) ||
	    dmz_is_rnd(zone))
		return 0;

	if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
		struct dmz_dev *dev = zmd->dev;

		ret = blkdev_reset_zones(dev->bdev,
					 dmz_start_sect(zmd, zone),
					 dev->zone_nr_sectors, GFP_NOIO);
		if (ret) {
			dmz_dev_err(dev, "Reset zone %u failed %d",
				    dmz_id(zmd, zone), ret);
			return ret;
		}
	}

	/* Clear write error bit and rewind write pointer position */
	clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
	zone->wp_block = 0;

	return 0;
}
static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);

/*
 * Initialize chunk mapping.
 */
static int dmz_load_mapping(struct dmz_metadata *zmd)
{
	struct dmz_dev *dev = zmd->dev;
	struct dm_zone *dzone, *bzone;
	struct dmz_mblock *dmap_mblk = NULL;
	struct dmz_map *dmap;
	unsigned int i = 0, e = 0, chunk = 0;
	unsigned int dzone_id;
	unsigned int bzone_id;

	/* Metadata block array for the chunk mapping table */
	zmd->map_mblk = kcalloc(zmd->nr_map_blocks,
				sizeof(struct dmz_mblock *), GFP_KERNEL);
	if (!zmd->map_mblk)
		return -ENOMEM;

	/* Get chunk mapping table blocks and initialize zone mapping */
	while (chunk < zmd->nr_chunks) {
		if (!dmap_mblk) {
			/* Get mapping block */
			dmap_mblk = dmz_get_mblock(zmd, i + 1);
			if (IS_ERR(dmap_mblk))
				return PTR_ERR(dmap_mblk);
			zmd->map_mblk[i] = dmap_mblk;
			dmap = (struct dmz_map *) dmap_mblk->data;
			i++;
			e = 0;
		}

		/* Check data zone */
		dzone_id = le32_to_cpu(dmap[e].dzone_id);
		if (dzone_id == DMZ_MAP_UNMAPPED)
			goto next;

		if (dzone_id >= dev->nr_zones) {
			dmz_dev_err(dev, "Chunk %u mapping: invalid data zone ID %u",
				    chunk, dzone_id);
			return -EIO;
		}

		dzone = dmz_get(zmd, dzone_id);
		set_bit(DMZ_DATA, &dzone->flags);
		dzone->chunk = chunk;
		dmz_get_zone_weight(zmd, dzone);

		if (dmz_is_rnd(dzone))
			list_add_tail(&dzone->link, &zmd->map_rnd_list);
		else
			list_add_tail(&dzone->link, &zmd->map_seq_list);

		/* Check buffer zone */
		bzone_id = le32_to_cpu(dmap[e].bzone_id);
		if (bzone_id == DMZ_MAP_UNMAPPED)
			goto next;

		if (bzone_id >= dev->nr_zones) {
			dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone ID %u",
				    chunk, bzone_id);
			return -EIO;
		}

		bzone = dmz_get(zmd, bzone_id);
		if (!dmz_is_rnd(bzone)) {
			dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone %u",
				    chunk, bzone_id);
			return -EIO;
		}

		set_bit(DMZ_DATA, &bzone->flags);
		set_bit(DMZ_BUF, &bzone->flags);
		bzone->chunk = chunk;
		bzone->bzone = dzone;
		dzone->bzone = bzone;
		dmz_get_zone_weight(zmd, bzone);
		list_add_tail(&bzone->link, &zmd->map_rnd_list);
next:
		chunk++;
		e++;
		if (e >= DMZ_MAP_ENTRIES)
			dmap_mblk = NULL;
	}

	/*
	 * At this point, only meta zones and mapped data zones were
	 * fully initialized. All remaining zones are unmapped data
	 * zones. Finish initializing those here.
	 */
	for (i = 0; i < dev->nr_zones; i++) {
		dzone = dmz_get(zmd, i);
		if (dmz_is_meta(dzone))
			continue;

		if (dmz_is_rnd(dzone))
			zmd->nr_rnd++;
		else
			zmd->nr_seq++;

		if (dmz_is_data(dzone)) {
			/* Already initialized */
			continue;
		}

		/* Unmapped data zone */
		set_bit(DMZ_DATA, &dzone->flags);
		dzone->chunk = DMZ_MAP_UNMAPPED;
		if (dmz_is_rnd(dzone)) {
			list_add_tail(&dzone->link, &zmd->unmap_rnd_list);
			atomic_inc(&zmd->unmap_nr_rnd);
		} else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) {
			list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list);
			atomic_inc(&zmd->nr_reserved_seq_zones);
		} else {
			list_add_tail(&dzone->link, &zmd->unmap_seq_list);
			atomic_inc(&zmd->unmap_nr_seq);
		}
	}

	return 0;
}
/*
 * Set a data chunk mapping.
 */
static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk,
				  unsigned int dzone_id, unsigned int bzone_id)
{
	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
	struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
	int map_idx = chunk & DMZ_MAP_ENTRIES_MASK;

	dmap[map_idx].dzone_id = cpu_to_le32(dzone_id);
	dmap[map_idx].bzone_id = cpu_to_le32(bzone_id);
	dmz_dirty_mblock(zmd, dmap_mblk);
}
/*
 * The list of mapped zones is maintained in LRU order.
 * This rotates a zone at the end of its map list.
 */
static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	if (list_empty(&zone->link))
		return;

	list_del_init(&zone->link);
	if (dmz_is_seq(zone)) {
		/* LRU rotate sequential zone */
		list_add_tail(&zone->link, &zmd->map_seq_list);
	} else {
		/* LRU rotate random zone */
		list_add_tail(&zone->link, &zmd->map_rnd_list);
	}
}

/*
 * The list of mapped random zones is maintained
 * in LRU order. This rotates a zone at the end of the list.
 */
static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	__dmz_lru_zone(zmd, zone);
	if (zone->bzone)
		__dmz_lru_zone(zmd, zone->bzone);
}
/*
 * Wait for any zone to be freed.
 */
static void dmz_wait_for_free_zones(struct dmz_metadata *zmd)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&zmd->free_wq, &wait, TASK_UNINTERRUPTIBLE);
	dmz_unlock_map(zmd);
	dmz_unlock_metadata(zmd);

	io_schedule_timeout(HZ);

	dmz_lock_metadata(zmd);
	dmz_lock_map(zmd);
	finish_wait(&zmd->free_wq, &wait);
}
/*
 * Lock a zone for reclaim (set the zone RECLAIM bit).
 * Returns false if the zone cannot be locked or if it is already locked,
 * and true otherwise.
 */
int dmz_lock_zone_reclaim(struct dm_zone *zone)
{
	/* Active zones cannot be reclaimed */
	if (dmz_is_active(zone))
		return 0;

	return !test_and_set_bit(DMZ_RECLAIM, &zone->flags);
}

/*
 * Clear a zone reclaim flag.
 */
void dmz_unlock_zone_reclaim(struct dm_zone *zone)
{
	WARN_ON(dmz_is_active(zone));
	WARN_ON(!dmz_in_reclaim(zone));

	clear_bit_unlock(DMZ_RECLAIM, &zone->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zone->flags, DMZ_RECLAIM);
}

/*
 * Wait for a zone reclaim to complete.
 */
static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	dmz_unlock_map(zmd);
	dmz_unlock_metadata(zmd);
	wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
	dmz_lock_metadata(zmd);
	dmz_lock_map(zmd);
}
/*
 * Select a random write zone for reclaim.
 */
static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
{
	struct dm_zone *dzone = NULL;
	struct dm_zone *zone;

	if (list_empty(&zmd->map_rnd_list))
		return NULL;

	list_for_each_entry(zone, &zmd->map_rnd_list, link) {
		if (dmz_is_buf(zone))
			dzone = zone->bzone;
		else
			dzone = zone;
		if (dmz_lock_zone_reclaim(dzone))
			return dzone;
	}

	return NULL;
}

/*
 * Select a buffered sequential zone for reclaim.
 */
static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
{
	struct dm_zone *zone;

	if (list_empty(&zmd->map_seq_list))
		return NULL;

	list_for_each_entry(zone, &zmd->map_seq_list, link) {
		if (!zone->bzone)
			continue;
		if (dmz_lock_zone_reclaim(zone))
			return zone;
	}

	return NULL;
}

/*
 * Select a zone for reclaim.
 */
struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd)
{
	struct dm_zone *zone;

	/*
	 * Search for a zone candidate to reclaim: 2 cases are possible.
	 * (1) There are no free sequential zones. Then a random data zone
	 *     cannot be reclaimed. So choose a sequential zone to reclaim so
	 *     that afterward a random zone can be reclaimed.
	 * (2) At least one free sequential zone is available, then choose
	 *     the oldest random zone (data or buffer) that can be locked.
	 */
	dmz_lock_map(zmd);
	if (list_empty(&zmd->reserved_seq_zones_list))
		zone = dmz_get_seq_zone_for_reclaim(zmd);
	else
		zone = dmz_get_rnd_zone_for_reclaim(zmd);
	dmz_unlock_map(zmd);

	return zone;
}
/*
 * Activate a zone (increment its reference count).
 */
void dmz_activate_zone(struct dm_zone *zone)
{
	set_bit(DMZ_ACTIVE, &zone->flags);
	atomic_inc(&zone->refcount);
}

/*
 * Deactivate a zone. This decrements the zone reference counter
 * and clears the active state of the zone once the count reaches 0,
 * indicating that all BIOs to the zone have completed.
 */
void dmz_deactivate_zone(struct dm_zone *zone)
{
	if (atomic_dec_and_test(&zone->refcount)) {
		WARN_ON(!test_bit(DMZ_ACTIVE, &zone->flags));
		clear_bit_unlock(DMZ_ACTIVE, &zone->flags);
		smp_mb__after_atomic();
	}
}
/*
 * Get the zone mapping a chunk, if the chunk is mapped already.
 * If no mapping exists and the operation is WRITE, a zone is
 * allocated and used to map the chunk.
 * The zone returned will be set to the active state.
 */
struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, int op)
{
	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
	struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
	int dmap_idx = chunk & DMZ_MAP_ENTRIES_MASK;
	unsigned int dzone_id;
	struct dm_zone *dzone = NULL;
	int ret = 0;

	dmz_lock_map(zmd);
again:
	/* Get the chunk mapping */
	dzone_id = le32_to_cpu(dmap[dmap_idx].dzone_id);
	if (dzone_id == DMZ_MAP_UNMAPPED) {
		/*
		 * Read or discard in unmapped chunks are fine. But for
		 * writes, we need a mapping, so get one.
		 */
		if (op != REQ_OP_WRITE)
			goto out;

		/* Allocate a random zone */
		dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
		if (!dzone) {
			dmz_wait_for_free_zones(zmd);
			goto again;
		}

		dmz_map_zone(zmd, dzone, chunk);

	} else {
		/* The chunk is already mapped: get the mapping zone */
		dzone = dmz_get(zmd, dzone_id);
		if (dzone->chunk != chunk) {
			dzone = ERR_PTR(-EIO);
			goto out;
		}

		/* Repair write pointer if the sequential dzone has error */
		if (dmz_seq_write_err(dzone)) {
			ret = dmz_handle_seq_write_err(zmd, dzone);
			if (ret) {
				dzone = ERR_PTR(-EIO);
				goto out;
			}
			clear_bit(DMZ_SEQ_WRITE_ERR, &dzone->flags);
		}
	}

	/*
	 * If the zone is being reclaimed, the chunk mapping may change
	 * to a different zone. So wait for reclaim and retry. Otherwise,
	 * activate the zone (this will prevent reclaim from touching it).
	 */
	if (dmz_in_reclaim(dzone)) {
		dmz_wait_for_reclaim(zmd, dzone);
		goto again;
	}
	dmz_activate_zone(dzone);
	dmz_lru_zone(zmd, dzone);
out:
	dmz_unlock_map(zmd);

	return dzone;
}
/*
 * Write and discard change the block validity of data zones and their buffer
 * zones. Check here that valid blocks are still present. If all blocks are
 * invalid, the zones can be unmapped on the fly without waiting for reclaim
 * to do it.
 */
void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *dzone)
{
	struct dm_zone *bzone;

	dmz_lock_map(zmd);

	bzone = dzone->bzone;
	if (bzone) {
		if (dmz_weight(bzone))
			dmz_lru_zone(zmd, bzone);
		else {
			/* Empty buffer zone: reclaim it */
			dmz_unmap_zone(zmd, bzone);
			dmz_free_zone(zmd, bzone);
			bzone = NULL;
		}
	}

	/* Deactivate the data zone */
	dmz_deactivate_zone(dzone);
	if (dmz_is_active(dzone) || bzone || dmz_weight(dzone))
		dmz_lru_zone(zmd, dzone);
	else {
		/* Unbuffered inactive empty data zone: reclaim it */
		dmz_unmap_zone(zmd, dzone);
		dmz_free_zone(zmd, dzone);
	}

	dmz_unlock_map(zmd);
}
/*
 * Allocate and map a random zone to buffer a chunk
 * already mapped to a sequential zone.
 */
struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
				     struct dm_zone *dzone)
{
	struct dm_zone *bzone;

	dmz_lock_map(zmd);
again:
	bzone = dzone->bzone;
	if (bzone)
		goto out;

	/* Allocate a random zone */
	bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
	if (!bzone) {
		dmz_wait_for_free_zones(zmd);
		goto again;
	}

	/* Update the chunk mapping */
	dmz_set_chunk_mapping(zmd, dzone->chunk, dmz_id(zmd, dzone),
			      dmz_id(zmd, bzone));

	set_bit(DMZ_BUF, &bzone->flags);
	bzone->chunk = dzone->chunk;
	bzone->bzone = dzone;
	dzone->bzone = bzone;
	list_add_tail(&bzone->link, &zmd->map_rnd_list);
out:
	dmz_unlock_map(zmd);

	return bzone;
}
/*
 * Get an unmapped (free) zone.
 * This must be called with the mapping lock held.
 */
struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags)
{
	struct list_head *list;
	struct dm_zone *zone;

	if (flags & DMZ_ALLOC_RND)
		list = &zmd->unmap_rnd_list;
	else
		list = &zmd->unmap_seq_list;
again:
	if (list_empty(list)) {
		/*
		 * No free zone: if this is for reclaim, allow using the
		 * reserved sequential zones.
		 */
		if (!(flags & DMZ_ALLOC_RECLAIM) ||
		    list_empty(&zmd->reserved_seq_zones_list))
			return NULL;

		zone = list_first_entry(&zmd->reserved_seq_zones_list,
					struct dm_zone, link);
		list_del_init(&zone->link);
		atomic_dec(&zmd->nr_reserved_seq_zones);
		return zone;
	}

	zone = list_first_entry(list, struct dm_zone, link);
	list_del_init(&zone->link);

	if (dmz_is_rnd(zone))
		atomic_dec(&zmd->unmap_nr_rnd);
	else
		atomic_dec(&zmd->unmap_nr_seq);

	if (dmz_is_offline(zone)) {
		dmz_dev_warn(zmd->dev, "Zone %u is offline", dmz_id(zmd, zone));
		zone = NULL;
		goto again;
	}

	return zone;
}
/*
 * Free a zone.
 * This must be called with the mapping lock held.
 */
void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	/* If this is a sequential zone, reset it */
	if (dmz_is_seq(zone))
		dmz_reset_zone(zmd, zone);

	/* Return the zone to its type unmap list */
	if (dmz_is_rnd(zone)) {
		list_add_tail(&zone->link, &zmd->unmap_rnd_list);
		atomic_inc(&zmd->unmap_nr_rnd);
	} else if (atomic_read(&zmd->nr_reserved_seq_zones) <
		   zmd->nr_reserved_seq) {
		list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
		atomic_inc(&zmd->nr_reserved_seq_zones);
	} else {
		list_add_tail(&zone->link, &zmd->unmap_seq_list);
		atomic_inc(&zmd->unmap_nr_seq);
	}

	wake_up_all(&zmd->free_wq);
}
/*
 * Map a chunk to a zone.
 * This must be called with the mapping lock held.
 */
void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
		  unsigned int chunk)
{
	/* Set the chunk mapping */
	dmz_set_chunk_mapping(zmd, chunk, dmz_id(zmd, dzone),
			      DMZ_MAP_UNMAPPED);
	dzone->chunk = chunk;
	if (dmz_is_rnd(dzone))
		list_add_tail(&dzone->link, &zmd->map_rnd_list);
	else
		list_add_tail(&dzone->link, &zmd->map_seq_list);
}
/*
 * Unmap a zone.
 * This must be called with the mapping lock held.
 */
void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int chunk = zone->chunk;
	unsigned int dzone_id;

	if (chunk == DMZ_MAP_UNMAPPED) {
		/* Already unmapped */
		return;
	}

	if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
		/*
		 * Unmapping the chunk buffer zone: clear only
		 * the chunk buffer mapping
		 */
		dzone_id = dmz_id(zmd, zone->bzone);
		zone->bzone->bzone = NULL;
		zone->bzone = NULL;
	} else {
		/*
		 * Unmapping the chunk data zone: the zone must
		 * not be buffered.
		 */
		if (WARN_ON(zone->bzone)) {
			zone->bzone->bzone = NULL;
			zone->bzone = NULL;
		}
		dzone_id = DMZ_MAP_UNMAPPED;
	}

	dmz_set_chunk_mapping(zmd, chunk, dzone_id, DMZ_MAP_UNMAPPED);

	zone->chunk = DMZ_MAP_UNMAPPED;
	list_del_init(&zone->link);
}
/*
 * Set @nr_bits bits in @bitmap starting from @bit.
 * Return the number of bits changed from 0 to 1.
 */
static unsigned int dmz_set_bits(unsigned long *bitmap,
				 unsigned int bit, unsigned int nr_bits)
{
	unsigned long *addr;
	unsigned int end = bit + nr_bits;
	unsigned int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Try to set the whole word at once */
			addr = bitmap + BIT_WORD(bit);
			if (*addr == 0) {
				*addr = ULONG_MAX;
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (!test_and_set_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}
/*
 * Get the bitmap block storing the bit for chunk_block in zone.
 */
static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
					 struct dm_zone *zone,
					 sector_t chunk_block)
{
	sector_t bitmap_block = 1 + zmd->nr_map_blocks +
		(sector_t)(dmz_id(zmd, zone) * zmd->zone_nr_bitmap_blocks) +
		(chunk_block >> DMZ_BLOCK_SHIFT_BITS);

	return dmz_get_mblock(zmd, bitmap_block);
}
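/*
 * Worked example for the bitmap block index above (illustrative):
 * block 0 is the super block and the next nr_map_blocks blocks hold the
 * chunk mapping table, so the bitmaps start at block 1 + nr_map_blocks.
 * Assuming 2 bitmap blocks per zone, the bit for block 40000 of zone 3
 * is in relative bitmap block 3 * 2 + (40000 >> DMZ_BLOCK_SHIFT_BITS)
 * = 7, i.e. metadata block 1 + nr_map_blocks + 7.
 */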
/*
 * Copy the valid blocks bitmap of from_zone to the bitmap of to_zone.
 */
int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
			  struct dm_zone *to_zone)
{
	struct dmz_mblock *from_mblk, *to_mblk;
	sector_t chunk_block = 0;

	/* Get the zones bitmap blocks */
	while (chunk_block < zmd->dev->zone_nr_blocks) {
		from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
		if (IS_ERR(from_mblk))
			return PTR_ERR(from_mblk);
		to_mblk = dmz_get_bitmap(zmd, to_zone, chunk_block);
		if (IS_ERR(to_mblk)) {
			dmz_release_mblock(zmd, from_mblk);
			return PTR_ERR(to_mblk);
		}

		memcpy(to_mblk->data, from_mblk->data, DMZ_BLOCK_SIZE);
		dmz_dirty_mblock(zmd, to_mblk);

		dmz_release_mblock(zmd, to_mblk);
		dmz_release_mblock(zmd, from_mblk);

		chunk_block += DMZ_BLOCK_SIZE_BITS;
	}

	to_zone->weight = from_zone->weight;

	return 0;
}
/*
 * Merge the valid blocks bitmap of from_zone into the bitmap of to_zone,
 * starting from chunk_block.
 */
int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
			   struct dm_zone *to_zone, sector_t chunk_block)
{
	unsigned int nr_blocks;
	int ret;

	/* Get the zones bitmap blocks */
	while (chunk_block < zmd->dev->zone_nr_blocks) {
		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
		if (ret <= 0)
			break;

		nr_blocks = ret;
		ret = dmz_validate_blocks(zmd, to_zone, chunk_block, nr_blocks);
		if (ret)
			return ret;

		chunk_block += nr_blocks;
	}

	return 0;
}
/*
 * Validate all the blocks in the range [block..block+nr_blocks-1].
 */
int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
			sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int count, bit, nr_bits;
	unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
	struct dmz_mblock *mblk;
	unsigned int n = 0;

	dmz_dev_debug(zmd->dev, "=> VALIDATE zone %u, block %llu, %u blocks",
		      dmz_id(zmd, zone), (unsigned long long)chunk_block,
		      nr_blocks);

	WARN_ON(chunk_block + nr_blocks > zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Set bits */
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);

		count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
		if (count) {
			dmz_dirty_mblock(zmd, mblk);
			n += count;
		}
		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	if (likely(zone->weight + n <= zone_nr_blocks))
		zone->weight += n;
	else {
		dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be <= %u",
			     dmz_id(zmd, zone), zone->weight,
			     zone_nr_blocks - n);
		zone->weight = zone_nr_blocks;
	}

	return 0;
}
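/*
 * Illustrative example of the per-bitmap-block loop above: a 4 KB bitmap
 * block covers DMZ_BLOCK_SIZE_BITS = 32768 blocks, so validating 100
 * blocks starting at chunk block 32700 touches two bitmap blocks: 68
 * bits (32700..32767) are set in the first one and the remaining 32
 * bits in the next one, each block dirtied only if bits actually changed.
 */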
/*
 * Clear nr_bits bits in bitmap starting from bit.
 * Return the number of bits cleared.
 */
static int dmz_clear_bits(unsigned long *bitmap, int bit, int nr_bits)
{
	unsigned long *addr;
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Try to clear whole word at once */
			addr = bitmap + BIT_WORD(bit);
			if (*addr == ULONG_MAX) {
				*addr = 0;
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (test_and_clear_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}
/*
 * Invalidate all the blocks in the range [block..block+nr_blocks-1].
 */
int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int count, bit, nr_bits;
	struct dmz_mblock *mblk;
	unsigned int n = 0;

	dmz_dev_debug(zmd->dev, "=> INVALIDATE zone %u, block %llu, %u blocks",
		      dmz_id(zmd, zone), (u64)chunk_block, nr_blocks);

	WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Clear bits */
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);

		count = dmz_clear_bits((unsigned long *)mblk->data,
				       bit, nr_bits);
		if (count) {
			dmz_dirty_mblock(zmd, mblk);
			n += count;
		}
		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	if (zone->weight >= n)
		zone->weight -= n;
	else {
		dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be >= %u",
			     dmz_id(zmd, zone), zone->weight, n);
		zone->weight = 0;
	}

	return 0;
}
/*
 * Get a block bit value.
 */
static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t chunk_block)
{
	struct dmz_mblock *mblk;
	int ret;

	WARN_ON(chunk_block >= zmd->dev->zone_nr_blocks);

	/* Get bitmap block */
	mblk = dmz_get_bitmap(zmd, zone, chunk_block);
	if (IS_ERR(mblk))
		return PTR_ERR(mblk);

	/* Get offset */
	ret = test_bit(chunk_block & DMZ_BLOCK_MASK_BITS,
		       (unsigned long *) mblk->data) != 0;

	dmz_release_mblock(zmd, mblk);

	return ret;
}
/*
 * Return the number of blocks from chunk_block to the first block with a bit
 * value specified by set. Search at most nr_blocks blocks from chunk_block.
 */
static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
				 sector_t chunk_block, unsigned int nr_blocks,
				 int set)
{
	struct dmz_mblock *mblk;
	unsigned int bit, set_bit, nr_bits;
	unsigned long *bitmap;
	int n = 0;

	WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Get offset */
		bitmap = (unsigned long *) mblk->data;
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
		if (set)
			set_bit = find_next_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit);
		else
			set_bit = find_next_zero_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit);
		dmz_release_mblock(zmd, mblk);

		n += set_bit - bit;
		if (set_bit < DMZ_BLOCK_SIZE_BITS)
			break;

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	return n;
}
/*
 * Test if chunk_block is valid. If it is, the number of consecutive
 * valid blocks from chunk_block will be returned.
 */
int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
		    sector_t chunk_block)
{
	int valid;

	valid = dmz_test_block(zmd, zone, chunk_block);
	if (valid <= 0)
		return valid;

	/* The block is valid: get the number of valid blocks from block */
	return dmz_to_next_set_block(zmd, zone, chunk_block,
				     zmd->dev->zone_nr_blocks - chunk_block, 0);
}
/*
 * Find the first valid block from @chunk_block in @zone.
 * If such a block is found, its number is returned using
 * @chunk_block and the total number of valid blocks from @chunk_block
 * is returned.
 */
int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t *chunk_block)
{
	sector_t start_block = *chunk_block;
	int ret;

	ret = dmz_to_next_set_block(zmd, zone, start_block,
				    zmd->dev->zone_nr_blocks - start_block, 1);
	if (ret < 0)
		return ret;

	start_block += ret;
	*chunk_block = start_block;

	return dmz_to_next_set_block(zmd, zone, start_block,
				     zmd->dev->zone_nr_blocks - start_block, 0);
}
/*
 * Count the number of bits set starting from bit up to bit + nr_bits - 1.
 */
static int dmz_count_bits(void *bitmap, int bit, int nr_bits)
{
	unsigned long *addr;
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			addr = (unsigned long *)bitmap + BIT_WORD(bit);
			if (*addr == ULONG_MAX) {
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (test_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}
/*
 * Get a zone weight.
 */
static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	struct dmz_mblock *mblk;
	sector_t chunk_block = 0;
	unsigned int bit, nr_bits;
	unsigned int nr_blocks = zmd->dev->zone_nr_blocks;
	void *bitmap;
	int n = 0;

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk)) {
			n = 0;
			break;
		}

		/* Count bits in this block */
		bitmap = mblk->data;
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
		n += dmz_count_bits(bitmap, bit, nr_bits);

		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	zone->weight = n;
}
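/*
 * Informational example: the "weight" computed here is simply the number
 * of valid blocks in the zone. A freshly mapped, fully written zone of
 * 65536 blocks has weight 65536; after invalidating half of it the
 * weight drops to 32768, and once it reaches 0 the zone can be unmapped
 * and freed directly in dmz_put_chunk_mapping().
 */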
/*
 * Cleanup the zoned metadata resources.
 */
static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
{
	struct rb_root *root;
	struct dmz_mblock *mblk, *next;
	int i;

	/* Release zone mapping resources */
	if (zmd->map_mblk) {
		for (i = 0; i < zmd->nr_map_blocks; i++)
			dmz_release_mblock(zmd, zmd->map_mblk[i]);
		kfree(zmd->map_mblk);
		zmd->map_mblk = NULL;
	}

	/* Release super blocks */
	for (i = 0; i < 2; i++) {
		if (zmd->sb[i].mblk) {
			dmz_free_mblock(zmd, zmd->sb[i].mblk);
			zmd->sb[i].mblk = NULL;
		}
	}

	/* Free cached blocks */
	while (!list_empty(&zmd->mblk_dirty_list)) {
		mblk = list_first_entry(&zmd->mblk_dirty_list,
					struct dmz_mblock, link);
		dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
			     (u64)mblk->no, atomic_read(&mblk->ref));
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
	}

	while (!list_empty(&zmd->mblk_lru_list)) {
		mblk = list_first_entry(&zmd->mblk_lru_list,
					struct dmz_mblock, link);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
	}

	/* Sanity checks: the mblock rbtree should now be empty */
	root = &zmd->mblk_rbtree;
	rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
		dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
			     (u64)mblk->no, atomic_read(&mblk->ref));
		atomic_set(&mblk->ref, 0);
		dmz_free_mblock(zmd, mblk);
	}

	/* Free the zone descriptors */
	dmz_drop_zones(zmd);

	mutex_destroy(&zmd->mblk_flush_lock);
	mutex_destroy(&zmd->map_lock);
}
/*
 * Initialize the zoned metadata.
 */
int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
{
	struct dmz_metadata *zmd;
	unsigned int i, zid;
	struct dm_zone *zone;
	int ret;

	zmd = kzalloc(sizeof(struct dmz_metadata), GFP_KERNEL);
	if (!zmd)
		return -ENOMEM;

	zmd->dev = dev;
	zmd->mblk_rbtree = RB_ROOT;
	init_rwsem(&zmd->mblk_sem);
	mutex_init(&zmd->mblk_flush_lock);
	spin_lock_init(&zmd->mblk_lock);
	INIT_LIST_HEAD(&zmd->mblk_lru_list);
	INIT_LIST_HEAD(&zmd->mblk_dirty_list);

	mutex_init(&zmd->map_lock);
	atomic_set(&zmd->unmap_nr_rnd, 0);
	INIT_LIST_HEAD(&zmd->unmap_rnd_list);
	INIT_LIST_HEAD(&zmd->map_rnd_list);

	atomic_set(&zmd->unmap_nr_seq, 0);
	INIT_LIST_HEAD(&zmd->unmap_seq_list);
	INIT_LIST_HEAD(&zmd->map_seq_list);

	atomic_set(&zmd->nr_reserved_seq_zones, 0);
	INIT_LIST_HEAD(&zmd->reserved_seq_zones_list);

	init_waitqueue_head(&zmd->free_wq);

	/* Initialize zone descriptors */
	ret = dmz_init_zones(zmd);
	if (ret)
		goto err;

	/* Get super block */
	ret = dmz_load_sb(zmd);
	if (ret)
		goto err;

	/* Set metadata zones starting from sb_zone */
	zid = dmz_id(zmd, zmd->sb_zone);
	for (i = 0; i < zmd->nr_meta_zones << 1; i++) {
		zone = dmz_get(zmd, zid + i);
		if (!dmz_is_rnd(zone)) {
			ret = -ENXIO;
			goto err;
		}
		set_bit(DMZ_META, &zone->flags);
	}

	/* Load mapping table */
	ret = dmz_load_mapping(zmd);
	if (ret)
		goto err;

	/*
	 * Cache size boundaries: allow at least 2 super blocks, the chunk map
	 * blocks and enough blocks to be able to cache the bitmap blocks of
	 * up to 16 zones when idle (min_nr_mblks). Otherwise, if busy, allow
	 * the cache to add 512 more metadata blocks.
	 */
	zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16;
	zmd->max_nr_mblks = zmd->min_nr_mblks + 512;
	zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count;
	zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan;
	zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;

	/* Metadata cache shrinker */
	ret = register_shrinker(&zmd->mblk_shrinker);
	if (ret) {
		dmz_dev_err(dev, "Register metadata cache shrinker failed");
		goto err;
	}

	dmz_dev_info(dev, "Host-%s zoned block device",
		     bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
		     "aware" : "managed");
	dmz_dev_info(dev, "  %llu 512-byte logical sectors",
		     (u64)dev->capacity);
	dmz_dev_info(dev, "  %u zones of %llu 512-byte logical sectors",
		     dev->nr_zones, (u64)dev->zone_nr_sectors);
	dmz_dev_info(dev, "  %u metadata zones",
		     zmd->nr_meta_zones * 2);
	dmz_dev_info(dev, "  %u data zones for %u chunks",
		     zmd->nr_data_zones, zmd->nr_chunks);
	dmz_dev_info(dev, "  %u random zones (%u unmapped)",
		     zmd->nr_rnd, atomic_read(&zmd->unmap_nr_rnd));
	dmz_dev_info(dev, "  %u sequential zones (%u unmapped)",
		     zmd->nr_seq, atomic_read(&zmd->unmap_nr_seq));
	dmz_dev_info(dev, "  %u reserved sequential data zones",
		     zmd->nr_reserved_seq);

	dmz_dev_debug(dev, "Format:");
	dmz_dev_debug(dev, "%u metadata blocks per set (%u max cache)",
		      zmd->nr_meta_blocks, zmd->max_nr_mblks);
	dmz_dev_debug(dev, "  %u data zone mapping blocks",
		      zmd->nr_map_blocks);
	dmz_dev_debug(dev, "  %u bitmap blocks",
		      zmd->nr_bitmap_blocks);

	*metadata = zmd;

	return 0;
err:
	dmz_cleanup_metadata(zmd);
	kfree(zmd);
	*metadata = NULL;

	return ret;
}
/*
 * Cleanup the zoned metadata resources.
 */
void dmz_dtr_metadata(struct dmz_metadata *zmd)
{
	unregister_shrinker(&zmd->mblk_shrinker);
	dmz_cleanup_metadata(zmd);
	kfree(zmd);
}
/*
 * Check zone information on resume.
 */
int dmz_resume_metadata(struct dmz_metadata *zmd)
{
	struct dmz_dev *dev = zmd->dev;
	struct dm_zone *zone;
	sector_t wp_block;
	unsigned int i;
	int ret;

	/* Check zones */
	for (i = 0; i < dev->nr_zones; i++) {
		zone = dmz_get(zmd, i);
		if (!zone) {
			dmz_dev_err(dev, "Unable to get zone %u", i);
			return -EIO;
		}

		wp_block = zone->wp_block;

		ret = dmz_update_zone(zmd, zone);
		if (ret) {
			dmz_dev_err(dev, "Broken zone %u", i);
			return ret;
		}

		if (dmz_is_offline(zone)) {
			dmz_dev_warn(dev, "Zone %u is offline", i);
			continue;
		}

		/* Check write pointer */
		if (!dmz_is_seq(zone))
			zone->wp_block = 0;
		else if (zone->wp_block != wp_block) {
			dmz_dev_err(dev, "Zone %u: Invalid wp (%llu / %llu)",
				    i, (u64)zone->wp_block, (u64)wp_block);
			zone->wp_block = wp_block;
			dmz_invalidate_blocks(zmd, zone, zone->wp_block,
					      dev->zone_nr_blocks - zone->wp_block);
		}
	}

	return 0;
}