// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/sched/mm.h>

#define DM_MSG_PREFIX		"zoned metadata"
/*
 * Metadata version.
 */
#define DMZ_META_VER	2

/*
 * On-disk super block magic.
 */
#define DMZ_MAGIC	((((unsigned int)('D')) << 24) | \
			 (((unsigned int)('Z')) << 16) | \
			 (((unsigned int)('B')) << 8) | \
			 ((unsigned int)('D')))
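/*
 * Illustrative note (not part of the driver): the shifts above pack the
 * ASCII bytes 'D' 'Z' 'B' 'D' into one 32-bit value, so DMZ_MAGIC is
 * 0x445A4244. A compile-time self-check could read:
 *
 *	BUILD_BUG_ON(DMZ_MAGIC != 0x445a4244);
 */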
/*
 * On disk super block.
 * This uses only 512 B but occupies a full 4KB block on disk. This block is
 * followed on disk by the mapping table of chunks to zones and the bitmap
 * blocks indicating zone block validity.
 * The overall resulting metadata format is:
 *    (1) Super block (1 block)
 *    (2) Chunk mapping table (nr_map_blocks)
 *    (3) Bitmap blocks (nr_bitmap_blocks)
 * All metadata blocks are stored in conventional zones, starting from
 * the first conventional zone found on disk.
 */
struct dmz_super {
	/* Magic number */
	__le32		magic;			/*   4 */

	/* Metadata version number */
	__le32		version;		/*   8 */

	/* Generation number */
	__le64		gen;			/*  16 */

	/* This block number */
	__le64		sb_block;		/*  24 */

	/* The number of metadata blocks, including this super block */
	__le32		nr_meta_blocks;		/*  28 */

	/* The number of sequential zones reserved for reclaim */
	__le32		nr_reserved_seq;	/*  32 */

	/* The number of entries in the mapping table */
	__le32		nr_chunks;		/*  36 */

	/* The number of blocks used for the chunk mapping table */
	__le32		nr_map_blocks;		/*  40 */

	/* The number of blocks used for the block bitmaps */
	__le32		nr_bitmap_blocks;	/*  44 */

	/* Checksum */
	__le32		crc;			/*  48 */

	/* DM-Zoned label */
	u8		dmz_label[32];		/*  80 */

	/* DM-Zoned UUID */
	u8		dmz_uuid[16];		/*  96 */

	/* Device UUID */
	u8		dev_uuid[16];		/* 112 */

	/* Padding to full 512B sector */
	u8		reserved[400];		/* 512 */
};
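/*
 * Sizing sketch (hypothetical numbers, not part of the driver): with
 * 256 MB zones and 4 KB blocks, a zone holds 65536 blocks, so its
 * validity bitmap needs 65536 / 8 = 8192 B, i.e. 2 bitmap blocks.
 * Each chunk mapping entry is 8 B (see struct dmz_map below), so one
 * 4 KB map block describes 512 chunks:
 *
 *	nr_map_blocks    = DIV_ROUND_UP(nr_chunks, 512);
 *	nr_bitmap_blocks = nr_zones * 2;
 *	nr_meta_blocks   = 1 + nr_map_blocks + nr_bitmap_blocks;
 */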
/*
 * Chunk mapping entry: entries are indexed by chunk number
 * and give the zone ID (dzone_id) mapping the chunk on disk.
 * This zone may be sequential or random. If it is a sequential
 * zone, a second zone (bzone_id) used as a write buffer may
 * also be specified. This second zone will always be a randomly
 * writeable zone.
 */
struct dmz_map {
	__le32			dzone_id;
	__le32			bzone_id;
};

/*
 * Chunk mapping table metadata: 512 8-byte entries per 4KB block.
 */
#define DMZ_MAP_ENTRIES		(DMZ_BLOCK_SIZE / sizeof(struct dmz_map))
#define DMZ_MAP_ENTRIES_SHIFT	(ilog2(DMZ_MAP_ENTRIES))
#define DMZ_MAP_ENTRIES_MASK	(DMZ_MAP_ENTRIES - 1)
#define DMZ_MAP_UNMAPPED	UINT_MAX
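/*
 * Indexing sketch (mirrors the lookups done later in this file): a chunk
 * number selects a map block and an entry within it with a shift and a
 * mask, since DMZ_MAP_ENTRIES is a power of two:
 *
 *	mblk_idx = chunk >> DMZ_MAP_ENTRIES_SHIFT;   (which map block)
 *	entry    = chunk & DMZ_MAP_ENTRIES_MASK;     (entry in that block)
 */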
/*
 * Metadata block descriptor (for cached metadata blocks).
 */
struct dmz_mblock {
	struct rb_node		node;
	struct list_head	link;
	sector_t		no;
	unsigned int		ref;
	unsigned long		state;
	struct page		*page;
	void			*data;
};

/*
 * Metadata block state flags.
 */
enum {
	DMZ_META_DIRTY,
	DMZ_META_READING,
	DMZ_META_WRITING,
	DMZ_META_ERROR,
};

/*
 * Super block information (one per metadata set).
 */
struct dmz_sb {
	sector_t		block;
	struct dmz_dev		*dev;
	struct dmz_mblock	*mblk;
	struct dmz_super	*sb;
	struct dm_zone		*zone;
};
/*
 * In-memory metadata.
 */
struct dmz_metadata {
	struct dmz_dev		*dev;
	unsigned int		nr_devs;

	char			devname[BDEVNAME_SIZE];
	char			label[BDEVNAME_SIZE];
	uuid_t			uuid;

	sector_t		zone_bitmap_size;
	unsigned int		zone_nr_bitmap_blocks;
	unsigned int		zone_bits_per_mblk;

	sector_t		zone_nr_blocks;
	sector_t		zone_nr_blocks_shift;

	sector_t		zone_nr_sectors;
	sector_t		zone_nr_sectors_shift;

	unsigned int		nr_bitmap_blocks;
	unsigned int		nr_map_blocks;

	unsigned int		nr_zones;
	unsigned int		nr_useable_zones;
	unsigned int		nr_meta_blocks;
	unsigned int		nr_meta_zones;
	unsigned int		nr_data_zones;
	unsigned int		nr_cache_zones;
	unsigned int		nr_rnd_zones;
	unsigned int		nr_reserved_seq;
	unsigned int		nr_chunks;

	/* Zone information array */
	struct xarray		zones;

	struct dmz_sb		sb[2];
	unsigned int		mblk_primary;
	unsigned int		sb_version;
	u64			sb_gen;
	unsigned int		min_nr_mblks;
	unsigned int		max_nr_mblks;
	atomic_t		nr_mblks;
	struct rw_semaphore	mblk_sem;
	struct mutex		mblk_flush_lock;
	spinlock_t		mblk_lock;
	struct rb_root		mblk_rbtree;
	struct list_head	mblk_lru_list;
	struct list_head	mblk_dirty_list;
	struct shrinker		*mblk_shrinker;

	/* Zone allocation management */
	struct mutex		map_lock;
	struct dmz_mblock	**map_mblk;

	unsigned int		nr_cache;
	atomic_t		unmap_nr_cache;
	struct list_head	unmap_cache_list;
	struct list_head	map_cache_list;

	atomic_t		nr_reserved_seq_zones;
	struct list_head	reserved_seq_zones_list;

	wait_queue_head_t	free_wq;
};
#define dmz_zmd_info(zmd, format, args...)	\
	DMINFO("(%s): " format, (zmd)->label, ## args)

#define dmz_zmd_err(zmd, format, args...)	\
	DMERR("(%s): " format, (zmd)->label, ## args)

#define dmz_zmd_warn(zmd, format, args...)	\
	DMWARN("(%s): " format, (zmd)->label, ## args)

#define dmz_zmd_debug(zmd, format, args...)	\
	DMDEBUG("(%s): " format, (zmd)->label, ## args)
/*
 * Various accessors
 */
static unsigned int dmz_dev_zone_id(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	return zone->id - zone->dev->zone_offset;
}

sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int zone_id = dmz_dev_zone_id(zmd, zone);

	return (sector_t)zone_id << zmd->zone_nr_sectors_shift;
}

sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int zone_id = dmz_dev_zone_id(zmd, zone);

	return (sector_t)zone_id << zmd->zone_nr_blocks_shift;
}

unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd)
{
	return zmd->zone_nr_blocks;
}

unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd)
{
	return zmd->zone_nr_sectors;
}

unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd)
{
	return zmd->zone_nr_sectors_shift;
}

unsigned int dmz_nr_zones(struct dmz_metadata *zmd)
{
	return zmd->nr_zones;
}

unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
{
	return zmd->nr_chunks;
}

unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd, int idx)
{
	return zmd->dev[idx].nr_rnd;
}

unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd, int idx)
{
	return atomic_read(&zmd->dev[idx].unmap_nr_rnd);
}

unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd)
{
	return zmd->nr_cache;
}

unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd)
{
	return atomic_read(&zmd->unmap_nr_cache);
}

unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd, int idx)
{
	return zmd->dev[idx].nr_seq;
}

unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd, int idx)
{
	return atomic_read(&zmd->dev[idx].unmap_nr_seq);
}

static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
{
	return xa_load(&zmd->zones, zone_id);
}
static struct dm_zone *dmz_insert(struct dmz_metadata *zmd,
				  unsigned int zone_id, struct dmz_dev *dev)
{
	struct dm_zone *zone = kzalloc(sizeof(struct dm_zone), GFP_KERNEL);

	if (!zone)
		return ERR_PTR(-ENOMEM);

	if (xa_insert(&zmd->zones, zone_id, zone, GFP_KERNEL)) {
		kfree(zone);
		return ERR_PTR(-EBUSY);
	}

	INIT_LIST_HEAD(&zone->link);
	atomic_set(&zone->refcount, 0);
	zone->id = zone_id;
	zone->chunk = DMZ_MAP_UNMAPPED;
	zone->dev = dev;

	return zone;
}
const char *dmz_metadata_label(struct dmz_metadata *zmd)
{
	return (const char *)zmd->label;
}

bool dmz_check_dev(struct dmz_metadata *zmd)
{
	unsigned int i;

	for (i = 0; i < zmd->nr_devs; i++) {
		if (!dmz_check_bdev(&zmd->dev[i]))
			return false;
	}
	return true;
}

bool dmz_dev_is_dying(struct dmz_metadata *zmd)
{
	unsigned int i;

	for (i = 0; i < zmd->nr_devs; i++) {
		if (dmz_bdev_is_dying(&zmd->dev[i]))
			return true;
	}
	return false;
}
/*
 * Lock/unlock mapping table.
 * The map lock also protects all the zone lists.
 */
void dmz_lock_map(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->map_lock);
}

void dmz_unlock_map(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->map_lock);
}

/*
 * Lock/unlock metadata access. This is a "read" lock on a semaphore
 * that prevents metadata flush from running while metadata are being
 * modified. The actual metadata write mutual exclusion is achieved with
 * the map lock and zone state management (active and reclaim state are
 * mutually exclusive).
 */
void dmz_lock_metadata(struct dmz_metadata *zmd)
{
	down_read(&zmd->mblk_sem);
}

void dmz_unlock_metadata(struct dmz_metadata *zmd)
{
	up_read(&zmd->mblk_sem);
}

/*
 * Lock/unlock flush: prevent concurrent executions
 * of dmz_flush_metadata as well as metadata modification in reclaim
 * while flush is being executed.
 */
void dmz_lock_flush(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->mblk_flush_lock);
}

void dmz_unlock_flush(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->mblk_flush_lock);
}
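/*
 * Usage sketch (illustrative, not part of the driver): a typical metadata
 * update in the BIO path nests these locks as follows. dmz_lock_metadata()
 * only takes the semaphore in read mode, so concurrent updaters are
 * allowed; dmz_flush_metadata() takes it in write mode to get a stable
 * snapshot of the dirty block list.
 *
 *	dmz_lock_metadata(zmd);
 *	dmz_lock_map(zmd);
 *	... look up and modify the chunk mapping ...
 *	dmz_unlock_map(zmd);
 *	dmz_unlock_metadata(zmd);
 */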
/*
 * Allocate a metadata block.
 */
static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
					   sector_t mblk_no)
{
	struct dmz_mblock *mblk = NULL;

	/* See if we can reuse cached blocks */
	if (zmd->max_nr_mblks && atomic_read(&zmd->nr_mblks) > zmd->max_nr_mblks) {
		spin_lock(&zmd->mblk_lock);
		mblk = list_first_entry_or_null(&zmd->mblk_lru_list,
						struct dmz_mblock, link);
		if (mblk) {
			list_del_init(&mblk->link);
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			mblk->no = mblk_no;
		}
		spin_unlock(&zmd->mblk_lock);
		if (mblk)
			return mblk;
	}

	/* Allocate a new block */
	mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO);
	if (!mblk)
		return NULL;

	mblk->page = alloc_page(GFP_NOIO);
	if (!mblk->page) {
		kfree(mblk);
		return NULL;
	}

	RB_CLEAR_NODE(&mblk->node);
	INIT_LIST_HEAD(&mblk->link);
	mblk->ref = 0;
	mblk->state = 0;
	mblk->no = mblk_no;
	mblk->data = page_address(mblk->page);

	atomic_inc(&zmd->nr_mblks);

	return mblk;
}
/*
 * Free a metadata block.
 */
static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	__free_pages(mblk->page, 0);
	kfree(mblk);

	atomic_dec(&zmd->nr_mblks);
}

/*
 * Insert a metadata block in the rbtree.
 */
static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	struct rb_root *root = &zmd->mblk_rbtree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct dmz_mblock *b;

	/* Figure out where to put the new node */
	while (*new) {
		b = container_of(*new, struct dmz_mblock, node);
		parent = *new;
		new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right);
	}

	/* Add new node and rebalance tree */
	rb_link_node(&mblk->node, parent, new);
	rb_insert_color(&mblk->node, root);
}
/*
 * Lookup a metadata block in the rbtree. If the block is found, increment
 * its reference count.
 */
static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
					      sector_t mblk_no)
{
	struct rb_root *root = &zmd->mblk_rbtree;
	struct rb_node *node = root->rb_node;
	struct dmz_mblock *mblk;

	while (node) {
		mblk = container_of(node, struct dmz_mblock, node);
		if (mblk->no == mblk_no) {
			/*
			 * If this is the first reference to the block,
			 * remove it from the LRU list.
			 */
			mblk->ref++;
			if (mblk->ref == 1 &&
			    !test_bit(DMZ_META_DIRTY, &mblk->state))
				list_del_init(&mblk->link);
			return mblk;
		}
		node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
	}

	return NULL;
}
/*
 * Metadata block BIO end callback.
 */
static void dmz_mblock_bio_end_io(struct bio *bio)
{
	struct dmz_mblock *mblk = bio->bi_private;
	int flag;

	if (bio->bi_status)
		set_bit(DMZ_META_ERROR, &mblk->state);

	if (bio_op(bio) == REQ_OP_WRITE)
		flag = DMZ_META_WRITING;
	else
		flag = DMZ_META_READING;

	clear_bit_unlock(flag, &mblk->state);
	smp_mb__after_atomic();
	wake_up_bit(&mblk->state, flag);

	bio_put(bio);
}
/*
 * Read an uncached metadata block from disk and add it to the cache.
 */
static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
					      sector_t mblk_no)
{
	struct dmz_mblock *mblk, *m;
	sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
	struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;
	struct bio *bio;

	if (dmz_bdev_is_dying(dev))
		return ERR_PTR(-EIO);

	/* Get a new block and a BIO to read it */
	mblk = dmz_alloc_mblock(zmd, mblk_no);
	if (!mblk)
		return ERR_PTR(-ENOMEM);

	bio = bio_alloc(dev->bdev, 1, REQ_OP_READ | REQ_META | REQ_PRIO,
			GFP_NOIO);

	spin_lock(&zmd->mblk_lock);

	/*
	 * Make sure that another context did not start reading
	 * the block already.
	 */
	m = dmz_get_mblock_fast(zmd, mblk_no);
	if (m) {
		spin_unlock(&zmd->mblk_lock);
		dmz_free_mblock(zmd, mblk);
		bio_put(bio);
		return m;
	}

	mblk->ref++;
	set_bit(DMZ_META_READING, &mblk->state);
	dmz_insert_mblock(zmd, mblk);

	spin_unlock(&zmd->mblk_lock);

	/* Submit read BIO */
	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio->bi_private = mblk;
	bio->bi_end_io = dmz_mblock_bio_end_io;
	__bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
	submit_bio(bio);

	return mblk;
}
/*
 * Free metadata blocks.
 */
static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd,
					     unsigned long limit)
{
	struct dmz_mblock *mblk;
	unsigned long count = 0;

	if (!zmd->max_nr_mblks)
		return 0;

	while (!list_empty(&zmd->mblk_lru_list) &&
	       atomic_read(&zmd->nr_mblks) > zmd->min_nr_mblks &&
	       count < limit) {
		mblk = list_first_entry(&zmd->mblk_lru_list,
					struct dmz_mblock, link);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
		count++;
	}

	return count;
}
/*
 * For mblock shrinker: get the number of unused metadata blocks in the cache.
 */
static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
					       struct shrink_control *sc)
{
	struct dmz_metadata *zmd = shrink->private_data;

	return atomic_read(&zmd->nr_mblks);
}

/*
 * For mblock shrinker: scan unused metadata blocks and shrink the cache.
 */
static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink,
					      struct shrink_control *sc)
{
	struct dmz_metadata *zmd = shrink->private_data;
	unsigned long count;

	spin_lock(&zmd->mblk_lock);
	count = dmz_shrink_mblock_cache(zmd, sc->nr_to_scan);
	spin_unlock(&zmd->mblk_lock);

	return count ? count : SHRINK_STOP;
}
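/*
 * Registration sketch (done in the metadata constructor, outside this
 * excerpt): with the shrinker API assumed by the private_data use above,
 * the two callbacks are wired up roughly as follows. Error handling is
 * omitted and the name string is an assumption.
 *
 *	zmd->mblk_shrinker = shrinker_alloc(0, "dm-zoned-meta");
 *	zmd->mblk_shrinker->count_objects = dmz_mblock_shrinker_count;
 *	zmd->mblk_shrinker->scan_objects = dmz_mblock_shrinker_scan;
 *	zmd->mblk_shrinker->private_data = zmd;
 *	shrinker_register(zmd->mblk_shrinker);
 */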
/*
 * Release a metadata block.
 */
static void dmz_release_mblock(struct dmz_metadata *zmd,
			       struct dmz_mblock *mblk)
{
	if (!mblk)
		return;

	spin_lock(&zmd->mblk_lock);

	mblk->ref--;
	if (mblk->ref == 0) {
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			dmz_free_mblock(zmd, mblk);
		} else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) {
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
			dmz_shrink_mblock_cache(zmd, 1);
		}
	}

	spin_unlock(&zmd->mblk_lock);
}
/*
 * Get a metadata block from the rbtree. If the block
 * is not present, read it from disk.
 */
static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
					 sector_t mblk_no)
{
	struct dmz_mblock *mblk;
	struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;

	/* Check rbtree */
	spin_lock(&zmd->mblk_lock);
	mblk = dmz_get_mblock_fast(zmd, mblk_no);
	spin_unlock(&zmd->mblk_lock);

	if (!mblk) {
		/* Cache miss: read the block from disk */
		mblk = dmz_get_mblock_slow(zmd, mblk_no);
		if (IS_ERR(mblk))
			return mblk;
	}

	/* Wait for on-going read I/O and check for error */
	wait_on_bit_io(&mblk->state, DMZ_META_READING,
		       TASK_UNINTERRUPTIBLE);
	if (test_bit(DMZ_META_ERROR, &mblk->state)) {
		dmz_release_mblock(zmd, mblk);
		dmz_check_bdev(dev);
		return ERR_PTR(-EIO);
	}

	return mblk;
}
/*
 * Mark a metadata block dirty.
 */
static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	spin_lock(&zmd->mblk_lock);
	if (!test_and_set_bit(DMZ_META_DIRTY, &mblk->state))
		list_add_tail(&mblk->link, &zmd->mblk_dirty_list);
	spin_unlock(&zmd->mblk_lock);
}
/*
 * Issue a metadata block write BIO.
 */
static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
			    unsigned int set)
{
	struct dmz_dev *dev = zmd->sb[set].dev;
	sector_t block = zmd->sb[set].block + mblk->no;
	struct bio *bio;

	if (dmz_bdev_is_dying(dev))
		return -EIO;

	bio = bio_alloc(dev->bdev, 1, REQ_OP_WRITE | REQ_META | REQ_PRIO,
			GFP_NOIO);

	set_bit(DMZ_META_WRITING, &mblk->state);

	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio->bi_private = mblk;
	bio->bi_end_io = dmz_mblock_bio_end_io;
	__bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
	submit_bio(bio);

	return 0;
}
/*
 * Read/write a metadata block.
 */
static int dmz_rdwr_block(struct dmz_dev *dev, enum req_op op,
			  sector_t block, struct page *page)
{
	struct bio *bio;
	int ret;

	if (WARN_ON(!dev))
		return -EIO;

	if (dmz_bdev_is_dying(dev))
		return -EIO;

	bio = bio_alloc(dev->bdev, 1, op | REQ_SYNC | REQ_META | REQ_PRIO,
			GFP_NOIO);
	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	__bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
	ret = submit_bio_wait(bio);
	bio_put(bio);

	if (ret)
		dmz_check_bdev(dev);
	return ret;
}
/*
 * Write super block of the specified metadata set.
 */
static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
{
	struct dmz_mblock *mblk = zmd->sb[set].mblk;
	struct dmz_super *sb = zmd->sb[set].sb;
	struct dmz_dev *dev = zmd->sb[set].dev;
	sector_t sb_block;
	u64 sb_gen = zmd->sb_gen + 1;
	int ret;

	sb->magic = cpu_to_le32(DMZ_MAGIC);

	sb->version = cpu_to_le32(zmd->sb_version);
	if (zmd->sb_version > 1) {
		BUILD_BUG_ON(UUID_SIZE != 16);
		export_uuid(sb->dmz_uuid, &zmd->uuid);
		memcpy(sb->dmz_label, zmd->label, BDEVNAME_SIZE);
		export_uuid(sb->dev_uuid, &dev->uuid);
	}

	sb->gen = cpu_to_le64(sb_gen);

	/*
	 * The metadata always references the absolute block address,
	 * ie relative to the entire block range, not the per-device
	 * block address.
	 */
	sb_block = zmd->sb[set].zone->id << zmd->zone_nr_blocks_shift;
	sb->sb_block = cpu_to_le64(sb_block);
	sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks);
	sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq);
	sb->nr_chunks = cpu_to_le32(zmd->nr_chunks);

	sb->nr_map_blocks = cpu_to_le32(zmd->nr_map_blocks);
	sb->nr_bitmap_blocks = cpu_to_le32(zmd->nr_bitmap_blocks);

	sb->crc = 0;
	sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE));

	ret = dmz_rdwr_block(dev, REQ_OP_WRITE, zmd->sb[set].block,
			     mblk->page);
	if (ret == 0)
		ret = blkdev_issue_flush(dev->bdev);

	return ret;
}
/*
 * Write dirty metadata blocks to the specified set.
 */
static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
				   struct list_head *write_list,
				   unsigned int set)
{
	struct dmz_mblock *mblk;
	struct dmz_dev *dev = zmd->sb[set].dev;
	struct blk_plug plug;
	int ret = 0, nr_mblks_submitted = 0;

	/* Issue writes */
	blk_start_plug(&plug);
	list_for_each_entry(mblk, write_list, link) {
		ret = dmz_write_mblock(zmd, mblk, set);
		if (ret)
			break;
		nr_mblks_submitted++;
	}
	blk_finish_plug(&plug);

	/* Wait for completion */
	list_for_each_entry(mblk, write_list, link) {
		if (!nr_mblks_submitted)
			break;
		wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
			       TASK_UNINTERRUPTIBLE);
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			clear_bit(DMZ_META_ERROR, &mblk->state);
			dmz_check_bdev(dev);
			ret = -EIO;
		}
		nr_mblks_submitted--;
	}

	/* Flush drive cache (this will also sync data) */
	if (ret == 0)
		ret = blkdev_issue_flush(dev->bdev);

	return ret;
}
/*
 * Log dirty metadata blocks.
 */
static int dmz_log_dirty_mblocks(struct dmz_metadata *zmd,
				 struct list_head *write_list)
{
	unsigned int log_set = zmd->mblk_primary ^ 0x1;
	int ret;

	/* Write dirty blocks to the log */
	ret = dmz_write_dirty_mblocks(zmd, write_list, log_set);
	if (ret)
		return ret;

	/*
	 * No error so far: now validate the log by updating the
	 * log index super block generation.
	 */
	ret = dmz_write_sb(zmd, log_set);
	if (ret)
		return ret;

	return 0;
}
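/*
 * Ordering sketch (summary of the flush protocol implemented below):
 *
 *	1. write all dirty blocks to the secondary (log) set
 *	2. write the log set super block with generation N+1 (log is valid)
 *	3. write the same blocks in place in the primary set
 *	4. write the primary super block with generation N+1
 *
 * A crash before step 2 leaves the old primary intact; a crash after it
 * can be recovered from the log, since dmz_load_sb() picks the set with
 * the highest generation and dmz_recover_mblocks() rebuilds the other.
 */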
/*
 * Flush dirty metadata blocks.
 */
int dmz_flush_metadata(struct dmz_metadata *zmd)
{
	struct dmz_mblock *mblk;
	struct list_head write_list;
	struct dmz_dev *dev;
	int ret;

	if (WARN_ON(!zmd))
		return 0;

	INIT_LIST_HEAD(&write_list);

	/*
	 * Make sure that metadata blocks are stable before logging: take
	 * the write lock on the metadata semaphore to prevent target BIOs
	 * from modifying metadata.
	 */
	down_write(&zmd->mblk_sem);
	dev = zmd->sb[zmd->mblk_primary].dev;

	/*
	 * This is called from the target flush work and reclaim work.
	 * Concurrent execution is not allowed.
	 */
	dmz_lock_flush(zmd);

	if (dmz_bdev_is_dying(dev)) {
		ret = -EIO;
		goto out;
	}

	/* Get dirty blocks */
	spin_lock(&zmd->mblk_lock);
	list_splice_init(&zmd->mblk_dirty_list, &write_list);
	spin_unlock(&zmd->mblk_lock);

	/* If there are no dirty metadata blocks, just flush the device cache */
	if (list_empty(&write_list)) {
		ret = blkdev_issue_flush(dev->bdev);
		goto err;
	}

	/*
	 * The primary metadata set is still clean. Keep it this way until
	 * all updates are successful in the secondary set. That is, use
	 * the secondary set as a log.
	 */
	ret = dmz_log_dirty_mblocks(zmd, &write_list);
	if (ret)
		goto err;

	/*
	 * The log is on disk. It is now safe to update in place
	 * in the primary metadata set.
	 */
	ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary);
	if (ret)
		goto err;

	ret = dmz_write_sb(zmd, zmd->mblk_primary);
	if (ret)
		goto err;

	while (!list_empty(&write_list)) {
		mblk = list_first_entry(&write_list, struct dmz_mblock, link);
		list_del_init(&mblk->link);

		spin_lock(&zmd->mblk_lock);
		clear_bit(DMZ_META_DIRTY, &mblk->state);
		if (mblk->ref == 0)
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
		spin_unlock(&zmd->mblk_lock);
	}

	zmd->sb_gen++;
out:
	dmz_unlock_flush(zmd);
	up_write(&zmd->mblk_sem);

	return ret;

err:
	if (!list_empty(&write_list)) {
		spin_lock(&zmd->mblk_lock);
		list_splice(&write_list, &zmd->mblk_dirty_list);
		spin_unlock(&zmd->mblk_lock);
	}
	if (!dmz_check_bdev(dev))
		ret = -EIO;
	goto out;
}
/*
 * Check super block.
 */
static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
			bool tertiary)
{
	struct dmz_super *sb = dsb->sb;
	struct dmz_dev *dev = dsb->dev;
	unsigned int nr_meta_zones, nr_data_zones;
	u32 crc, stored_crc;
	u64 gen, sb_block;

	if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
		dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
			    DMZ_MAGIC, le32_to_cpu(sb->magic));
		return -ENXIO;
	}

	zmd->sb_version = le32_to_cpu(sb->version);
	if (zmd->sb_version > DMZ_META_VER) {
		dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
			    DMZ_META_VER, zmd->sb_version);
		return -EINVAL;
	}
	if (zmd->sb_version < 2 && tertiary) {
		dmz_dev_err(dev, "Tertiary superblocks are not supported");
		return -EINVAL;
	}

	gen = le64_to_cpu(sb->gen);
	stored_crc = le32_to_cpu(sb->crc);
	sb->crc = 0;
	crc = crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE);
	if (crc != stored_crc) {
		dmz_dev_err(dev, "Invalid checksum (needed 0x%08x, got 0x%08x)",
			    crc, stored_crc);
		return -ENXIO;
	}

	sb_block = le64_to_cpu(sb->sb_block);
	if (sb_block != (u64)dsb->zone->id << zmd->zone_nr_blocks_shift) {
		dmz_dev_err(dev, "Invalid superblock position (is %llu expected %llu)",
			    sb_block, (u64)dsb->zone->id << zmd->zone_nr_blocks_shift);
		return -EINVAL;
	}
	if (zmd->sb_version > 1) {
		uuid_t sb_uuid;

		import_uuid(&sb_uuid, sb->dmz_uuid);
		if (uuid_is_null(&sb_uuid)) {
			dmz_dev_err(dev, "NULL DM-Zoned uuid");
			return -ENXIO;
		} else if (uuid_is_null(&zmd->uuid)) {
			uuid_copy(&zmd->uuid, &sb_uuid);
		} else if (!uuid_equal(&zmd->uuid, &sb_uuid)) {
			dmz_dev_err(dev, "mismatching DM-Zoned uuid, is %pUl expected %pUl",
				    &sb_uuid, &zmd->uuid);
			return -ENXIO;
		}
		if (!strlen(zmd->label))
			memcpy(zmd->label, sb->dmz_label, BDEVNAME_SIZE);
		else if (memcmp(zmd->label, sb->dmz_label, BDEVNAME_SIZE)) {
			dmz_dev_err(dev, "mismatching DM-Zoned label, is %s expected %s",
				    sb->dmz_label, zmd->label);
			return -ENXIO;
		}
		import_uuid(&dev->uuid, sb->dev_uuid);
		if (uuid_is_null(&dev->uuid)) {
			dmz_dev_err(dev, "NULL device uuid");
			return -ENXIO;
		}

		if (tertiary) {
			/*
			 * Generation number should be 0, but it doesn't
			 * really matter if it isn't.
			 */
			if (gen != 0)
				dmz_dev_warn(dev, "Invalid generation %llu",
					     gen);
			return 0;
		}
	}

	nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1)
		>> zmd->zone_nr_blocks_shift;
	if (!nr_meta_zones ||
	    (zmd->nr_devs <= 1 && nr_meta_zones >= zmd->nr_rnd_zones) ||
	    (zmd->nr_devs > 1 && nr_meta_zones >= zmd->nr_cache_zones)) {
		dmz_dev_err(dev, "Invalid number of metadata blocks");
		return -ENXIO;
	}

	if (!le32_to_cpu(sb->nr_reserved_seq) ||
	    le32_to_cpu(sb->nr_reserved_seq) >= (zmd->nr_useable_zones - nr_meta_zones)) {
		dmz_dev_err(dev, "Invalid number of reserved sequential zones");
		return -ENXIO;
	}

	nr_data_zones = zmd->nr_useable_zones -
		(nr_meta_zones * 2 + le32_to_cpu(sb->nr_reserved_seq));
	if (le32_to_cpu(sb->nr_chunks) > nr_data_zones) {
		dmz_dev_err(dev, "Invalid number of chunks %u / %u",
			    le32_to_cpu(sb->nr_chunks), nr_data_zones);
		return -ENXIO;
	}

	/* OK */
	zmd->nr_meta_blocks = le32_to_cpu(sb->nr_meta_blocks);
	zmd->nr_reserved_seq = le32_to_cpu(sb->nr_reserved_seq);
	zmd->nr_chunks = le32_to_cpu(sb->nr_chunks);
	zmd->nr_map_blocks = le32_to_cpu(sb->nr_map_blocks);
	zmd->nr_bitmap_blocks = le32_to_cpu(sb->nr_bitmap_blocks);
	zmd->nr_meta_zones = nr_meta_zones;
	zmd->nr_data_zones = nr_data_zones;

	return 0;
}
/*
 * Read the first or second super block from disk.
 */
static int dmz_read_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
{
	dmz_zmd_debug(zmd, "read superblock set %d dev %pg block %llu",
		      set, sb->dev->bdev, sb->block);

	return dmz_rdwr_block(sb->dev, REQ_OP_READ,
			      sb->block, sb->mblk->page);
}

/*
 * Determine the position of the secondary super blocks on disk.
 * This is used only if a corruption of the primary super block
 * is detected.
 */
static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
{
	unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
	struct dmz_mblock *mblk;
	unsigned int zone_id = zmd->sb[0].zone->id;
	int i;

	/* Allocate a block */
	mblk = dmz_alloc_mblock(zmd, 0);
	if (!mblk)
		return -ENOMEM;

	zmd->sb[1].mblk = mblk;
	zmd->sb[1].sb = mblk->data;

	/* Bad first super block: search for the second one */
	zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks;
	zmd->sb[1].zone = dmz_get(zmd, zone_id + 1);
	zmd->sb[1].dev = zmd->sb[0].dev;
	for (i = 1; i < zmd->nr_rnd_zones; i++) {
		if (dmz_read_sb(zmd, &zmd->sb[1], 1) != 0)
			break;
		if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC)
			return 0;
		zmd->sb[1].block += zone_nr_blocks;
		zmd->sb[1].zone = dmz_get(zmd, zone_id + i);
	}

	dmz_free_mblock(zmd, mblk);
	zmd->sb[1].mblk = NULL;
	zmd->sb[1].zone = NULL;
	zmd->sb[1].dev = NULL;

	return -EIO;
}
/*
 * Read a super block from disk.
 */
static int dmz_get_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
{
	struct dmz_mblock *mblk;
	int ret;

	/* Allocate a block */
	mblk = dmz_alloc_mblock(zmd, 0);
	if (!mblk)
		return -ENOMEM;

	sb->mblk = mblk;
	sb->sb = mblk->data;

	/* Read super block */
	ret = dmz_read_sb(zmd, sb, set);
	if (ret) {
		dmz_free_mblock(zmd, mblk);
		sb->mblk = NULL;
	}

	return ret;
}
/*
 * Recover a metadata set.
 */
static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
{
	unsigned int src_set = dst_set ^ 0x1;
	struct page *page;
	int i, ret;

	dmz_dev_warn(zmd->sb[dst_set].dev,
		     "Metadata set %u invalid: recovering", dst_set);

	if (dst_set == 0)
		zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
	else
		zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);

	page = alloc_page(GFP_NOIO);
	if (!page)
		return -ENOMEM;

	/* Copy metadata blocks */
	for (i = 1; i < zmd->nr_meta_blocks; i++) {
		ret = dmz_rdwr_block(zmd->sb[src_set].dev, REQ_OP_READ,
				     zmd->sb[src_set].block + i, page);
		if (ret)
			goto out;
		ret = dmz_rdwr_block(zmd->sb[dst_set].dev, REQ_OP_WRITE,
				     zmd->sb[dst_set].block + i, page);
		if (ret)
			goto out;
	}

	/* Finalize with the super block */
	if (!zmd->sb[dst_set].mblk) {
		zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0);
		if (!zmd->sb[dst_set].mblk) {
			ret = -ENOMEM;
			goto out;
		}
		zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data;
	}

	ret = dmz_write_sb(zmd, dst_set);
out:
	__free_pages(page, 0);

	return ret;
}
/*
 * Get super block from disk.
 */
static int dmz_load_sb(struct dmz_metadata *zmd)
{
	bool sb_good[2] = {false, false};
	u64 sb_gen[2] = {0, 0};
	int ret;

	if (!zmd->sb[0].zone) {
		dmz_zmd_err(zmd, "Primary super block zone not set");
		return -ENXIO;
	}

	/* Read and check the primary super block */
	zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
	zmd->sb[0].dev = zmd->sb[0].zone->dev;
	ret = dmz_get_sb(zmd, &zmd->sb[0], 0);
	if (ret) {
		dmz_dev_err(zmd->sb[0].dev, "Read primary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, &zmd->sb[0], false);

	/* Read and check secondary super block */
	if (ret == 0) {
		sb_good[0] = true;
		if (!zmd->sb[1].zone) {
			unsigned int zone_id =
				zmd->sb[0].zone->id + zmd->nr_meta_zones;

			zmd->sb[1].zone = dmz_get(zmd, zone_id);
		}
		zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
		zmd->sb[1].dev = zmd->sb[0].dev;
		ret = dmz_get_sb(zmd, &zmd->sb[1], 1);
	} else
		ret = dmz_lookup_secondary_sb(zmd);

	if (ret) {
		dmz_dev_err(zmd->sb[1].dev, "Read secondary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, &zmd->sb[1], false);
	if (ret == 0)
		sb_good[1] = true;

	/* Use highest generation sb first */
	if (!sb_good[0] && !sb_good[1]) {
		dmz_zmd_err(zmd, "No valid super block found");
		return -EIO;
	}

	if (sb_good[0])
		sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen);
	else {
		ret = dmz_recover_mblocks(zmd, 0);
		if (ret) {
			dmz_dev_err(zmd->sb[0].dev,
				    "Recovery of superblock 0 failed");
			return -EIO;
		}
	}

	if (sb_good[1])
		sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen);
	else {
		ret = dmz_recover_mblocks(zmd, 1);
		if (ret) {
			dmz_dev_err(zmd->sb[1].dev,
				    "Recovery of superblock 1 failed");
			return -EIO;
		}
	}

	if (sb_gen[0] >= sb_gen[1]) {
		zmd->sb_gen = sb_gen[0];
		zmd->mblk_primary = 0;
	} else {
		zmd->sb_gen = sb_gen[1];
		zmd->mblk_primary = 1;
	}

	dmz_dev_debug(zmd->sb[zmd->mblk_primary].dev,
		      "Using super block %u (gen %llu)",
		      zmd->mblk_primary, zmd->sb_gen);

	if (zmd->sb_version > 1) {
		int i;
		struct dmz_sb *sb;

		sb = kzalloc(sizeof(struct dmz_sb), GFP_KERNEL);
		if (!sb)
			return -ENOMEM;
		for (i = 1; i < zmd->nr_devs; i++) {
			sb->block = 0;
			sb->zone = dmz_get(zmd, zmd->dev[i].zone_offset);
			sb->dev = &zmd->dev[i];
			if (!dmz_is_meta(sb->zone)) {
				dmz_dev_err(sb->dev,
					    "Tertiary super block zone %u not marked as metadata zone",
					    sb->zone->id);
				ret = -EINVAL;
				goto out_kfree;
			}
			ret = dmz_get_sb(zmd, sb, i + 1);
			if (ret) {
				dmz_dev_err(sb->dev,
					    "Read tertiary super block failed");
				dmz_free_mblock(zmd, sb->mblk);
				goto out_kfree;
			}
			ret = dmz_check_sb(zmd, sb, true);
			dmz_free_mblock(zmd, sb->mblk);
			if (ret == -EINVAL)
				goto out_kfree;
		}
out_kfree:
		kfree(sb);
	}
	return ret;
}
/*
 * Initialize a zone descriptor.
 */
static int dmz_init_zone(struct blk_zone *blkz, unsigned int num, void *data)
{
	struct dmz_dev *dev = data;
	struct dmz_metadata *zmd = dev->metadata;
	int idx = num + dev->zone_offset;
	struct dm_zone *zone;

	zone = dmz_insert(zmd, idx, dev);
	if (IS_ERR(zone))
		return PTR_ERR(zone);

	if (blkz->len != zmd->zone_nr_sectors) {
		if (zmd->sb_version > 1) {
			/* Ignore the eventual runt (smaller) zone */
			set_bit(DMZ_OFFLINE, &zone->flags);
			return 0;
		} else if (blkz->start + blkz->len == dev->capacity)
			return 0;
		return -ENXIO;
	}

	/*
	 * Devices that have zones with a capacity smaller than the zone size
	 * (e.g. NVMe zoned namespaces) are not supported.
	 */
	if (blkz->capacity != blkz->len)
		return -ENXIO;

	switch (blkz->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		set_bit(DMZ_RND, &zone->flags);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		set_bit(DMZ_SEQ, &zone->flags);
		break;
	default:
		return -ENXIO;
	}

	if (dmz_is_rnd(zone))
		zone->wp_block = 0;
	else
		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);

	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
		set_bit(DMZ_OFFLINE, &zone->flags);
	else if (blkz->cond == BLK_ZONE_COND_READONLY)
		set_bit(DMZ_READ_ONLY, &zone->flags);
	else {
		zmd->nr_useable_zones++;
		if (dmz_is_rnd(zone)) {
			zmd->nr_rnd_zones++;
			if (zmd->nr_devs == 1 && !zmd->sb[0].zone) {
				/* Primary super block zone */
				zmd->sb[0].zone = zone;
			}
		}
		if (zmd->nr_devs > 1 && num == 0) {
			/*
			 * Tertiary superblock zones are always at the
			 * start of the zoned devices, so mark them
			 * as metadata zone.
			 */
			set_bit(DMZ_META, &zone->flags);
		}
	}
	return 0;
}
static int dmz_emulate_zones(struct dmz_metadata *zmd, struct dmz_dev *dev)
{
	int idx;
	sector_t zone_offset = 0;

	for (idx = 0; idx < dev->nr_zones; idx++) {
		struct dm_zone *zone;

		zone = dmz_insert(zmd, idx, dev);
		if (IS_ERR(zone))
			return PTR_ERR(zone);
		set_bit(DMZ_CACHE, &zone->flags);
		zone->wp_block = 0;
		zmd->nr_cache_zones++;
		zmd->nr_useable_zones++;
		if (dev->capacity - zone_offset < zmd->zone_nr_sectors) {
			/* Disable runt zone */
			set_bit(DMZ_OFFLINE, &zone->flags);
			break;
		}
		zone_offset += zmd->zone_nr_sectors;
	}
	return 0;
}

/*
 * Free zones descriptors.
 */
static void dmz_drop_zones(struct dmz_metadata *zmd)
{
	int idx;

	for (idx = 0; idx < zmd->nr_zones; idx++) {
		struct dm_zone *zone = xa_load(&zmd->zones, idx);

		kfree(zone);
		xa_erase(&zmd->zones, idx);
	}
	xa_destroy(&zmd->zones);
}
/*
 * Allocate and initialize zone descriptors using the zone
 * information from disk.
 */
static int dmz_init_zones(struct dmz_metadata *zmd)
{
	int i, ret;
	struct dmz_dev *zoned_dev = &zmd->dev[0];

	/* Init */
	zmd->zone_nr_sectors = zmd->dev[0].zone_nr_sectors;
	zmd->zone_nr_sectors_shift = ilog2(zmd->zone_nr_sectors);
	zmd->zone_nr_blocks = dmz_sect2blk(zmd->zone_nr_sectors);
	zmd->zone_nr_blocks_shift = ilog2(zmd->zone_nr_blocks);
	zmd->zone_bitmap_size = zmd->zone_nr_blocks >> 3;
	zmd->zone_nr_bitmap_blocks =
		max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
	zmd->zone_bits_per_mblk = min_t(sector_t, zmd->zone_nr_blocks,
					DMZ_BLOCK_SIZE_BITS);

	/* Allocate zone array */
	zmd->nr_zones = 0;
	for (i = 0; i < zmd->nr_devs; i++) {
		struct dmz_dev *dev = &zmd->dev[i];

		dev->metadata = zmd;
		zmd->nr_zones += dev->nr_zones;

		atomic_set(&dev->unmap_nr_rnd, 0);
		INIT_LIST_HEAD(&dev->unmap_rnd_list);
		INIT_LIST_HEAD(&dev->map_rnd_list);

		atomic_set(&dev->unmap_nr_seq, 0);
		INIT_LIST_HEAD(&dev->unmap_seq_list);
		INIT_LIST_HEAD(&dev->map_seq_list);
	}

	if (!zmd->nr_zones) {
		DMERR("(%s): No zones found", zmd->devname);
		return -ENXIO;
	}
	xa_init(&zmd->zones);

	DMDEBUG("(%s): Using %zu B for zone information",
		zmd->devname, sizeof(struct dm_zone) * zmd->nr_zones);

	if (zmd->nr_devs > 1) {
		ret = dmz_emulate_zones(zmd, &zmd->dev[0]);
		if (ret < 0) {
			DMDEBUG("(%s): Failed to emulate zones, error %d",
				zmd->devname, ret);
			dmz_drop_zones(zmd);
			return ret;
		}

		/*
		 * Primary superblock zone is always at zone 0 when multiple
		 * drives are present.
		 */
		zmd->sb[0].zone = dmz_get(zmd, 0);

		for (i = 1; i < zmd->nr_devs; i++) {
			zoned_dev = &zmd->dev[i];

			ret = blkdev_report_zones(zoned_dev->bdev, 0,
						  BLK_ALL_ZONES,
						  dmz_init_zone, zoned_dev);
			if (ret < 0) {
				DMDEBUG("(%s): Failed to report zones, error %d",
					zmd->devname, ret);
				dmz_drop_zones(zmd);
				return ret;
			}
		}
		return 0;
	}

	/*
	 * Get zone information and initialize zone descriptors. At the same
	 * time, determine where the super block should be: first block of the
	 * first randomly writable zone.
	 */
	ret = blkdev_report_zones(zoned_dev->bdev, 0, BLK_ALL_ZONES,
				  dmz_init_zone, zoned_dev);
	if (ret < 0) {
		DMDEBUG("(%s): Failed to report zones, error %d",
			zmd->devname, ret);
		dmz_drop_zones(zmd);
		return ret;
	}

	return 0;
}
static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx,
			      void *data)
{
	struct dm_zone *zone = data;

	clear_bit(DMZ_OFFLINE, &zone->flags);
	clear_bit(DMZ_READ_ONLY, &zone->flags);
	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
		set_bit(DMZ_OFFLINE, &zone->flags);
	else if (blkz->cond == BLK_ZONE_COND_READONLY)
		set_bit(DMZ_READ_ONLY, &zone->flags);

	if (dmz_is_seq(zone))
		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
	else
		zone->wp_block = 0;
	return 0;
}

/*
 * Update a zone information.
 */
static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	struct dmz_dev *dev = zone->dev;
	unsigned int noio_flag;
	int ret;

	if (dev->flags & DMZ_BDEV_REGULAR)
		return 0;

	/*
	 * Get zone information from disk. Since blkdev_report_zones() uses
	 * GFP_KERNEL by default for memory allocations, set the per-task
	 * PF_MEMALLOC_NOIO flag so that all allocations are done as if
	 * GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	ret = blkdev_report_zones(dev->bdev, dmz_start_sect(zmd, zone), 1,
				  dmz_update_zone_cb, zone);
	memalloc_noio_restore(noio_flag);

	if (ret == 0)
		ret = -EIO;
	if (ret < 0) {
		dmz_dev_err(dev, "Get zone %u report failed",
			    zone->id);
		dmz_check_bdev(dev);
		return ret;
	}

	return 0;
}
/*
 * Check a zone write pointer position when the zone is marked
 * with the sequential write error flag.
 */
static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
				    struct dm_zone *zone)
{
	struct dmz_dev *dev = zone->dev;
	unsigned int wp = 0;
	int ret;

	wp = zone->wp_block;
	ret = dmz_update_zone(zmd, zone);
	if (ret)
		return ret;

	dmz_dev_warn(dev, "Processing zone %u write error (zone wp %u/%u)",
		     zone->id, zone->wp_block, wp);

	if (zone->wp_block < wp) {
		dmz_invalidate_blocks(zmd, zone, zone->wp_block,
				      wp - zone->wp_block);
	}

	return 0;
}

/*
 * Reset a zone write pointer.
 */
static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	int ret;

	/*
	 * Ignore offline zones, read only zones,
	 * and conventional zones.
	 */
	if (dmz_is_offline(zone) ||
	    dmz_is_readonly(zone) ||
	    dmz_is_rnd(zone))
		return 0;

	if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
		struct dmz_dev *dev = zone->dev;
		unsigned int noio_flag;

		noio_flag = memalloc_noio_save();
		ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
				       dmz_start_sect(zmd, zone),
				       zmd->zone_nr_sectors);
		memalloc_noio_restore(noio_flag);
		if (ret) {
			dmz_dev_err(dev, "Reset zone %u failed %d",
				    zone->id, ret);
			return ret;
		}
	}

	/* Clear write error bit and rewind write pointer position */
	clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
	zone->wp_block = 0;

	return 0;
}
static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);
/*
 * Initialize chunk mapping.
 */
static int dmz_load_mapping(struct dmz_metadata *zmd)
{
	struct dm_zone *dzone, *bzone;
	struct dmz_mblock *dmap_mblk = NULL;
	struct dmz_map *dmap;
	unsigned int i = 0, e = 0, chunk = 0;
	unsigned int dzone_id;
	unsigned int bzone_id;

	/* Metadata block array for the chunk mapping table */
	zmd->map_mblk = kcalloc(zmd->nr_map_blocks,
				sizeof(struct dmz_mblock *), GFP_KERNEL);
	if (!zmd->map_mblk)
		return -ENOMEM;

	/* Get chunk mapping table blocks and initialize zone mapping */
	while (chunk < zmd->nr_chunks) {
		if (!dmap_mblk) {
			/* Get mapping block */
			dmap_mblk = dmz_get_mblock(zmd, i + 1);
			if (IS_ERR(dmap_mblk))
				return PTR_ERR(dmap_mblk);
			zmd->map_mblk[i] = dmap_mblk;
			dmap = dmap_mblk->data;
			i++;
			e = 0;
		}

		/* Check data zone */
		dzone_id = le32_to_cpu(dmap[e].dzone_id);
		if (dzone_id == DMZ_MAP_UNMAPPED)
			goto next;

		if (dzone_id >= zmd->nr_zones) {
			dmz_zmd_err(zmd, "Chunk %u mapping: invalid data zone ID %u",
				    chunk, dzone_id);
			return -EIO;
		}

		dzone = dmz_get(zmd, dzone_id);
		if (!dzone) {
			dmz_zmd_err(zmd, "Chunk %u mapping: data zone %u not present",
				    chunk, dzone_id);
			return -EIO;
		}
		set_bit(DMZ_DATA, &dzone->flags);
		dzone->chunk = chunk;
		dmz_get_zone_weight(zmd, dzone);

		if (dmz_is_cache(dzone))
			list_add_tail(&dzone->link, &zmd->map_cache_list);
		else if (dmz_is_rnd(dzone))
			list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
		else
			list_add_tail(&dzone->link, &dzone->dev->map_seq_list);

		/* Check buffer zone */
		bzone_id = le32_to_cpu(dmap[e].bzone_id);
		if (bzone_id == DMZ_MAP_UNMAPPED)
			goto next;

		if (bzone_id >= zmd->nr_zones) {
			dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone ID %u",
				    chunk, bzone_id);
			return -EIO;
		}

		bzone = dmz_get(zmd, bzone_id);
		if (!bzone) {
			dmz_zmd_err(zmd, "Chunk %u mapping: buffer zone %u not present",
				    chunk, bzone_id);
			return -EIO;
		}
		if (!dmz_is_rnd(bzone) && !dmz_is_cache(bzone)) {
			dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone %u",
				    chunk, bzone_id);
			return -EIO;
		}

		set_bit(DMZ_DATA, &bzone->flags);
		set_bit(DMZ_BUF, &bzone->flags);
		bzone->chunk = chunk;
		bzone->bzone = dzone;
		dzone->bzone = bzone;
		dmz_get_zone_weight(zmd, bzone);
		if (dmz_is_cache(bzone))
			list_add_tail(&bzone->link, &zmd->map_cache_list);
		else
			list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
next:
		chunk++;
		e++;
		if (e >= DMZ_MAP_ENTRIES)
			dmap_mblk = NULL;
	}

	/*
	 * At this point, only meta zones and mapped data zones were
	 * fully initialized. All remaining zones are unmapped data
	 * zones. Finish initializing those here.
	 */
	for (i = 0; i < zmd->nr_zones; i++) {
		dzone = dmz_get(zmd, i);
		if (!dzone)
			continue;
		if (dmz_is_meta(dzone))
			continue;
		if (dmz_is_offline(dzone))
			continue;

		if (dmz_is_cache(dzone))
			zmd->nr_cache++;
		else if (dmz_is_rnd(dzone))
			dzone->dev->nr_rnd++;
		else
			dzone->dev->nr_seq++;

		if (dmz_is_data(dzone)) {
			/* Already initialized */
			continue;
		}

		/* Unmapped data zone */
		set_bit(DMZ_DATA, &dzone->flags);
		dzone->chunk = DMZ_MAP_UNMAPPED;
		if (dmz_is_cache(dzone)) {
			list_add_tail(&dzone->link, &zmd->unmap_cache_list);
			atomic_inc(&zmd->unmap_nr_cache);
		} else if (dmz_is_rnd(dzone)) {
			list_add_tail(&dzone->link,
				      &dzone->dev->unmap_rnd_list);
			atomic_inc(&dzone->dev->unmap_nr_rnd);
		} else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) {
			list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list);
			set_bit(DMZ_RESERVED, &dzone->flags);
			atomic_inc(&zmd->nr_reserved_seq_zones);
			dzone->dev->nr_seq--;
		} else {
			list_add_tail(&dzone->link,
				      &dzone->dev->unmap_seq_list);
			atomic_inc(&dzone->dev->unmap_nr_seq);
		}
	}

	return 0;
}
/*
 * Set a data chunk mapping.
 */
static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk,
				  unsigned int dzone_id, unsigned int bzone_id)
{
	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
	struct dmz_map *dmap = dmap_mblk->data;
	int map_idx = chunk & DMZ_MAP_ENTRIES_MASK;

	dmap[map_idx].dzone_id = cpu_to_le32(dzone_id);
	dmap[map_idx].bzone_id = cpu_to_le32(bzone_id);
	dmz_dirty_mblock(zmd, dmap_mblk);
}
/*
 * The list of mapped zones is maintained in LRU order.
 * This rotates a zone at the end of its map list.
 */
static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	if (list_empty(&zone->link))
		return;

	list_del_init(&zone->link);
	if (dmz_is_seq(zone)) {
		/* LRU rotate sequential zone */
		list_add_tail(&zone->link, &zone->dev->map_seq_list);
	} else if (dmz_is_cache(zone)) {
		/* LRU rotate cache zone */
		list_add_tail(&zone->link, &zmd->map_cache_list);
	} else {
		/* LRU rotate random zone */
		list_add_tail(&zone->link, &zone->dev->map_rnd_list);
	}
}

/*
 * The list of mapped random zones is maintained
 * in LRU order. This rotates a zone at the end of the list.
 */
static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	__dmz_lru_zone(zmd, zone);
	if (zone->bzone)
		__dmz_lru_zone(zmd, zone->bzone);
}
/*
 * Wait for any zone to be freed.
 */
static void dmz_wait_for_free_zones(struct dmz_metadata *zmd)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&zmd->free_wq, &wait, TASK_UNINTERRUPTIBLE);
	dmz_unlock_map(zmd);
	dmz_unlock_metadata(zmd);

	io_schedule_timeout(HZ);

	dmz_lock_metadata(zmd);
	dmz_lock_map(zmd);
	finish_wait(&zmd->free_wq, &wait);
}
/*
 * Lock a zone for reclaim (set the zone RECLAIM bit).
 * Returns false if the zone cannot be locked or if it is already locked
 * and true otherwise.
 */
int dmz_lock_zone_reclaim(struct dm_zone *zone)
{
	/* Active zones cannot be reclaimed */
	if (dmz_is_active(zone))
		return 0;

	return !test_and_set_bit(DMZ_RECLAIM, &zone->flags);
}

/*
 * Clear a zone reclaim flag.
 */
void dmz_unlock_zone_reclaim(struct dm_zone *zone)
{
	WARN_ON(dmz_is_active(zone));
	WARN_ON(!dmz_in_reclaim(zone));

	clear_bit_unlock(DMZ_RECLAIM, &zone->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zone->flags, DMZ_RECLAIM);
}

/*
 * Wait for a zone reclaim to complete.
 */
static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	dmz_unlock_map(zmd);
	dmz_unlock_metadata(zmd);
	set_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
	wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
	clear_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
	dmz_lock_metadata(zmd);
	dmz_lock_map(zmd);
}
/*
 * Select a cache or random write zone for reclaim.
 */
static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd,
						    unsigned int idx, bool idle)
{
	struct dm_zone *dzone = NULL;
	struct dm_zone *zone, *maxw_z = NULL;
	struct list_head *zone_list;

	/* If we have cache zones select from the cache zone list */
	if (zmd->nr_cache) {
		zone_list = &zmd->map_cache_list;
		/* Try to reclaim random zones, too, when idle */
		if (idle && list_empty(zone_list))
			zone_list = &zmd->dev[idx].map_rnd_list;
	} else
		zone_list = &zmd->dev[idx].map_rnd_list;

	/*
	 * Find the buffer zone with the heaviest weight or the first (oldest)
	 * data zone that can be reclaimed.
	 */
	list_for_each_entry(zone, zone_list, link) {
		if (dmz_is_buf(zone)) {
			dzone = zone->bzone;
			if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
				continue;
			if (!maxw_z || maxw_z->weight < dzone->weight)
				maxw_z = dzone;
		} else {
			dzone = zone;
			if (dmz_lock_zone_reclaim(dzone))
				return dzone;
		}
	}

	if (maxw_z && dmz_lock_zone_reclaim(maxw_z))
		return maxw_z;

	/*
	 * If we come here, none of the zones inspected could be locked for
	 * reclaim. Try again, being more aggressive, that is, find the
	 * first zone that can be reclaimed regardless of its weight.
	 */
	list_for_each_entry(zone, zone_list, link) {
		if (dmz_is_buf(zone)) {
			dzone = zone->bzone;
			if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
				continue;
		} else
			dzone = zone;
		if (dmz_lock_zone_reclaim(dzone))
			return dzone;
	}

	return NULL;
}
/*
 * Select a buffered sequential zone for reclaim.
 */
static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd,
						    unsigned int idx)
{
	struct dm_zone *zone;

	list_for_each_entry(zone, &zmd->dev[idx].map_seq_list, link) {
		if (!zone->bzone)
			continue;
		if (dmz_lock_zone_reclaim(zone))
			return zone;
	}

	return NULL;
}

/*
 * Select a zone for reclaim.
 */
struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
					 unsigned int dev_idx, bool idle)
{
	struct dm_zone *zone = NULL;

	/*
	 * Search for a zone candidate to reclaim: 2 cases are possible.
	 * (1) There is no free sequential zones. Then a random data zone
	 *     cannot be reclaimed. So choose a sequential zone to reclaim so
	 *     that afterward a random zone can be reclaimed.
	 * (2) At least one free sequential zone is available, then choose
	 *     the oldest random zone (data or buffer) that can be locked.
	 */
	dmz_lock_map(zmd);
	if (list_empty(&zmd->reserved_seq_zones_list))
		zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx);
	if (!zone)
		zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle);
	dmz_unlock_map(zmd);

	return zone;
}
/*
 * Get the zone mapping a chunk, if the chunk is mapped already.
 * If no mapping exist and the operation is WRITE, a zone is
 * allocated and used to map the chunk.
 * The zone returned will be set to the active state.
 */
struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd,
				      unsigned int chunk, enum req_op op)
{
	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
	struct dmz_map *dmap = dmap_mblk->data;
	int dmap_idx = chunk & DMZ_MAP_ENTRIES_MASK;
	unsigned int dzone_id;
	struct dm_zone *dzone = NULL;
	int ret = 0;
	int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;

	dmz_lock_map(zmd);
again:
	/* Get the chunk mapping */
	dzone_id = le32_to_cpu(dmap[dmap_idx].dzone_id);
	if (dzone_id == DMZ_MAP_UNMAPPED) {
		/*
		 * Read or discard in unmapped chunks are fine. But for
		 * writes, we need a mapping, so get one.
		 */
		if (op != REQ_OP_WRITE)
			goto out;

		/* Allocate a random zone */
		dzone = dmz_alloc_zone(zmd, 0, alloc_flags);
		if (!dzone) {
			if (dmz_dev_is_dying(zmd)) {
				dzone = ERR_PTR(-EIO);
				goto out;
			}
			dmz_wait_for_free_zones(zmd);
			goto again;
		}

		dmz_map_zone(zmd, dzone, chunk);

	} else {
		/* The chunk is already mapped: get the mapping zone */
		dzone = dmz_get(zmd, dzone_id);
		if (!dzone) {
			dzone = ERR_PTR(-EIO);
			goto out;
		}
		if (dzone->chunk != chunk) {
			dzone = ERR_PTR(-EIO);
			goto out;
		}

		/* Repair write pointer if the sequential dzone has error */
		if (dmz_seq_write_err(dzone)) {
			ret = dmz_handle_seq_write_err(zmd, dzone);
			if (ret) {
				dzone = ERR_PTR(-EIO);
				goto out;
			}
			clear_bit(DMZ_SEQ_WRITE_ERR, &dzone->flags);
		}
	}

	/*
	 * If the zone is being reclaimed, the chunk mapping may change
	 * to a different zone. So wait for reclaim and retry. Otherwise,
	 * activate the zone (this will prevent reclaim from touching it).
	 */
	if (dmz_in_reclaim(dzone)) {
		dmz_wait_for_reclaim(zmd, dzone);
		goto again;
	}
	dmz_activate_zone(dzone);
	dmz_lru_zone(zmd, dzone);
out:
	dmz_unlock_map(zmd);

	return dzone;
}
/*
 * Write and discard change the block validity of data zones and their buffer
 * zones. Check here that valid blocks are still present. If all blocks are
 * invalid, the zones can be unmapped on the fly without waiting for reclaim
 * to do it.
 */
void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *dzone)
{
	struct dm_zone *bzone;

	dmz_lock_map(zmd);

	bzone = dzone->bzone;
	if (bzone) {
		if (dmz_weight(bzone))
			dmz_lru_zone(zmd, bzone);
		else {
			/* Empty buffer zone: reclaim it */
			dmz_unmap_zone(zmd, bzone);
			dmz_free_zone(zmd, bzone);
			bzone = NULL;
		}
	}

	/* Deactivate the data zone */
	dmz_deactivate_zone(dzone);
	if (dmz_is_active(dzone) || bzone || dmz_weight(dzone))
		dmz_lru_zone(zmd, dzone);
	else {
		/* Unbuffered inactive empty data zone: reclaim it */
		dmz_unmap_zone(zmd, dzone);
		dmz_free_zone(zmd, dzone);
	}

	dmz_unlock_map(zmd);
}
/*
 * Allocate and map a random zone to buffer a chunk
 * already mapped to a sequential zone.
 */
struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
				     struct dm_zone *dzone)
{
	struct dm_zone *bzone;
	int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;

	dmz_lock_map(zmd);
again:
	bzone = dzone->bzone;
	if (bzone)
		goto out;

	/* Allocate a random zone */
	bzone = dmz_alloc_zone(zmd, 0, alloc_flags);
	if (!bzone) {
		if (dmz_dev_is_dying(zmd)) {
			bzone = ERR_PTR(-EIO);
			goto out;
		}
		dmz_wait_for_free_zones(zmd);
		goto again;
	}

	/* Update the chunk mapping */
	dmz_set_chunk_mapping(zmd, dzone->chunk, dzone->id, bzone->id);

	set_bit(DMZ_BUF, &bzone->flags);
	bzone->chunk = dzone->chunk;
	bzone->bzone = dzone;
	dzone->bzone = bzone;
	if (dmz_is_cache(bzone))
		list_add_tail(&bzone->link, &zmd->map_cache_list);
	else
		list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
out:
	dmz_unlock_map(zmd);

	return bzone;
}
/*
 * Get an unmapped (free) zone.
 * This must be called with the mapping lock held.
 */
struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned int dev_idx,
			       unsigned long flags)
{
	struct list_head *list;
	struct dm_zone *zone;
	int i;

	/* Schedule reclaim to ensure free zones are available */
	if (!(flags & DMZ_ALLOC_RECLAIM)) {
		for (i = 0; i < zmd->nr_devs; i++)
			dmz_schedule_reclaim(zmd->dev[i].reclaim);
	}

	i = 0;
again:
	if (flags & DMZ_ALLOC_CACHE)
		list = &zmd->unmap_cache_list;
	else if (flags & DMZ_ALLOC_RND)
		list = &zmd->dev[dev_idx].unmap_rnd_list;
	else
		list = &zmd->dev[dev_idx].unmap_seq_list;

	if (list_empty(list)) {
		/*
		 * No free zone: return NULL if this is not for reclaim.
		 */
		if (!(flags & DMZ_ALLOC_RECLAIM))
			return NULL;
		/*
		 * Try to allocate from other devices
		 */
		if (i < zmd->nr_devs) {
			dev_idx = (dev_idx + 1) % zmd->nr_devs;
			i++;
			goto again;
		}

		/*
		 * Fallback to the reserved sequential zones
		 */
		zone = list_first_entry_or_null(&zmd->reserved_seq_zones_list,
						struct dm_zone, link);
		if (zone) {
			list_del_init(&zone->link);
			atomic_dec(&zmd->nr_reserved_seq_zones);
		}
		return zone;
	}

	zone = list_first_entry(list, struct dm_zone, link);
	list_del_init(&zone->link);

	if (dmz_is_cache(zone))
		atomic_dec(&zmd->unmap_nr_cache);
	else if (dmz_is_rnd(zone))
		atomic_dec(&zone->dev->unmap_nr_rnd);
	else
		atomic_dec(&zone->dev->unmap_nr_seq);

	if (dmz_is_offline(zone)) {
		dmz_zmd_warn(zmd, "Zone %u is offline", zone->id);
		zone = NULL;
		goto again;
	}
	if (dmz_is_meta(zone)) {
		dmz_zmd_warn(zmd, "Zone %u has metadata", zone->id);
		zone = NULL;
		goto again;
	}
	return zone;
}
/*
 * Free a zone.
 * This must be called with the mapping lock held.
 */
void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	/* If this is a sequential zone, reset it */
	if (dmz_is_seq(zone))
		dmz_reset_zone(zmd, zone);

	/* Return the zone to its type unmap list */
	if (dmz_is_cache(zone)) {
		list_add_tail(&zone->link, &zmd->unmap_cache_list);
		atomic_inc(&zmd->unmap_nr_cache);
	} else if (dmz_is_rnd(zone)) {
		list_add_tail(&zone->link, &zone->dev->unmap_rnd_list);
		atomic_inc(&zone->dev->unmap_nr_rnd);
	} else if (dmz_is_reserved(zone)) {
		list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
		atomic_inc(&zmd->nr_reserved_seq_zones);
	} else {
		list_add_tail(&zone->link, &zone->dev->unmap_seq_list);
		atomic_inc(&zone->dev->unmap_nr_seq);
	}

	wake_up_all(&zmd->free_wq);
}
/*
 * Map a chunk to a zone.
 * This must be called with the mapping lock held.
 */
void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
		  unsigned int chunk)
{
	/* Set the chunk mapping */
	dmz_set_chunk_mapping(zmd, chunk, dzone->id,
			      DMZ_MAP_UNMAPPED);
	dzone->chunk = chunk;
	if (dmz_is_cache(dzone))
		list_add_tail(&dzone->link, &zmd->map_cache_list);
	else if (dmz_is_rnd(dzone))
		list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
	else
		list_add_tail(&dzone->link, &dzone->dev->map_seq_list);
}
/*
 * Unmap a zone.
 * This must be called with the mapping lock held.
 */
void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int chunk = zone->chunk;
	unsigned int dzone_id;

	if (chunk == DMZ_MAP_UNMAPPED) {
		/* Already unmapped */
		return;
	}

	if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
		/*
		 * Unmapping the chunk buffer zone: clear only
		 * the chunk buffer mapping
		 */
		dzone_id = zone->bzone->id;
		zone->bzone->bzone = NULL;
		zone->bzone = NULL;
	} else {
		/*
		 * Unmapping the chunk data zone: the zone must
		 * not be buffered.
		 */
		if (WARN_ON(zone->bzone)) {
			zone->bzone->bzone = NULL;
			zone->bzone = NULL;
		}
		dzone_id = DMZ_MAP_UNMAPPED;
	}

	dmz_set_chunk_mapping(zmd, chunk, dzone_id, DMZ_MAP_UNMAPPED);

	zone->chunk = DMZ_MAP_UNMAPPED;
	list_del_init(&zone->link);
}
/*
 * Set @nr_bits bits in @bitmap starting from @bit.
 * Return the number of bits changed from 0 to 1.
 */
static unsigned int dmz_set_bits(unsigned long *bitmap,
				 unsigned int bit, unsigned int nr_bits)
{
	unsigned long *addr;
	unsigned int end = bit + nr_bits;
	unsigned int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Try to set the whole word at once */
			addr = bitmap + BIT_WORD(bit);
			if (*addr == 0) {
				*addr = ULONG_MAX;
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (!test_and_set_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}
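/*
 * Behaviour sketch (illustrative): for a word-aligned, word-sized range
 * the loop above takes the fast path and flips a whole word at once;
 * otherwise it falls back to bit-by-bit test_and_set_bit():
 *
 *	n = dmz_set_bits(bitmap, 0, BITS_PER_LONG);   (whole-word path)
 *	n = dmz_set_bits(bitmap, 3, 5);               (bit-by-bit path)
 *
 * In both cases n counts only the bits that actually changed from 0 to 1,
 * which is what keeps the zone weight accounting exact.
 */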
/*
 * Get the bitmap block storing the bit for chunk_block in zone.
 */
static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
					 struct dm_zone *zone,
					 sector_t chunk_block)
{
	sector_t bitmap_block = 1 + zmd->nr_map_blocks +
		(sector_t)(zone->id * zmd->zone_nr_bitmap_blocks) +
		(chunk_block >> DMZ_BLOCK_SHIFT_BITS);

	return dmz_get_mblock(zmd, bitmap_block);
}
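/*
 * Layout sketch (hypothetical numbers): bitmap blocks are stored after the
 * super block and the chunk mapping table, grouped per zone. With
 * nr_map_blocks = 128 and 2 bitmap blocks per zone, the bit for block 40000
 * of zone 3 lives in metadata block:
 *
 *	1 + 128 + (3 * 2) + (40000 >> DMZ_BLOCK_SHIFT_BITS)
 *	  = 129 + 6 + 1 = 136
 *
 * since DMZ_BLOCK_SHIFT_BITS is 15 for 4 KB blocks (32768 bits per block).
 */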
/*
 * Copy the valid blocks bitmap of from_zone to the bitmap of to_zone.
 */
int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
			  struct dm_zone *to_zone)
{
	struct dmz_mblock *from_mblk, *to_mblk;
	sector_t chunk_block = 0;

	/* Get the zones bitmap blocks */
	while (chunk_block < zmd->zone_nr_blocks) {
		from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
		if (IS_ERR(from_mblk))
			return PTR_ERR(from_mblk);
		to_mblk = dmz_get_bitmap(zmd, to_zone, chunk_block);
		if (IS_ERR(to_mblk)) {
			dmz_release_mblock(zmd, from_mblk);
			return PTR_ERR(to_mblk);
		}

		memcpy(to_mblk->data, from_mblk->data, DMZ_BLOCK_SIZE);
		dmz_dirty_mblock(zmd, to_mblk);

		dmz_release_mblock(zmd, to_mblk);
		dmz_release_mblock(zmd, from_mblk);

		chunk_block += zmd->zone_bits_per_mblk;
	}

	to_zone->weight = from_zone->weight;

	return 0;
}

/*
 * Merge the valid blocks bitmap of from_zone into the bitmap of to_zone,
 * starting from chunk_block.
 */
int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
			   struct dm_zone *to_zone, sector_t chunk_block)
{
	unsigned int nr_blocks;
	int ret;

	/* Get the zones bitmap blocks */
	while (chunk_block < zmd->zone_nr_blocks) {
		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
		if (ret <= 0)
			break;

		nr_blocks = ret;
		ret = dmz_validate_blocks(zmd, to_zone, chunk_block, nr_blocks);
		if (ret)
			return ret;

		chunk_block += nr_blocks;
	}

	return 0;
}
/*
 * Validate all the blocks in the range [block..block+nr_blocks-1].
 */
int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
			sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int count, bit, nr_bits;
	unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
	struct dmz_mblock *mblk;
	unsigned int n = 0;

	dmz_zmd_debug(zmd, "=> VALIDATE zone %u, block %llu, %u blocks",
		      zone->id, (unsigned long long)chunk_block,
		      nr_blocks);

	WARN_ON(chunk_block + nr_blocks > zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Set bits */
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);

		count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
		if (count) {
			dmz_dirty_mblock(zmd, mblk);
			n += count;
		}
		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	if (likely(zone->weight + n <= zone_nr_blocks))
		zone->weight += n;
	else {
		dmz_zmd_warn(zmd, "Zone %u: weight %u should be <= %u",
			     zone->id, zone->weight,
			     zone_nr_blocks - n);
		zone->weight = zone_nr_blocks;
	}

	return 0;
}
/*
 * Clear nr_bits bits in bitmap starting from bit.
 * Return the number of bits cleared.
 */
static int dmz_clear_bits(unsigned long *bitmap, int bit, int nr_bits)
{
	unsigned long *addr;
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Try to clear whole word at once */
			addr = bitmap + BIT_WORD(bit);
			if (*addr == ULONG_MAX) {
				*addr = 0;
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (test_and_clear_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}

/*
 * Invalidate all the blocks in the range [block..block+nr_blocks-1].
 */
int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int count, bit, nr_bits;
	struct dmz_mblock *mblk;
	unsigned int n = 0;

	dmz_zmd_debug(zmd, "=> INVALIDATE zone %u, block %llu, %u blocks",
		      zone->id, (u64)chunk_block, nr_blocks);

	WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Clear bits */
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);

		count = dmz_clear_bits((unsigned long *)mblk->data,
				       bit, nr_bits);
		if (count) {
			dmz_dirty_mblock(zmd, mblk);
			n += count;
		}
		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	if (zone->weight >= n)
		zone->weight -= n;
	else {
		dmz_zmd_warn(zmd, "Zone %u: weight %u should be >= %u",
			     zone->id, zone->weight, n);
		zone->weight = 0;
	}

	return 0;
}

/*
 * Get a block bit value.
 */
static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t chunk_block)
{
	struct dmz_mblock *mblk;
	int ret;

	WARN_ON(chunk_block >= zmd->zone_nr_blocks);

	/* Get bitmap block */
	mblk = dmz_get_bitmap(zmd, zone, chunk_block);
	if (IS_ERR(mblk))
		return PTR_ERR(mblk);

	/* Get offset */
	ret = test_bit(chunk_block & DMZ_BLOCK_MASK_BITS,
		       (unsigned long *) mblk->data) != 0;

	dmz_release_mblock(zmd, mblk);

	return ret;
}

/*
 * Return the number of blocks from chunk_block to the first block with a bit
 * value specified by set. Search at most nr_blocks blocks from chunk_block.
 */
static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
				 sector_t chunk_block, unsigned int nr_blocks,
				 int set)
{
	struct dmz_mblock *mblk;
	unsigned int bit, set_bit, nr_bits;
	unsigned int zone_bits = zmd->zone_bits_per_mblk;
	unsigned long *bitmap;
	int n = 0;

	WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Get offset */
		bitmap = (unsigned long *) mblk->data;
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zone_bits - bit);
		if (set)
			set_bit = find_next_bit(bitmap, zone_bits, bit);
		else
			set_bit = find_next_zero_bit(bitmap, zone_bits, bit);
		dmz_release_mblock(zmd, mblk);

		n += set_bit - bit;
		if (set_bit < zone_bits)
			break;

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	return n;
}

/*
 * Test if chunk_block is valid. If it is, the number of consecutive
 * valid blocks from chunk_block will be returned.
 */
int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
		    sector_t chunk_block)
{
	int valid;

	valid = dmz_test_block(zmd, zone, chunk_block);
	if (valid <= 0)
		return valid;

	/* The block is valid: get the number of valid blocks from block */
	return dmz_to_next_set_block(zmd, zone, chunk_block,
				     zmd->zone_nr_blocks - chunk_block, 0);
}

/*
 * Find the first valid block from @chunk_block in @zone.
 * If such a block is found, its number is returned using
 * @chunk_block and the total number of valid blocks from @chunk_block
 * is returned.
 */
int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t *chunk_block)
{
	sector_t start_block = *chunk_block;
	int ret;

	ret = dmz_to_next_set_block(zmd, zone, start_block,
				    zmd->zone_nr_blocks - start_block, 1);
	if (ret < 0)
		return ret;

	start_block += ret;
	*chunk_block = start_block;

	return dmz_to_next_set_block(zmd, zone, start_block,
				     zmd->zone_nr_blocks - start_block, 0);
}
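
/*
 * Illustration (not driver code): for a zone bitmap 00111000... and
 * *chunk_block == 0, the first dmz_to_next_set_block() pass (set == 1)
 * returns 2, so *chunk_block becomes 2; the second pass (set == 0)
 * then returns 3, the length of the valid run starting there.
 */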

/*
 * Count the number of bits set starting from bit up to bit + nr_bits - 1.
 */
static int dmz_count_bits(void *bitmap, int bit, int nr_bits)
{
	unsigned long *addr;
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			addr = (unsigned long *)bitmap + BIT_WORD(bit);
			if (*addr == ULONG_MAX) {
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (test_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}

/*
 * Get a zone weight.
 */
static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	struct dmz_mblock *mblk;
	sector_t chunk_block = 0;
	unsigned int bit, nr_bits;
	unsigned int nr_blocks = zmd->zone_nr_blocks;
	void *bitmap;
	int n = 0;

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk)) {
			n = 0;
			break;
		}

		/* Count bits in this block */
		bitmap = mblk->data;
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
		n += dmz_count_bits(bitmap, bit, nr_bits);

		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	zone->weight = n;
}
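
/*
 * Illustration (not driver code): the weight is simply the population
 * count of the zone bitmap. A 256 MB zone of 4 KB blocks with half of
 * its 65536 blocks valid ends up with zone->weight == 32768.
 */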

/*
 * Cleanup the zoned metadata resources.
 */
static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
{
	struct rb_root *root;
	struct dmz_mblock *mblk, *next;
	int i;

	/* Release zone mapping resources */
	if (zmd->map_mblk) {
		for (i = 0; i < zmd->nr_map_blocks; i++)
			dmz_release_mblock(zmd, zmd->map_mblk[i]);
		kfree(zmd->map_mblk);
		zmd->map_mblk = NULL;
	}

	/* Release super blocks */
	for (i = 0; i < 2; i++) {
		if (zmd->sb[i].mblk) {
			dmz_free_mblock(zmd, zmd->sb[i].mblk);
			zmd->sb[i].mblk = NULL;
		}
	}

	/* Free cached blocks */
	while (!list_empty(&zmd->mblk_dirty_list)) {
		mblk = list_first_entry(&zmd->mblk_dirty_list,
					struct dmz_mblock, link);
		dmz_zmd_warn(zmd, "mblock %llu still in dirty list (ref %u)",
			     (u64)mblk->no, mblk->ref);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
	}

	while (!list_empty(&zmd->mblk_lru_list)) {
		mblk = list_first_entry(&zmd->mblk_lru_list,
					struct dmz_mblock, link);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
	}

	/* Sanity checks: the mblock rbtree should now be empty */
	root = &zmd->mblk_rbtree;
	rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
		dmz_zmd_warn(zmd, "mblock %llu ref %u still in rbtree",
			     (u64)mblk->no, mblk->ref);
		mblk->ref = 0;
		dmz_free_mblock(zmd, mblk);
	}

	/* Free the zone descriptors */
	dmz_drop_zones(zmd);

	mutex_destroy(&zmd->mblk_flush_lock);
	mutex_destroy(&zmd->map_lock);
}

static void dmz_print_dev(struct dmz_metadata *zmd, int num)
{
	struct dmz_dev *dev = &zmd->dev[num];

	if (!bdev_is_zoned(dev->bdev))
		dmz_dev_info(dev, "Regular block device");
	else
		dmz_dev_info(dev, "Host-managed zoned block device");

	if (zmd->sb_version > 1) {
		sector_t sector_offset =
			dev->zone_offset << zmd->zone_nr_sectors_shift;

		dmz_dev_info(dev, "  %llu 512-byte logical sectors (offset %llu)",
			     (u64)dev->capacity, (u64)sector_offset);
		dmz_dev_info(dev, "  %u zones of %llu 512-byte logical sectors (offset %llu)",
			     dev->nr_zones, (u64)zmd->zone_nr_sectors,
			     (u64)dev->zone_offset);
	} else {
		dmz_dev_info(dev, "  %llu 512-byte logical sectors",
			     (u64)dev->capacity);
		dmz_dev_info(dev, "  %u zones of %llu 512-byte logical sectors",
			     dev->nr_zones, (u64)zmd->zone_nr_sectors);
	}
}

/*
 * Initialize the zoned metadata.
 */
int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
		     struct dmz_metadata **metadata,
		     const char *devname)
{
	struct dmz_metadata *zmd;
	unsigned int i;
	struct dm_zone *zone;
	int ret;

	zmd = kzalloc(sizeof(struct dmz_metadata), GFP_KERNEL);
	if (!zmd)
		return -ENOMEM;

	strcpy(zmd->devname, devname);
	zmd->dev = dev;
	zmd->nr_devs = num_dev;
	zmd->mblk_rbtree = RB_ROOT;
	init_rwsem(&zmd->mblk_sem);
	mutex_init(&zmd->mblk_flush_lock);
	spin_lock_init(&zmd->mblk_lock);
	INIT_LIST_HEAD(&zmd->mblk_lru_list);
	INIT_LIST_HEAD(&zmd->mblk_dirty_list);

	mutex_init(&zmd->map_lock);

	atomic_set(&zmd->unmap_nr_cache, 0);
	INIT_LIST_HEAD(&zmd->unmap_cache_list);
	INIT_LIST_HEAD(&zmd->map_cache_list);

	atomic_set(&zmd->nr_reserved_seq_zones, 0);
	INIT_LIST_HEAD(&zmd->reserved_seq_zones_list);

	init_waitqueue_head(&zmd->free_wq);

	/* Initialize zone descriptors */
	ret = dmz_init_zones(zmd);
	if (ret)
		goto err;

	/* Get super block */
	ret = dmz_load_sb(zmd);
	if (ret)
		goto err;

	/* Set metadata zones starting from sb_zone */
	for (i = 0; i < zmd->nr_meta_zones << 1; i++) {
		zone = dmz_get(zmd, zmd->sb[0].zone->id + i);
		if (!zone) {
			dmz_zmd_err(zmd,
				    "metadata zone %u not present", i);
			ret = -ENXIO;
			goto err;
		}
		if (!dmz_is_rnd(zone) && !dmz_is_cache(zone)) {
			dmz_zmd_err(zmd,
				    "metadata zone %d is not random", i);
			ret = -ENXIO;
			goto err;
		}
		set_bit(DMZ_META, &zone->flags);
	}
	/* Load mapping table */
	ret = dmz_load_mapping(zmd);
	if (ret)
		goto err;

	/*
	 * Cache size boundaries: allow at least 2 super blocks, the chunk map
	 * blocks and enough blocks to be able to cache the bitmap blocks of
	 * up to 16 zones when idle (min_nr_mblks). Otherwise, if busy, allow
	 * the cache to add 512 more metadata blocks.
	 */
	zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16;
	zmd->max_nr_mblks = zmd->min_nr_mblks + 512;

	/* Metadata cache shrinker */
	zmd->mblk_shrinker = shrinker_alloc(0, "dm-zoned-meta:(%u:%u)",
					    MAJOR(dev->bdev->bd_dev),
					    MINOR(dev->bdev->bd_dev));
	if (!zmd->mblk_shrinker) {
		ret = -ENOMEM;
		dmz_zmd_err(zmd, "Allocate metadata cache shrinker failed");
		goto err;
	}

	zmd->mblk_shrinker->count_objects = dmz_mblock_shrinker_count;
	zmd->mblk_shrinker->scan_objects = dmz_mblock_shrinker_scan;
	zmd->mblk_shrinker->private_data = zmd;

	shrinker_register(zmd->mblk_shrinker);

	dmz_zmd_info(zmd, "DM-Zoned metadata version %d", zmd->sb_version);
	for (i = 0; i < zmd->nr_devs; i++)
		dmz_print_dev(zmd, i);

	dmz_zmd_info(zmd, "  %u zones of %llu 512-byte logical sectors",
		     zmd->nr_zones, (u64)zmd->zone_nr_sectors);
	dmz_zmd_debug(zmd, "  %u metadata zones",
		      zmd->nr_meta_zones * 2);
	dmz_zmd_debug(zmd, "  %u data zones for %u chunks",
		      zmd->nr_data_zones, zmd->nr_chunks);
	dmz_zmd_debug(zmd, "    %u cache zones (%u unmapped)",
		      zmd->nr_cache, atomic_read(&zmd->unmap_nr_cache));
	for (i = 0; i < zmd->nr_devs; i++) {
		dmz_zmd_debug(zmd, "    %u random zones (%u unmapped)",
			      dmz_nr_rnd_zones(zmd, i),
			      dmz_nr_unmap_rnd_zones(zmd, i));
		dmz_zmd_debug(zmd, "    %u sequential zones (%u unmapped)",
			      dmz_nr_seq_zones(zmd, i),
			      dmz_nr_unmap_seq_zones(zmd, i));
	}
	dmz_zmd_debug(zmd, "  %u reserved sequential data zones",
		      zmd->nr_reserved_seq);
	dmz_zmd_debug(zmd, "Format:");
	dmz_zmd_debug(zmd, "%u metadata blocks per set (%u max cache)",
		      zmd->nr_meta_blocks, zmd->max_nr_mblks);
	dmz_zmd_debug(zmd, "  %u data zone mapping blocks",
		      zmd->nr_map_blocks);
	dmz_zmd_debug(zmd, "  %u bitmap blocks",
		      zmd->nr_bitmap_blocks);

	*metadata = zmd;

	return 0;
err:
	dmz_cleanup_metadata(zmd);
	kfree(zmd);
	*metadata = NULL;

	return ret;
}
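
/*
 * Worked example for the cache bounds above (assumed geometry, for
 * illustration only): with 256 MB zones and 4 KB blocks, a zone has
 * 65536 blocks, i.e. 2 bitmap blocks of 32768 bits each. With
 * nr_map_blocks == 8 this gives min_nr_mblks = 2 + 8 + 2 * 16 = 42
 * and max_nr_mblks = 42 + 512 = 554 cached metadata blocks.
 */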

/*
 * Cleanup the zoned metadata resources.
 */
void dmz_dtr_metadata(struct dmz_metadata *zmd)
{
	shrinker_free(zmd->mblk_shrinker);
	dmz_cleanup_metadata(zmd);
	kfree(zmd);
}
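
/*
 * Usage sketch (illustrative, not taken from the target code): a
 * caller pairs construction and destruction as follows, with dev
 * pointing to an initialized dmz_dev array:
 *
 *	struct dmz_metadata *zmd;
 *	int ret;
 *
 *	ret = dmz_ctr_metadata(dev, 1, &zmd, "dmz");
 *	if (ret)
 *		return ret;
 *	...
 *	dmz_dtr_metadata(zmd);
 */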