// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/sched/mm.h>

#define	DM_MSG_PREFIX		"zoned metadata"

#define DMZ_META_VER	2

/*
 * On-disk super block magic.
 */
#define DMZ_MAGIC	((((unsigned int)('D')) << 24) | \
			 (((unsigned int)('Z')) << 16) | \
			 (((unsigned int)('B')) <<  8) | \
			 ((unsigned int)('D')))

/*
 * On disk super block.
 * This uses only 512 B but uses on disk a full 4KB block. This block is
 * followed on disk by the mapping table of chunks to zones and the bitmap
 * blocks indicating zone block validity.
 * The overall resulting metadata format is:
 *    (1) Super block (1 block)
 *    (2) Chunk mapping table (nr_map_blocks)
 *    (3) Bitmap blocks (nr_bitmap_blocks)
 * All metadata blocks are stored in conventional zones, starting from
 * the first conventional zone found on disk.
 */
struct dmz_super {
	/* Magic number */
	__le32		magic;			/*   4 */

	/* Metadata version number */
	__le32		version;		/*   8 */

	/* Generation number */
	__le64		gen;			/*  16 */

	/* This block number */
	__le64		sb_block;		/*  24 */

	/* The number of metadata blocks, including this super block */
	__le32		nr_meta_blocks;		/*  28 */

	/* The number of sequential zones reserved for reclaim */
	__le32		nr_reserved_seq;	/*  32 */

	/* The number of entries in the mapping table */
	__le32		nr_chunks;		/*  36 */

	/* The number of blocks used for the chunk mapping table */
	__le32		nr_map_blocks;		/*  40 */

	/* The number of blocks used for the block bitmaps */
	__le32		nr_bitmap_blocks;	/*  44 */

	/* Checksum */
	__le32		crc;			/*  48 */

	/* DM-Zoned label */
	u8		dmz_label[32];		/*  80 */

	/* DM-Zoned UUID */
	u8		dmz_uuid[16];		/*  96 */

	/* Device UUID */
	u8		dev_uuid[16];		/* 112 */

	/* Padding to full 512B sector */
	u8		reserved[400];		/* 512 */
};
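/*
 * Illustrative sketch (not part of the original driver): how the super
 * block checksum can be verified. The CRC is seeded with the generation
 * number and computed over the full 4KB block with the crc field itself
 * zeroed, mirroring what dmz_write_sb() and dmz_check_sb() do below.
 */
static inline bool dmz_sb_crc_ok_example(struct dmz_super *sb)
{
	u32 stored_crc = le32_to_cpu(sb->crc);
	u64 gen = le64_to_cpu(sb->gen);
	u32 crc;

	/* Recompute the CRC with the crc field cleared, then restore it */
	sb->crc = 0;
	crc = crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE);
	sb->crc = cpu_to_le32(stored_crc);

	return crc == stored_crc;
}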
/*
 * Chunk mapping entry: entries are indexed by chunk number
 * and give the zone ID (dzone_id) mapping the chunk on disk.
 * This zone may be sequential or random. If it is a sequential
 * zone, a second zone (bzone_id) used as a write buffer may
 * also be specified. This second zone will always be a randomly
 * writeable zone.
 */
struct dmz_map {
	__le32			dzone_id;
	__le32			bzone_id;
};

/*
 * Chunk mapping table metadata: 512 8-byte entries per 4KB block.
 */
#define DMZ_MAP_ENTRIES		(DMZ_BLOCK_SIZE / sizeof(struct dmz_map))
#define DMZ_MAP_ENTRIES_SHIFT	(ilog2(DMZ_MAP_ENTRIES))
#define DMZ_MAP_ENTRIES_MASK	(DMZ_MAP_ENTRIES - 1)
#define DMZ_MAP_UNMAPPED	UINT_MAX
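/*
 * Illustrative sketch (not part of the original driver): resolving a chunk
 * number to its mapping table location using the macros above. With 4KB
 * blocks and 8-byte entries, DMZ_MAP_ENTRIES is 512, so e.g. chunk 1000
 * lives in mapping block 1, entry 488. dmz_set_chunk_mapping() below uses
 * the same arithmetic.
 */
static inline void dmz_map_location_example(unsigned int chunk,
					    unsigned int *map_blk_idx,
					    unsigned int *map_entry)
{
	*map_blk_idx = chunk >> DMZ_MAP_ENTRIES_SHIFT;	/* which mapping block */
	*map_entry = chunk & DMZ_MAP_ENTRIES_MASK;	/* entry within that block */
}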
/*
 * Metadata block descriptor (for cached metadata blocks).
 */
struct dmz_mblock {
	struct rb_node		node;
	struct list_head	link;
	sector_t		no;
	unsigned int		ref;
	unsigned long		state;
	struct page		*page;
	void			*data;
};

/*
 * Metadata block state flags.
 */
enum {
	DMZ_META_DIRTY,
	DMZ_META_READING,
	DMZ_META_WRITING,
	DMZ_META_ERROR,
};

/*
 * Super block information (one per metadata set).
 */
struct dmz_sb {
	sector_t		block;
	struct dmz_dev		*dev;
	struct dmz_mblock	*mblk;
	struct dmz_super	*sb;
	struct dm_zone		*zone;
};
/*
 * In-memory metadata.
 */
struct dmz_metadata {
	struct dmz_dev		*dev;
	unsigned int		nr_devs;

	char			devname[BDEVNAME_SIZE];
	char			label[BDEVNAME_SIZE];
	uuid_t			uuid;

	sector_t		zone_bitmap_size;
	unsigned int		zone_nr_bitmap_blocks;
	unsigned int		zone_bits_per_mblk;

	sector_t		zone_nr_blocks;
	sector_t		zone_nr_blocks_shift;

	sector_t		zone_nr_sectors;
	sector_t		zone_nr_sectors_shift;

	unsigned int		nr_bitmap_blocks;
	unsigned int		nr_map_blocks;

	unsigned int		nr_zones;
	unsigned int		nr_useable_zones;
	unsigned int		nr_meta_blocks;
	unsigned int		nr_meta_zones;
	unsigned int		nr_data_zones;
	unsigned int		nr_cache_zones;
	unsigned int		nr_rnd_zones;
	unsigned int		nr_reserved_seq;
	unsigned int		nr_chunks;

	/* Zone information array */
	struct xarray		zones;

	struct dmz_sb		sb[2];
	unsigned int		mblk_primary;
	unsigned int		sb_version;
	u64			sb_gen;
	unsigned int		min_nr_mblks;
	unsigned int		max_nr_mblks;
	atomic_t		nr_mblks;
	struct rw_semaphore	mblk_sem;
	struct mutex		mblk_flush_lock;
	spinlock_t		mblk_lock;
	struct rb_root		mblk_rbtree;
	struct list_head	mblk_lru_list;
	struct list_head	mblk_dirty_list;
	struct shrinker		mblk_shrinker;

	/* Zone allocation management */
	struct mutex		map_lock;
	struct dmz_mblock	**map_mblk;

	unsigned int		nr_cache;
	atomic_t		unmap_nr_cache;
	struct list_head	unmap_cache_list;
	struct list_head	map_cache_list;

	atomic_t		nr_reserved_seq_zones;
	struct list_head	reserved_seq_zones_list;

	wait_queue_head_t	free_wq;
};
#define dmz_zmd_info(zmd, format, args...)	\
	DMINFO("(%s): " format, (zmd)->label, ## args)

#define dmz_zmd_err(zmd, format, args...)	\
	DMERR("(%s): " format, (zmd)->label, ## args)

#define dmz_zmd_warn(zmd, format, args...)	\
	DMWARN("(%s): " format, (zmd)->label, ## args)

#define dmz_zmd_debug(zmd, format, args...)	\
	DMDEBUG("(%s): " format, (zmd)->label, ## args)
static unsigned int dmz_dev_zone_id(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	return zone->id - zone->dev->zone_offset;
}

sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int zone_id = dmz_dev_zone_id(zmd, zone);

	return (sector_t)zone_id << zmd->zone_nr_sectors_shift;
}

sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int zone_id = dmz_dev_zone_id(zmd, zone);

	return (sector_t)zone_id << zmd->zone_nr_blocks_shift;
}

unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd)
{
	return zmd->zone_nr_blocks;
}

unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd)
{
	return zmd->zone_nr_blocks_shift;
}

unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd)
{
	return zmd->zone_nr_sectors;
}

unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd)
{
	return zmd->zone_nr_sectors_shift;
}

unsigned int dmz_nr_zones(struct dmz_metadata *zmd)
{
	return zmd->nr_zones;
}

unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
{
	return zmd->nr_chunks;
}

unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd, int idx)
{
	return zmd->dev[idx].nr_rnd;
}

unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd, int idx)
{
	return atomic_read(&zmd->dev[idx].unmap_nr_rnd);
}

unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd)
{
	return zmd->nr_cache;
}

unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd)
{
	return atomic_read(&zmd->unmap_nr_cache);
}

unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd, int idx)
{
	return zmd->dev[idx].nr_seq;
}

unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd, int idx)
{
	return atomic_read(&zmd->dev[idx].unmap_nr_seq);
}

static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
{
	return xa_load(&zmd->zones, zone_id);
}
static struct dm_zone *dmz_insert(struct dmz_metadata *zmd,
				  unsigned int zone_id, struct dmz_dev *dev)
{
	struct dm_zone *zone = kzalloc(sizeof(struct dm_zone), GFP_KERNEL);

	if (!zone)
		return ERR_PTR(-ENOMEM);

	if (xa_insert(&zmd->zones, zone_id, zone, GFP_KERNEL)) {
		kfree(zone);
		return ERR_PTR(-EBUSY);
	}

	INIT_LIST_HEAD(&zone->link);
	atomic_set(&zone->refcount, 0);
	zone->id = zone_id;
	zone->chunk = DMZ_MAP_UNMAPPED;
	zone->dev = dev;

	return zone;
}
const char *dmz_metadata_label(struct dmz_metadata *zmd)
{
	return (const char *)zmd->label;
}

bool dmz_check_dev(struct dmz_metadata *zmd)
{
	unsigned int i;

	for (i = 0; i < zmd->nr_devs; i++) {
		if (!dmz_check_bdev(&zmd->dev[i]))
			return false;
	}
	return true;
}

bool dmz_dev_is_dying(struct dmz_metadata *zmd)
{
	unsigned int i;

	for (i = 0; i < zmd->nr_devs; i++) {
		if (dmz_bdev_is_dying(&zmd->dev[i]))
			return true;
	}
	return false;
}
/*
 * Lock/unlock mapping table.
 * The map lock also protects all the zone lists.
 */
void dmz_lock_map(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->map_lock);
}

void dmz_unlock_map(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->map_lock);
}

/*
 * Lock/unlock metadata access. This is a "read" lock on a semaphore
 * that prevents metadata flush from running while metadata are being
 * modified. The actual metadata write mutual exclusion is achieved with
 * the map lock and zone state management (active and reclaim state are
 * mutually exclusive).
 */
void dmz_lock_metadata(struct dmz_metadata *zmd)
{
	down_read(&zmd->mblk_sem);
}

void dmz_unlock_metadata(struct dmz_metadata *zmd)
{
	up_read(&zmd->mblk_sem);
}
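/*
 * Illustrative sketch (not part of the original driver): the lock order a
 * typical metadata update follows. Callers take the metadata read lock so
 * that dmz_flush_metadata() (which takes mblk_sem for writing) cannot run
 * concurrently, then serialize the actual mapping change with the map lock.
 */
static inline void dmz_locked_update_example(struct dmz_metadata *zmd)
{
	dmz_lock_metadata(zmd);	/* block a concurrent metadata flush */
	dmz_lock_map(zmd);	/* serialize mapping table and zone list changes */
	/* ... modify chunk mappings and dirty metadata blocks here ... */
	dmz_unlock_map(zmd);
	dmz_unlock_metadata(zmd);
}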
/*
 * Lock/unlock flush: prevent concurrent executions
 * of dmz_flush_metadata as well as metadata modification in reclaim
 * while flush is being executed.
 */
void dmz_lock_flush(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->mblk_flush_lock);
}

void dmz_unlock_flush(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->mblk_flush_lock);
}
/*
 * Allocate a metadata block.
 */
static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
					   sector_t mblk_no)
{
	struct dmz_mblock *mblk = NULL;

	/* See if we can reuse cached blocks */
	if (zmd->max_nr_mblks && atomic_read(&zmd->nr_mblks) > zmd->max_nr_mblks) {
		spin_lock(&zmd->mblk_lock);
		mblk = list_first_entry_or_null(&zmd->mblk_lru_list,
						struct dmz_mblock, link);
		if (mblk) {
			list_del_init(&mblk->link);
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			mblk->no = mblk_no;
		}
		spin_unlock(&zmd->mblk_lock);
		if (mblk)
			return mblk;
	}

	/* Allocate a new block */
	mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO);
	if (!mblk)
		return NULL;

	mblk->page = alloc_page(GFP_NOIO);
	if (!mblk->page) {
		kfree(mblk);
		return NULL;
	}

	RB_CLEAR_NODE(&mblk->node);
	INIT_LIST_HEAD(&mblk->link);
	mblk->ref = 0;
	mblk->state = 0;
	mblk->no = mblk_no;
	mblk->data = page_address(mblk->page);

	atomic_inc(&zmd->nr_mblks);

	return mblk;
}
/*
 * Free a metadata block.
 */
static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	__free_pages(mblk->page, 0);
	kfree(mblk);

	atomic_dec(&zmd->nr_mblks);
}
/*
 * Insert a metadata block in the rbtree.
 */
static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	struct rb_root *root = &zmd->mblk_rbtree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct dmz_mblock *b;

	/* Figure out where to put the new node */
	while (*new) {
		b = container_of(*new, struct dmz_mblock, node);
		parent = *new;
		new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right);
	}

	/* Add new node and rebalance tree */
	rb_link_node(&mblk->node, parent, new);
	rb_insert_color(&mblk->node, root);
}
/*
 * Lookup a metadata block in the rbtree. If the block is found, increment
 * its reference count.
 */
static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
					      sector_t mblk_no)
{
	struct rb_root *root = &zmd->mblk_rbtree;
	struct rb_node *node = root->rb_node;
	struct dmz_mblock *mblk;

	while (node) {
		mblk = container_of(node, struct dmz_mblock, node);
		if (mblk->no == mblk_no) {
			/*
			 * If this is the first reference to the block,
			 * remove it from the LRU list.
			 */
			mblk->ref++;
			if (mblk->ref == 1 &&
			    !test_bit(DMZ_META_DIRTY, &mblk->state))
				list_del_init(&mblk->link);
			return mblk;
		}
		node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
	}

	return NULL;
}
/*
 * Metadata block BIO end callback.
 */
static void dmz_mblock_bio_end_io(struct bio *bio)
{
	struct dmz_mblock *mblk = bio->bi_private;
	int flag;

	if (bio->bi_status)
		set_bit(DMZ_META_ERROR, &mblk->state);

	if (bio_op(bio) == REQ_OP_WRITE)
		flag = DMZ_META_WRITING;
	else
		flag = DMZ_META_READING;

	clear_bit_unlock(flag, &mblk->state);
	smp_mb__after_atomic();
	wake_up_bit(&mblk->state, flag);

	bio_put(bio);
}
/*
 * Read an uncached metadata block from disk and add it to the cache.
 */
static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
					      sector_t mblk_no)
{
	struct dmz_mblock *mblk, *m;
	sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
	struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;
	struct bio *bio;

	if (dmz_bdev_is_dying(dev))
		return ERR_PTR(-EIO);

	/* Get a new block and a BIO to read it */
	mblk = dmz_alloc_mblock(zmd, mblk_no);
	if (!mblk)
		return ERR_PTR(-ENOMEM);

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		dmz_free_mblock(zmd, mblk);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock(&zmd->mblk_lock);

	/*
	 * Make sure that another context did not start reading
	 * the block already.
	 */
	m = dmz_get_mblock_fast(zmd, mblk_no);
	if (m) {
		spin_unlock(&zmd->mblk_lock);
		dmz_free_mblock(zmd, mblk);
		bio_put(bio);
		return m;
	}

	mblk->ref++;
	set_bit(DMZ_META_READING, &mblk->state);
	dmz_insert_mblock(zmd, mblk);

	spin_unlock(&zmd->mblk_lock);

	/* Submit read BIO */
	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, dev->bdev);
	bio->bi_private = mblk;
	bio->bi_end_io = dmz_mblock_bio_end_io;
	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
	submit_bio(bio);

	return mblk;
}
/*
 * Free metadata blocks.
 */
static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd,
					     unsigned long limit)
{
	struct dmz_mblock *mblk;
	unsigned long count = 0;

	if (!zmd->max_nr_mblks)
		return 0;

	while (!list_empty(&zmd->mblk_lru_list) &&
	       atomic_read(&zmd->nr_mblks) > zmd->min_nr_mblks &&
	       count < limit) {
		mblk = list_first_entry(&zmd->mblk_lru_list,
					struct dmz_mblock, link);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
		count++;
	}

	return count;
}
/*
 * For mblock shrinker: get the number of unused metadata blocks in the cache.
 */
static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
					       struct shrink_control *sc)
{
	struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);

	return atomic_read(&zmd->nr_mblks);
}

/*
 * For mblock shrinker: scan unused metadata blocks and shrink the cache.
 */
static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink,
					      struct shrink_control *sc)
{
	struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
	unsigned long count;

	spin_lock(&zmd->mblk_lock);
	count = dmz_shrink_mblock_cache(zmd, sc->nr_to_scan);
	spin_unlock(&zmd->mblk_lock);

	return count ? count : SHRINK_STOP;
}
/*
 * Release a metadata block.
 */
static void dmz_release_mblock(struct dmz_metadata *zmd,
			       struct dmz_mblock *mblk)
{
	if (!mblk)
		return;

	spin_lock(&zmd->mblk_lock);

	mblk->ref--;
	if (mblk->ref == 0) {
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			dmz_free_mblock(zmd, mblk);
		} else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) {
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
			dmz_shrink_mblock_cache(zmd, 1);
		}
	}

	spin_unlock(&zmd->mblk_lock);
}
/*
 * Get a metadata block from the rbtree. If the block
 * is not present, read it from disk.
 */
static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
					 sector_t mblk_no)
{
	struct dmz_mblock *mblk;
	struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;

	/* Check rbtree */
	spin_lock(&zmd->mblk_lock);
	mblk = dmz_get_mblock_fast(zmd, mblk_no);
	spin_unlock(&zmd->mblk_lock);

	if (!mblk) {
		/* Cache miss: read the block from disk */
		mblk = dmz_get_mblock_slow(zmd, mblk_no);
		if (IS_ERR(mblk))
			return mblk;
	}

	/* Wait for on-going read I/O and check for error */
	wait_on_bit_io(&mblk->state, DMZ_META_READING,
		       TASK_UNINTERRUPTIBLE);
	if (test_bit(DMZ_META_ERROR, &mblk->state)) {
		dmz_release_mblock(zmd, mblk);
		dmz_check_bdev(dev);
		return ERR_PTR(-EIO);
	}

	return mblk;
}
/*
 * Mark a metadata block dirty.
 */
static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	spin_lock(&zmd->mblk_lock);
	if (!test_and_set_bit(DMZ_META_DIRTY, &mblk->state))
		list_add_tail(&mblk->link, &zmd->mblk_dirty_list);
	spin_unlock(&zmd->mblk_lock);
}
/*
 * Issue a metadata block write BIO.
 */
static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
			    unsigned int set)
{
	struct dmz_dev *dev = zmd->sb[set].dev;
	sector_t block = zmd->sb[set].block + mblk->no;
	struct bio *bio;

	if (dmz_bdev_is_dying(dev))
		return -EIO;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		set_bit(DMZ_META_ERROR, &mblk->state);
		return -ENOMEM;
	}

	set_bit(DMZ_META_WRITING, &mblk->state);

	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, dev->bdev);
	bio->bi_private = mblk;
	bio->bi_end_io = dmz_mblock_bio_end_io;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
	submit_bio(bio);

	return 0;
}
/*
 * Read/write a metadata block.
 */
static int dmz_rdwr_block(struct dmz_dev *dev, int op,
			  sector_t block, struct page *page)
{
	struct bio *bio;
	int ret;

	if (dmz_bdev_is_dying(dev))
		return -EIO;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, dev->bdev);
	bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
	bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
	ret = submit_bio_wait(bio);
	bio_put(bio);

	if (ret)
		dmz_check_bdev(dev);
	return ret;
}
/*
 * Write super block of the specified metadata set.
 */
static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
{
	struct dmz_mblock *mblk = zmd->sb[set].mblk;
	struct dmz_super *sb = zmd->sb[set].sb;
	struct dmz_dev *dev = zmd->sb[set].dev;
	sector_t sb_block;
	u64 sb_gen = zmd->sb_gen + 1;
	int ret;

	sb->magic = cpu_to_le32(DMZ_MAGIC);

	sb->version = cpu_to_le32(zmd->sb_version);
	if (zmd->sb_version > 1) {
		BUILD_BUG_ON(UUID_SIZE != 16);
		export_uuid(sb->dmz_uuid, &zmd->uuid);
		memcpy(sb->dmz_label, zmd->label, BDEVNAME_SIZE);
		export_uuid(sb->dev_uuid, &dev->uuid);
	}

	sb->gen = cpu_to_le64(sb_gen);

	/*
	 * The metadata always references the absolute block address,
	 * ie relative to the entire block range, not the per-device
	 * block address.
	 */
	sb_block = zmd->sb[set].zone->id << zmd->zone_nr_blocks_shift;
	sb->sb_block = cpu_to_le64(sb_block);
	sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks);
	sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq);
	sb->nr_chunks = cpu_to_le32(zmd->nr_chunks);

	sb->nr_map_blocks = cpu_to_le32(zmd->nr_map_blocks);
	sb->nr_bitmap_blocks = cpu_to_le32(zmd->nr_bitmap_blocks);

	sb->crc = 0;
	sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE));

	ret = dmz_rdwr_block(dev, REQ_OP_WRITE, zmd->sb[set].block,
			     mblk->page);
	if (ret == 0)
		ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);

	return ret;
}
/*
 * Write dirty metadata blocks to the specified set.
 */
static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
				   struct list_head *write_list,
				   unsigned int set)
{
	struct dmz_mblock *mblk;
	struct dmz_dev *dev = zmd->sb[set].dev;
	struct blk_plug plug;
	int ret = 0, nr_mblks_submitted = 0;

	/* Issue writes */
	blk_start_plug(&plug);
	list_for_each_entry(mblk, write_list, link) {
		ret = dmz_write_mblock(zmd, mblk, set);
		if (ret)
			break;
		nr_mblks_submitted++;
	}
	blk_finish_plug(&plug);

	/* Wait for completion */
	list_for_each_entry(mblk, write_list, link) {
		if (!nr_mblks_submitted)
			break;
		wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
			       TASK_UNINTERRUPTIBLE);
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			clear_bit(DMZ_META_ERROR, &mblk->state);
			dmz_check_bdev(dev);
			ret = -EIO;
		}
		nr_mblks_submitted--;
	}

	/* Flush drive cache (this will also sync data) */
	if (ret == 0)
		ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);

	return ret;
}
/*
 * Log dirty metadata blocks.
 */
static int dmz_log_dirty_mblocks(struct dmz_metadata *zmd,
				 struct list_head *write_list)
{
	unsigned int log_set = zmd->mblk_primary ^ 0x1;
	int ret;

	/* Write dirty blocks to the log */
	ret = dmz_write_dirty_mblocks(zmd, write_list, log_set);
	if (ret)
		return ret;

	/*
	 * No error so far: now validate the log by updating the
	 * log index super block generation.
	 */
	ret = dmz_write_sb(zmd, log_set);
	if (ret)
		return ret;

	return 0;
}
/*
 * Flush dirty metadata blocks.
 */
int dmz_flush_metadata(struct dmz_metadata *zmd)
{
	struct dmz_mblock *mblk;
	struct list_head write_list;
	struct dmz_dev *dev;
	int ret;

	INIT_LIST_HEAD(&write_list);

	/*
	 * Make sure that metadata blocks are stable before logging: take
	 * the write lock on the metadata semaphore to prevent target BIOs
	 * from modifying metadata.
	 */
	down_write(&zmd->mblk_sem);
	dev = zmd->sb[zmd->mblk_primary].dev;

	/*
	 * This is called from the target flush work and reclaim work.
	 * Concurrent execution is not allowed.
	 */
	dmz_lock_flush(zmd);

	if (dmz_bdev_is_dying(dev)) {
		ret = -EIO;
		goto out;
	}

	/* Get dirty blocks */
	spin_lock(&zmd->mblk_lock);
	list_splice_init(&zmd->mblk_dirty_list, &write_list);
	spin_unlock(&zmd->mblk_lock);

	/* If there are no dirty metadata blocks, just flush the device cache */
	if (list_empty(&write_list)) {
		ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
		goto err;
	}

	/*
	 * The primary metadata set is still clean. Keep it this way until
	 * all updates are successful in the secondary set. That is, use
	 * the secondary set as a log.
	 */
	ret = dmz_log_dirty_mblocks(zmd, &write_list);
	if (ret)
		goto err;

	/*
	 * The log is on disk. It is now safe to update in place
	 * in the primary metadata set.
	 */
	ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary);
	if (ret)
		goto err;

	ret = dmz_write_sb(zmd, zmd->mblk_primary);
	if (ret)
		goto err;

	while (!list_empty(&write_list)) {
		mblk = list_first_entry(&write_list, struct dmz_mblock, link);
		list_del_init(&mblk->link);

		spin_lock(&zmd->mblk_lock);
		clear_bit(DMZ_META_DIRTY, &mblk->state);
		if (mblk->ref == 0)
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
		spin_unlock(&zmd->mblk_lock);
	}

	zmd->sb_gen++;
out:
	dmz_unlock_flush(zmd);
	up_write(&zmd->mblk_sem);

	return ret;

err:
	if (!list_empty(&write_list)) {
		spin_lock(&zmd->mblk_lock);
		list_splice(&write_list, &zmd->mblk_dirty_list);
		spin_unlock(&zmd->mblk_lock);
	}
	if (!dmz_check_bdev(dev))
		ret = -EIO;
	goto out;
}
/*
 * Check super block information.
 */
static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
			bool tertiary)
{
	struct dmz_super *sb = dsb->sb;
	struct dmz_dev *dev = dsb->dev;
	unsigned int nr_meta_zones, nr_data_zones;
	u32 crc, stored_crc;
	u64 gen, sb_block;

	if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
		dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
			    DMZ_MAGIC, le32_to_cpu(sb->magic));
		return -ENXIO;
	}

	zmd->sb_version = le32_to_cpu(sb->version);
	if (zmd->sb_version > DMZ_META_VER) {
		dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
			    DMZ_META_VER, zmd->sb_version);
		return -EINVAL;
	}
	if (zmd->sb_version < 2 && tertiary) {
		dmz_dev_err(dev, "Tertiary superblocks are not supported");
		return -EINVAL;
	}

	gen = le64_to_cpu(sb->gen);
	stored_crc = le32_to_cpu(sb->crc);
	sb->crc = 0;
	crc = crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE);
	if (crc != stored_crc) {
		dmz_dev_err(dev, "Invalid checksum (needed 0x%08x, got 0x%08x)",
			    crc, stored_crc);
		return -ENXIO;
	}

	sb_block = le64_to_cpu(sb->sb_block);
	if (sb_block != (u64)dsb->zone->id << zmd->zone_nr_blocks_shift) {
		dmz_dev_err(dev, "Invalid superblock position "
			    "(is %llu expected %llu)",
			    sb_block,
			    (u64)dsb->zone->id << zmd->zone_nr_blocks_shift);
		return -EINVAL;
	}
	if (zmd->sb_version > 1) {
		uuid_t sb_uuid;

		import_uuid(&sb_uuid, sb->dmz_uuid);
		if (uuid_is_null(&sb_uuid)) {
			dmz_dev_err(dev, "NULL DM-Zoned uuid");
			return -ENXIO;
		} else if (uuid_is_null(&zmd->uuid)) {
			uuid_copy(&zmd->uuid, &sb_uuid);
		} else if (!uuid_equal(&zmd->uuid, &sb_uuid)) {
			dmz_dev_err(dev, "mismatching DM-Zoned uuid, "
				    "is %pUl expected %pUl",
				    &sb_uuid, &zmd->uuid);
			return -ENXIO;
		}
		if (!strlen(zmd->label))
			memcpy(zmd->label, sb->dmz_label, BDEVNAME_SIZE);
		else if (memcmp(zmd->label, sb->dmz_label, BDEVNAME_SIZE)) {
			dmz_dev_err(dev, "mismatching DM-Zoned label, "
				    "is %s expected %s",
				    sb->dmz_label, zmd->label);
			return -ENXIO;
		}
		import_uuid(&dev->uuid, sb->dev_uuid);
		if (uuid_is_null(&dev->uuid)) {
			dmz_dev_err(dev, "NULL device uuid");
			return -ENXIO;
		}

		if (tertiary) {
			/*
			 * Generation number should be 0, but it doesn't
			 * really matter if it isn't.
			 */
			if (gen != 0)
				dmz_dev_warn(dev, "Invalid generation %llu",
					     gen);
			return 0;
		}
	}

	nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1)
		>> zmd->zone_nr_blocks_shift;
	if (!nr_meta_zones ||
	    (zmd->nr_devs <= 1 && nr_meta_zones >= zmd->nr_rnd_zones) ||
	    (zmd->nr_devs > 1 && nr_meta_zones >= zmd->nr_cache_zones)) {
		dmz_dev_err(dev, "Invalid number of metadata blocks");
		return -ENXIO;
	}

	if (!le32_to_cpu(sb->nr_reserved_seq) ||
	    le32_to_cpu(sb->nr_reserved_seq) >= (zmd->nr_useable_zones - nr_meta_zones)) {
		dmz_dev_err(dev, "Invalid number of reserved sequential zones");
		return -ENXIO;
	}

	nr_data_zones = zmd->nr_useable_zones -
		(nr_meta_zones * 2 + le32_to_cpu(sb->nr_reserved_seq));
	if (le32_to_cpu(sb->nr_chunks) > nr_data_zones) {
		dmz_dev_err(dev, "Invalid number of chunks %u / %u",
			    le32_to_cpu(sb->nr_chunks), nr_data_zones);
		return -ENXIO;
	}

	/* OK */
	zmd->nr_meta_blocks = le32_to_cpu(sb->nr_meta_blocks);
	zmd->nr_reserved_seq = le32_to_cpu(sb->nr_reserved_seq);
	zmd->nr_chunks = le32_to_cpu(sb->nr_chunks);
	zmd->nr_map_blocks = le32_to_cpu(sb->nr_map_blocks);
	zmd->nr_bitmap_blocks = le32_to_cpu(sb->nr_bitmap_blocks);
	zmd->nr_meta_zones = nr_meta_zones;
	zmd->nr_data_zones = nr_data_zones;

	return 0;
}
/*
 * Read the first or second super block from disk.
 */
static int dmz_read_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
{
	dmz_zmd_debug(zmd, "read superblock set %d dev %s block %llu",
		      set, sb->dev->name, sb->block);

	return dmz_rdwr_block(sb->dev, REQ_OP_READ,
			      sb->block, sb->mblk->page);
}
/*
 * Determine the position of the secondary super blocks on disk.
 * This is used only if a corruption of the primary super block
 * is detected.
 */
static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
{
	unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
	struct dmz_mblock *mblk;
	unsigned int zone_id = zmd->sb[0].zone->id;
	int i;

	/* Allocate a block */
	mblk = dmz_alloc_mblock(zmd, 0);
	if (!mblk)
		return -ENOMEM;

	zmd->sb[1].mblk = mblk;
	zmd->sb[1].sb = mblk->data;

	/* Bad first super block: search for the second one */
	zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks;
	zmd->sb[1].zone = dmz_get(zmd, zone_id + 1);
	zmd->sb[1].dev = zmd->sb[0].dev;
	for (i = 1; i < zmd->nr_rnd_zones; i++) {
		if (dmz_read_sb(zmd, &zmd->sb[1], 1) != 0)
			break;
		if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC)
			return 0;
		zmd->sb[1].block += zone_nr_blocks;
		zmd->sb[1].zone = dmz_get(zmd, zone_id + i);
	}

	dmz_free_mblock(zmd, mblk);
	zmd->sb[1].mblk = NULL;
	zmd->sb[1].zone = NULL;
	zmd->sb[1].dev = NULL;

	return -EIO;
}
/*
 * Read a super block from disk.
 */
static int dmz_get_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
{
	struct dmz_mblock *mblk;
	int ret;

	/* Allocate a block */
	mblk = dmz_alloc_mblock(zmd, 0);
	if (!mblk)
		return -ENOMEM;

	sb->mblk = mblk;
	sb->sb = mblk->data;

	/* Read super block */
	ret = dmz_read_sb(zmd, sb, set);
	if (ret) {
		dmz_free_mblock(zmd, mblk);
		sb->mblk = NULL;
	}

	return ret;
}
/*
 * Recover a metadata set.
 */
static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
{
	unsigned int src_set = dst_set ^ 0x1;
	struct page *page;
	int i, ret;

	dmz_dev_warn(zmd->sb[dst_set].dev,
		     "Metadata set %u invalid: recovering", dst_set);

	if (dst_set == 0)
		zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
	else
		zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);

	page = alloc_page(GFP_NOIO);
	if (!page)
		return -ENOMEM;

	/* Copy metadata blocks */
	for (i = 1; i < zmd->nr_meta_blocks; i++) {
		ret = dmz_rdwr_block(zmd->sb[src_set].dev, REQ_OP_READ,
				     zmd->sb[src_set].block + i, page);
		if (ret)
			goto out;
		ret = dmz_rdwr_block(zmd->sb[dst_set].dev, REQ_OP_WRITE,
				     zmd->sb[dst_set].block + i, page);
		if (ret)
			goto out;
	}

	/* Finalize with the super block */
	if (!zmd->sb[dst_set].mblk) {
		zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0);
		if (!zmd->sb[dst_set].mblk) {
			ret = -ENOMEM;
			goto out;
		}
		zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data;
	}

	ret = dmz_write_sb(zmd, dst_set);
out:
	__free_pages(page, 0);

	return ret;
}
/*
 * Get super block from disk.
 */
static int dmz_load_sb(struct dmz_metadata *zmd)
{
	bool sb_good[2] = {false, false};
	u64 sb_gen[2] = {0, 0};
	int ret;

	if (!zmd->sb[0].zone) {
		dmz_zmd_err(zmd, "Primary super block zone not set");
		return -ENXIO;
	}

	/* Read and check the primary super block */
	zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
	zmd->sb[0].dev = zmd->sb[0].zone->dev;
	ret = dmz_get_sb(zmd, &zmd->sb[0], 0);
	if (ret) {
		dmz_dev_err(zmd->sb[0].dev, "Read primary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, &zmd->sb[0], false);

	/* Read and check secondary super block */
	if (ret == 0) {
		sb_good[0] = true;
		if (!zmd->sb[1].zone) {
			unsigned int zone_id =
				zmd->sb[0].zone->id + zmd->nr_meta_zones;

			zmd->sb[1].zone = dmz_get(zmd, zone_id);
		}
		zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
		zmd->sb[1].dev = zmd->sb[0].dev;
		ret = dmz_get_sb(zmd, &zmd->sb[1], 1);
	} else
		ret = dmz_lookup_secondary_sb(zmd);

	if (ret) {
		dmz_dev_err(zmd->sb[1].dev, "Read secondary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, &zmd->sb[1], false);
	if (ret == 0)
		sb_good[1] = true;

	/* Use highest generation sb first */
	if (!sb_good[0] && !sb_good[1]) {
		dmz_zmd_err(zmd, "No valid super block found");
		return -EIO;
	}

	if (sb_good[0])
		sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen);
	else {
		ret = dmz_recover_mblocks(zmd, 0);
		if (ret) {
			dmz_dev_err(zmd->sb[0].dev,
				    "Recovery of superblock 0 failed");
			return -EIO;
		}
	}

	if (sb_good[1])
		sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen);
	else {
		ret = dmz_recover_mblocks(zmd, 1);
		if (ret) {
			dmz_dev_err(zmd->sb[1].dev,
				    "Recovery of superblock 1 failed");
			return -EIO;
		}
	}

	if (sb_gen[0] >= sb_gen[1]) {
		zmd->sb_gen = sb_gen[0];
		zmd->mblk_primary = 0;
	} else {
		zmd->sb_gen = sb_gen[1];
		zmd->mblk_primary = 1;
	}

	dmz_dev_debug(zmd->sb[zmd->mblk_primary].dev,
		      "Using super block %u (gen %llu)",
		      zmd->mblk_primary, zmd->sb_gen);

	if (zmd->sb_version > 1) {
		int i;
		struct dmz_sb *sb;

		sb = kzalloc(sizeof(struct dmz_sb), GFP_KERNEL);
		if (!sb)
			return -ENOMEM;
		for (i = 1; i < zmd->nr_devs; i++) {
			sb->block = 0;
			sb->zone = dmz_get(zmd, zmd->dev[i].zone_offset);
			sb->dev = &zmd->dev[i];
			if (!dmz_is_meta(sb->zone)) {
				dmz_dev_err(sb->dev,
					    "Tertiary super block zone %u not marked as metadata zone",
					    sb->zone->id);
				ret = -EINVAL;
				break;
			}
			ret = dmz_get_sb(zmd, sb, i + 1);
			if (ret) {
				dmz_dev_err(sb->dev,
					    "Read tertiary super block failed");
				dmz_free_mblock(zmd, sb->mblk);
				break;
			}
			ret = dmz_check_sb(zmd, sb, true);
			dmz_free_mblock(zmd, sb->mblk);
			if (ret == -EINVAL)
				break;
		}
		kfree(sb);
	}
	return ret;
}
/*
 * Initialize a zone descriptor.
 */
static int dmz_init_zone(struct blk_zone *blkz, unsigned int num, void *data)
{
	struct dmz_dev *dev = data;
	struct dmz_metadata *zmd = dev->metadata;
	int idx = num + dev->zone_offset;
	struct dm_zone *zone;

	zone = dmz_insert(zmd, idx, dev);
	if (IS_ERR(zone))
		return PTR_ERR(zone);

	if (blkz->len != zmd->zone_nr_sectors) {
		if (zmd->sb_version > 1) {
			/* Ignore the eventual runt (smaller) zone */
			set_bit(DMZ_OFFLINE, &zone->flags);
			return 0;
		} else if (blkz->start + blkz->len == dev->capacity)
			return 0;
		return -ENXIO;
	}

	switch (blkz->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		set_bit(DMZ_RND, &zone->flags);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		set_bit(DMZ_SEQ, &zone->flags);
		break;
	default:
		return -ENXIO;
	}

	if (dmz_is_rnd(zone))
		zone->wp_block = 0;
	else
		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);

	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
		set_bit(DMZ_OFFLINE, &zone->flags);
	else if (blkz->cond == BLK_ZONE_COND_READONLY)
		set_bit(DMZ_READ_ONLY, &zone->flags);
	else {
		zmd->nr_useable_zones++;
		if (dmz_is_rnd(zone)) {
			zmd->nr_rnd_zones++;
			if (zmd->nr_devs == 1 && !zmd->sb[0].zone) {
				/* Primary super block zone */
				zmd->sb[0].zone = zone;
			}
		}
		if (zmd->nr_devs > 1 && num == 0) {
			/*
			 * Tertiary superblock zones are always at the
			 * start of the zoned devices, so mark them
			 * as metadata zone.
			 */
			set_bit(DMZ_META, &zone->flags);
		}
	}
	return 0;
}
static int dmz_emulate_zones(struct dmz_metadata *zmd, struct dmz_dev *dev)
{
	int idx;
	sector_t zone_offset = 0;

	for (idx = 0; idx < dev->nr_zones; idx++) {
		struct dm_zone *zone;

		zone = dmz_insert(zmd, idx, dev);
		if (IS_ERR(zone))
			return PTR_ERR(zone);
		set_bit(DMZ_CACHE, &zone->flags);
		zone->wp_block = 0;
		zmd->nr_cache_zones++;
		zmd->nr_useable_zones++;
		if (dev->capacity - zone_offset < zmd->zone_nr_sectors) {
			/* Disable runt zone */
			set_bit(DMZ_OFFLINE, &zone->flags);
			break;
		}
		zone_offset += zmd->zone_nr_sectors;
	}
	return 0;
}
/*
 * Free zones descriptors.
 */
static void dmz_drop_zones(struct dmz_metadata *zmd)
{
	int idx;

	for (idx = 0; idx < zmd->nr_zones; idx++) {
		struct dm_zone *zone = xa_load(&zmd->zones, idx);

		kfree(zone);
		xa_erase(&zmd->zones, idx);
	}
	xa_destroy(&zmd->zones);
}
/*
 * Allocate and initialize zone descriptors using the zone
 * information from disk.
 */
static int dmz_init_zones(struct dmz_metadata *zmd)
{
	int i, ret;
	struct dmz_dev *zoned_dev = &zmd->dev[0];

	/* Init */
	zmd->zone_nr_sectors = zmd->dev[0].zone_nr_sectors;
	zmd->zone_nr_sectors_shift = ilog2(zmd->zone_nr_sectors);
	zmd->zone_nr_blocks = dmz_sect2blk(zmd->zone_nr_sectors);
	zmd->zone_nr_blocks_shift = ilog2(zmd->zone_nr_blocks);
	zmd->zone_bitmap_size = zmd->zone_nr_blocks >> 3;
	zmd->zone_nr_bitmap_blocks =
		max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
	zmd->zone_bits_per_mblk = min_t(sector_t, zmd->zone_nr_blocks,
					DMZ_BLOCK_SIZE_BITS);

	/* Allocate zone array */
	zmd->nr_zones = 0;
	for (i = 0; i < zmd->nr_devs; i++) {
		struct dmz_dev *dev = &zmd->dev[i];

		dev->metadata = zmd;
		zmd->nr_zones += dev->nr_zones;

		atomic_set(&dev->unmap_nr_rnd, 0);
		INIT_LIST_HEAD(&dev->unmap_rnd_list);
		INIT_LIST_HEAD(&dev->map_rnd_list);

		atomic_set(&dev->unmap_nr_seq, 0);
		INIT_LIST_HEAD(&dev->unmap_seq_list);
		INIT_LIST_HEAD(&dev->map_seq_list);
	}

	if (!zmd->nr_zones) {
		DMERR("(%s): No zones found", zmd->devname);
		return -ENXIO;
	}
	xa_init(&zmd->zones);

	DMDEBUG("(%s): Using %zu B for zone information",
		zmd->devname, sizeof(struct dm_zone) * zmd->nr_zones);

	if (zmd->nr_devs > 1) {
		ret = dmz_emulate_zones(zmd, &zmd->dev[0]);
		if (ret < 0) {
			DMDEBUG("(%s): Failed to emulate zones, error %d",
				zmd->devname, ret);
			dmz_drop_zones(zmd);
			return ret;
		}

		/*
		 * Primary superblock zone is always at zone 0 when multiple
		 * drives are present.
		 */
		zmd->sb[0].zone = dmz_get(zmd, 0);

		for (i = 1; i < zmd->nr_devs; i++) {
			zoned_dev = &zmd->dev[i];

			ret = blkdev_report_zones(zoned_dev->bdev, 0,
						  BLK_ALL_ZONES,
						  dmz_init_zone, zoned_dev);
			if (ret < 0) {
				DMDEBUG("(%s): Failed to report zones, error %d",
					zmd->devname, ret);
				dmz_drop_zones(zmd);
				return ret;
			}
		}
		return 0;
	}

	/*
	 * Get zone information and initialize zone descriptors. At the same
	 * time, determine where the super block should be: first block of the
	 * first randomly writable zone.
	 */
	ret = blkdev_report_zones(zoned_dev->bdev, 0, BLK_ALL_ZONES,
				  dmz_init_zone, zoned_dev);
	if (ret < 0) {
		DMDEBUG("(%s): Failed to report zones, error %d",
			zmd->devname, ret);
		dmz_drop_zones(zmd);
		return ret;
	}

	return 0;
}
static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx,
			      void *data)
{
	struct dm_zone *zone = data;

	clear_bit(DMZ_OFFLINE, &zone->flags);
	clear_bit(DMZ_READ_ONLY, &zone->flags);
	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
		set_bit(DMZ_OFFLINE, &zone->flags);
	else if (blkz->cond == BLK_ZONE_COND_READONLY)
		set_bit(DMZ_READ_ONLY, &zone->flags);

	if (dmz_is_seq(zone))
		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
	else
		zone->wp_block = 0;
	return 0;
}
/*
 * Update a zone information.
 */
static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	struct dmz_dev *dev = zone->dev;
	unsigned int noio_flag;
	int ret;

	if (dev->flags & DMZ_BDEV_REGULAR)
		return 0;

	/*
	 * Get zone information from disk. Since blkdev_report_zones() uses
	 * GFP_KERNEL by default for memory allocations, set the per-task
	 * PF_MEMALLOC_NOIO flag so that all allocations are done as if
	 * GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	ret = blkdev_report_zones(dev->bdev, dmz_start_sect(zmd, zone), 1,
				  dmz_update_zone_cb, zone);
	memalloc_noio_restore(noio_flag);

	if (ret == 0)
		ret = -EIO;
	if (ret < 0) {
		dmz_dev_err(dev, "Get zone %u report failed",
			    zone->id);
		dmz_check_bdev(dev);
		return ret;
	}

	return 0;
}
/*
 * Check a zone write pointer position when the zone is marked
 * with the sequential write error flag.
 */
static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
				    struct dm_zone *zone)
{
	struct dmz_dev *dev = zone->dev;
	unsigned int wp = 0;
	int ret;

	wp = zone->wp_block;
	ret = dmz_update_zone(zmd, zone);
	if (ret)
		return ret;

	dmz_dev_warn(dev, "Processing zone %u write error (zone wp %u/%u)",
		     zone->id, zone->wp_block, wp);

	if (zone->wp_block < wp) {
		dmz_invalidate_blocks(zmd, zone, zone->wp_block,
				      wp - zone->wp_block);
	}

	return 0;
}
/*
 * Reset a zone write pointer.
 */
static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	int ret;

	/*
	 * Ignore offline zones, read only zones,
	 * and conventional zones.
	 */
	if (dmz_is_offline(zone) ||
	    dmz_is_readonly(zone) ||
	    dmz_is_rnd(zone))
		return 0;

	if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
		struct dmz_dev *dev = zone->dev;

		ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
				       dmz_start_sect(zmd, zone),
				       zmd->zone_nr_sectors, GFP_NOIO);
		if (ret) {
			dmz_dev_err(dev, "Reset zone %u failed %d",
				    zone->id, ret);
			return ret;
		}
	}

	/* Clear write error bit and rewind write pointer position */
	clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
	zone->wp_block = 0;

	return 0;
}

static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);
/*
 * Initialize chunk mapping.
 */
static int dmz_load_mapping(struct dmz_metadata *zmd)
{
	struct dm_zone *dzone, *bzone;
	struct dmz_mblock *dmap_mblk = NULL;
	struct dmz_map *dmap;
	unsigned int i = 0, e = 0, chunk = 0;
	unsigned int dzone_id;
	unsigned int bzone_id;

	/* Metadata block array for the chunk mapping table */
	zmd->map_mblk = kcalloc(zmd->nr_map_blocks,
				sizeof(struct dmz_mblk *), GFP_KERNEL);
	if (!zmd->map_mblk)
		return -ENOMEM;

	/* Get chunk mapping table blocks and initialize zone mapping */
	while (chunk < zmd->nr_chunks) {
		if (!dmap_mblk) {
			/* Get mapping block */
			dmap_mblk = dmz_get_mblock(zmd, i + 1);
			if (IS_ERR(dmap_mblk))
				return PTR_ERR(dmap_mblk);
			zmd->map_mblk[i] = dmap_mblk;
			dmap = (struct dmz_map *) dmap_mblk->data;
			i++;
			e = 0;
		}

		/* Check data zone */
		dzone_id = le32_to_cpu(dmap[e].dzone_id);
		if (dzone_id == DMZ_MAP_UNMAPPED)
			goto next;

		if (dzone_id >= zmd->nr_zones) {
			dmz_zmd_err(zmd, "Chunk %u mapping: invalid data zone ID %u",
				    chunk, dzone_id);
			return -EIO;
		}

		dzone = dmz_get(zmd, dzone_id);
		if (!dzone) {
			dmz_zmd_err(zmd, "Chunk %u mapping: data zone %u not present",
				    chunk, dzone_id);
			return -EIO;
		}
		set_bit(DMZ_DATA, &dzone->flags);
		dzone->chunk = chunk;
		dmz_get_zone_weight(zmd, dzone);

		if (dmz_is_cache(dzone))
			list_add_tail(&dzone->link, &zmd->map_cache_list);
		else if (dmz_is_rnd(dzone))
			list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
		else
			list_add_tail(&dzone->link, &dzone->dev->map_seq_list);

		/* Check buffer zone */
		bzone_id = le32_to_cpu(dmap[e].bzone_id);
		if (bzone_id == DMZ_MAP_UNMAPPED)
			goto next;

		if (bzone_id >= zmd->nr_zones) {
			dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone ID %u",
				    chunk, bzone_id);
			return -EIO;
		}

		bzone = dmz_get(zmd, bzone_id);
		if (!bzone) {
			dmz_zmd_err(zmd, "Chunk %u mapping: buffer zone %u not present",
				    chunk, bzone_id);
			return -EIO;
		}
		if (!dmz_is_rnd(bzone) && !dmz_is_cache(bzone)) {
			dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone %u",
				    chunk, bzone_id);
			return -EIO;
		}

		set_bit(DMZ_DATA, &bzone->flags);
		set_bit(DMZ_BUF, &bzone->flags);
		bzone->chunk = chunk;
		bzone->bzone = dzone;
		dzone->bzone = bzone;
		dmz_get_zone_weight(zmd, bzone);
		if (dmz_is_cache(bzone))
			list_add_tail(&bzone->link, &zmd->map_cache_list);
		else
			list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
next:
		chunk++;
		e++;
		if (e >= DMZ_MAP_ENTRIES)
			dmap_mblk = NULL;
	}

	/*
	 * At this point, only meta zones and mapped data zones were
	 * fully initialized. All remaining zones are unmapped data
	 * zones. Finish initializing those here.
	 */
	for (i = 0; i < zmd->nr_zones; i++) {
		dzone = dmz_get(zmd, i);
		if (!dzone)
			continue;
		if (dmz_is_meta(dzone))
			continue;
		if (dmz_is_offline(dzone))
			continue;

		if (dmz_is_cache(dzone))
			zmd->nr_cache++;
		else if (dmz_is_rnd(dzone))
			dzone->dev->nr_rnd++;
		else
			dzone->dev->nr_seq++;

		if (dmz_is_data(dzone)) {
			/* Already initialized */
			continue;
		}

		/* Unmapped data zone */
		set_bit(DMZ_DATA, &dzone->flags);
		dzone->chunk = DMZ_MAP_UNMAPPED;
		if (dmz_is_cache(dzone)) {
			list_add_tail(&dzone->link, &zmd->unmap_cache_list);
			atomic_inc(&zmd->unmap_nr_cache);
		} else if (dmz_is_rnd(dzone)) {
			list_add_tail(&dzone->link,
				      &dzone->dev->unmap_rnd_list);
			atomic_inc(&dzone->dev->unmap_nr_rnd);
		} else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) {
			list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list);
			set_bit(DMZ_RESERVED, &dzone->flags);
			atomic_inc(&zmd->nr_reserved_seq_zones);
			dzone->dev->nr_seq--;
		} else {
			list_add_tail(&dzone->link,
				      &dzone->dev->unmap_seq_list);
			atomic_inc(&dzone->dev->unmap_nr_seq);
		}
	}

	return 0;
}
/*
 * Set a data chunk mapping.
 */
static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk,
				  unsigned int dzone_id, unsigned int bzone_id)
{
	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
	struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
	int map_idx = chunk & DMZ_MAP_ENTRIES_MASK;

	dmap[map_idx].dzone_id = cpu_to_le32(dzone_id);
	dmap[map_idx].bzone_id = cpu_to_le32(bzone_id);
	dmz_dirty_mblock(zmd, dmap_mblk);
}
/*
 * The list of mapped zones is maintained in LRU order.
 * This rotates a zone at the end of its map list.
 */
static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	if (list_empty(&zone->link))
		return;

	list_del_init(&zone->link);
	if (dmz_is_seq(zone)) {
		/* LRU rotate sequential zone */
		list_add_tail(&zone->link, &zone->dev->map_seq_list);
	} else if (dmz_is_cache(zone)) {
		/* LRU rotate cache zone */
		list_add_tail(&zone->link, &zmd->map_cache_list);
	} else {
		/* LRU rotate random zone */
		list_add_tail(&zone->link, &zone->dev->map_rnd_list);
	}
}

/*
 * The list of mapped random zones is maintained
 * in LRU order. This rotates a zone at the end of the list.
 */
static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	__dmz_lru_zone(zmd, zone);
	if (zone->bzone)
		__dmz_lru_zone(zmd, zone->bzone);
}
/*
 * Wait for any zone to be freed.
 */
static void dmz_wait_for_free_zones(struct dmz_metadata *zmd)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&zmd->free_wq, &wait, TASK_UNINTERRUPTIBLE);
	dmz_unlock_map(zmd);
	dmz_unlock_metadata(zmd);

	io_schedule_timeout(HZ);

	dmz_lock_metadata(zmd);
	dmz_lock_map(zmd);
	finish_wait(&zmd->free_wq, &wait);
}
/*
 * Lock a zone for reclaim (set the zone RECLAIM bit).
 * Returns false if the zone cannot be locked or if it is already locked,
 * and true otherwise.
 */
int dmz_lock_zone_reclaim(struct dm_zone *zone)
{
	/* Active zones cannot be reclaimed */
	if (dmz_is_active(zone))
		return 0;

	return !test_and_set_bit(DMZ_RECLAIM, &zone->flags);
}

/*
 * Clear a zone reclaim flag.
 */
void dmz_unlock_zone_reclaim(struct dm_zone *zone)
{
	WARN_ON(dmz_is_active(zone));
	WARN_ON(!dmz_in_reclaim(zone));

	clear_bit_unlock(DMZ_RECLAIM, &zone->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zone->flags, DMZ_RECLAIM);
}
/*
 * Wait for a zone reclaim to complete.
 */
static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	dmz_unlock_map(zmd);
	dmz_unlock_metadata(zmd);
	set_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
	wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
	clear_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
	dmz_lock_metadata(zmd);
	dmz_lock_map(zmd);
}
/*
 * Select a cache or random write zone for reclaim.
 */
static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd,
						    unsigned int idx, bool idle)
{
	struct dm_zone *dzone = NULL;
	struct dm_zone *zone, *maxw_z = NULL;
	struct list_head *zone_list;

	/* If we have cache zones select from the cache zone list */
	if (zmd->nr_cache) {
		zone_list = &zmd->map_cache_list;
		/* Try to reclaim random zones, too, when idle */
		if (idle && list_empty(zone_list))
			zone_list = &zmd->dev[idx].map_rnd_list;
	} else
		zone_list = &zmd->dev[idx].map_rnd_list;

	/*
	 * Find the buffer zone with the heaviest weight or the first (oldest)
	 * data zone that can be reclaimed.
	 */
	list_for_each_entry(zone, zone_list, link) {
		if (dmz_is_buf(zone)) {
			dzone = zone->bzone;
			if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
				continue;
			if (!maxw_z || maxw_z->weight < dzone->weight)
				maxw_z = dzone;
		} else {
			dzone = zone;
			if (dmz_lock_zone_reclaim(dzone))
				return dzone;
		}
	}

	if (maxw_z && dmz_lock_zone_reclaim(maxw_z))
		return maxw_z;

	/*
	 * If we come here, none of the zones inspected could be locked for
	 * reclaim. Try again, being more aggressive, that is, find the
	 * first zone that can be reclaimed regardless of its weight.
	 */
	list_for_each_entry(zone, zone_list, link) {
		if (dmz_is_buf(zone)) {
			dzone = zone->bzone;
			if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
				continue;
		} else
			dzone = zone;
		if (dmz_lock_zone_reclaim(dzone))
			return dzone;
	}

	return NULL;
}
/*
 * Select a buffered sequential zone for reclaim.
 */
static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd,
						    unsigned int idx)
{
	struct dm_zone *zone;

	list_for_each_entry(zone, &zmd->dev[idx].map_seq_list, link) {
		if (!zone->bzone)
			continue;
		if (dmz_lock_zone_reclaim(zone))
			return zone;
	}

	return NULL;
}
/*
 * Select a zone for reclaim.
 */
struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
					 unsigned int dev_idx, bool idle)
{
	struct dm_zone *zone = NULL;

	/*
	 * Search for a zone candidate to reclaim: 2 cases are possible.
	 * (1) There are no free sequential zones. Then a random data zone
	 *     cannot be reclaimed. So choose a sequential zone to reclaim so
	 *     that afterward a random zone can be reclaimed.
	 * (2) At least one free sequential zone is available, then choose
	 *     the oldest random zone (data or buffer) that can be locked.
	 */
	dmz_lock_map(zmd);
	if (list_empty(&zmd->reserved_seq_zones_list))
		zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx);
	if (!zone)
		zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle);
	dmz_unlock_map(zmd);

	return zone;
}
/*
 * Get the zone mapping a chunk, if the chunk is mapped already.
 * If no mapping exists and the operation is WRITE, a zone is
 * allocated and used to map the chunk.
 * The zone returned will be set to the active state.
 */
struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd,
				      unsigned int chunk, int op)
{
	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
	struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
	int dmap_idx = chunk & DMZ_MAP_ENTRIES_MASK;
	unsigned int dzone_id;
	struct dm_zone *dzone = NULL;
	int ret = 0;
	int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;

	dmz_lock_map(zmd);
again:
	/* Get the chunk mapping */
	dzone_id = le32_to_cpu(dmap[dmap_idx].dzone_id);
	if (dzone_id == DMZ_MAP_UNMAPPED) {
		/*
		 * Read or discard in unmapped chunks are fine. But for
		 * writes, we need a mapping, so get one.
		 */
		if (op != REQ_OP_WRITE)
			goto out;

		/* Allocate a random zone */
		dzone = dmz_alloc_zone(zmd, 0, alloc_flags);
		if (!dzone) {
			if (dmz_dev_is_dying(zmd)) {
				dzone = ERR_PTR(-EIO);
				goto out;
			}
			dmz_wait_for_free_zones(zmd);
			goto again;
		}

		dmz_map_zone(zmd, dzone, chunk);

	} else {
		/* The chunk is already mapped: get the mapping zone */
		dzone = dmz_get(zmd, dzone_id);
		if (!dzone) {
			dzone = ERR_PTR(-EIO);
			goto out;
		}
		if (dzone->chunk != chunk) {
			dzone = ERR_PTR(-EIO);
			goto out;
		}

		/* Repair write pointer if the sequential dzone has error */
		if (dmz_seq_write_err(dzone)) {
			ret = dmz_handle_seq_write_err(zmd, dzone);
			if (ret) {
				dzone = ERR_PTR(-EIO);
				goto out;
			}
			clear_bit(DMZ_SEQ_WRITE_ERR, &dzone->flags);
		}
	}

	/*
	 * If the zone is being reclaimed, the chunk mapping may change
	 * to a different zone. So wait for reclaim and retry. Otherwise,
	 * activate the zone (this will prevent reclaim from touching it).
	 */
	if (dmz_in_reclaim(dzone)) {
		dmz_wait_for_reclaim(zmd, dzone);
		goto again;
	}
	dmz_activate_zone(dzone);
	dmz_lru_zone(zmd, dzone);
out:
	dmz_unlock_map(zmd);

	return dzone;
}
/*
 * Write and discard change the block validity of data zones and their buffer
 * zones. Check here that valid blocks are still present. If all blocks are
 * invalid, the zones can be unmapped on the fly without waiting for reclaim
 * to do it.
 */
void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *dzone)
{
	struct dm_zone *bzone;

	dmz_lock_map(zmd);

	bzone = dzone->bzone;
	if (bzone) {
		if (dmz_weight(bzone))
			dmz_lru_zone(zmd, bzone);
		else {
			/* Empty buffer zone: reclaim it */
			dmz_unmap_zone(zmd, bzone);
			dmz_free_zone(zmd, bzone);
			bzone = NULL;
		}
	}

	/* Deactivate the data zone */
	dmz_deactivate_zone(dzone);
	if (dmz_is_active(dzone) || bzone || dmz_weight(dzone))
		dmz_lru_zone(zmd, dzone);
	else {
		/* Unbuffered inactive empty data zone: reclaim it */
		dmz_unmap_zone(zmd, dzone);
		dmz_free_zone(zmd, dzone);
	}

	dmz_unlock_map(zmd);
}
/*
 * Allocate and map a random zone to buffer a chunk
 * already mapped to a sequential zone.
 */
struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
				     struct dm_zone *dzone)
{
	struct dm_zone *bzone;
	int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;

	dmz_lock_map(zmd);
again:
	bzone = dzone->bzone;
	if (bzone)
		goto out;

	/* Allocate a random zone */
	bzone = dmz_alloc_zone(zmd, 0, alloc_flags);
	if (!bzone) {
		if (dmz_dev_is_dying(zmd)) {
			bzone = ERR_PTR(-EIO);
			goto out;
		}
		dmz_wait_for_free_zones(zmd);
		goto again;
	}

	/* Update the chunk mapping */
	dmz_set_chunk_mapping(zmd, dzone->chunk, dzone->id, bzone->id);

	set_bit(DMZ_BUF, &bzone->flags);
	bzone->chunk = dzone->chunk;
	bzone->bzone = dzone;
	dzone->bzone = bzone;
	if (dmz_is_cache(bzone))
		list_add_tail(&bzone->link, &zmd->map_cache_list);
	else
		list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
out:
	dmz_unlock_map(zmd);

	return bzone;
}
/*
 * Get an unmapped (free) zone.
 * This must be called with the mapping lock held.
 */
struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned int dev_idx,
			       unsigned long flags)
{
	struct list_head *list;
	struct dm_zone *zone;
	int i;

	/* Schedule reclaim to ensure free zones are available */
	if (!(flags & DMZ_ALLOC_RECLAIM)) {
		for (i = 0; i < zmd->nr_devs; i++)
			dmz_schedule_reclaim(zmd->dev[i].reclaim);
	}

	i = 0;
again:
	if (flags & DMZ_ALLOC_CACHE)
		list = &zmd->unmap_cache_list;
	else if (flags & DMZ_ALLOC_RND)
		list = &zmd->dev[dev_idx].unmap_rnd_list;
	else
		list = &zmd->dev[dev_idx].unmap_seq_list;

	if (list_empty(list)) {
		/*
		 * No free zone: return NULL if this is not for reclaim.
		 */
		if (!(flags & DMZ_ALLOC_RECLAIM))
			return NULL;
		/*
		 * Try to allocate from other devices
		 */
		if (i < zmd->nr_devs) {
			dev_idx = (dev_idx + 1) % zmd->nr_devs;
			i++;
			goto again;
		}

		/*
		 * Fallback to the reserved sequential zones
		 */
		zone = list_first_entry_or_null(&zmd->reserved_seq_zones_list,
						struct dm_zone, link);
		if (zone) {
			list_del_init(&zone->link);
			atomic_dec(&zmd->nr_reserved_seq_zones);
		}
		return zone;
	}

	zone = list_first_entry(list, struct dm_zone, link);
	list_del_init(&zone->link);

	if (dmz_is_cache(zone))
		atomic_dec(&zmd->unmap_nr_cache);
	else if (dmz_is_rnd(zone))
		atomic_dec(&zone->dev->unmap_nr_rnd);
	else
		atomic_dec(&zone->dev->unmap_nr_seq);

	if (dmz_is_offline(zone)) {
		dmz_zmd_warn(zmd, "Zone %u is offline", zone->id);
		zone = NULL;
		goto again;
	}
	if (dmz_is_meta(zone)) {
		dmz_zmd_warn(zmd, "Zone %u has metadata", zone->id);
		zone = NULL;
		goto again;
	}
	return zone;
}
/*
 * Free a zone.
 * This must be called with the mapping lock held.
 */
void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	/* If this is a sequential zone, reset it */
	if (dmz_is_seq(zone))
		dmz_reset_zone(zmd, zone);

	/* Return the zone to its type unmap list */
	if (dmz_is_cache(zone)) {
		list_add_tail(&zone->link, &zmd->unmap_cache_list);
		atomic_inc(&zmd->unmap_nr_cache);
	} else if (dmz_is_rnd(zone)) {
		list_add_tail(&zone->link, &zone->dev->unmap_rnd_list);
		atomic_inc(&zone->dev->unmap_nr_rnd);
	} else if (dmz_is_reserved(zone)) {
		list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
		atomic_inc(&zmd->nr_reserved_seq_zones);
	} else {
		list_add_tail(&zone->link, &zone->dev->unmap_seq_list);
		atomic_inc(&zone->dev->unmap_nr_seq);
	}

	wake_up_all(&zmd->free_wq);
}
/*
 * Map a chunk to a zone.
 * This must be called with the mapping lock held.
 */
void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
		  unsigned int chunk)
{
	/* Set the chunk mapping */
	dmz_set_chunk_mapping(zmd, chunk, dzone->id,
			      DMZ_MAP_UNMAPPED);
	dzone->chunk = chunk;
	if (dmz_is_cache(dzone))
		list_add_tail(&dzone->link, &zmd->map_cache_list);
	else if (dmz_is_rnd(dzone))
		list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
	else
		list_add_tail(&dzone->link, &dzone->dev->map_seq_list);
}
/*
 * Unmap a zone.
 * This must be called with the mapping lock held.
 */
void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int chunk = zone->chunk;
	unsigned int dzone_id;

	if (chunk == DMZ_MAP_UNMAPPED) {
		/* Already unmapped */
		return;
	}

	if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
		/*
		 * Unmapping the chunk buffer zone: clear only
		 * the chunk buffer mapping
		 */
		dzone_id = zone->bzone->id;
		zone->bzone->bzone = NULL;
		zone->bzone = NULL;
	} else {
		/*
		 * Unmapping the chunk data zone: the zone must
		 * not be buffered.
		 */
		if (WARN_ON(zone->bzone)) {
			zone->bzone->bzone = NULL;
			zone->bzone = NULL;
		}
		dzone_id = DMZ_MAP_UNMAPPED;
	}

	dmz_set_chunk_mapping(zmd, chunk, dzone_id, DMZ_MAP_UNMAPPED);

	zone->chunk = DMZ_MAP_UNMAPPED;
	list_del_init(&zone->link);
}
/*
 * Set @nr_bits bits in @bitmap starting from @bit.
 * Return the number of bits changed from 0 to 1.
 */
static unsigned int dmz_set_bits(unsigned long *bitmap,
				 unsigned int bit, unsigned int nr_bits)
{
	unsigned long *addr;
	unsigned int end = bit + nr_bits;
	unsigned int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Try to set the whole word at once */
			addr = bitmap + BIT_WORD(bit);
			if (*addr == 0) {
				*addr = ULONG_MAX;
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (!test_and_set_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}
/*
 * Get the bitmap block storing the bit for chunk_block in zone.
 */
static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
					 struct dm_zone *zone,
					 sector_t chunk_block)
{
	sector_t bitmap_block = 1 + zmd->nr_map_blocks +
		(sector_t)(zone->id * zmd->zone_nr_bitmap_blocks) +
		(chunk_block >> DMZ_BLOCK_SHIFT_BITS);

	return dmz_get_mblock(zmd, bitmap_block);
}
/*
 * Copy the valid blocks bitmap of from_zone to the bitmap of to_zone.
 */
int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
			  struct dm_zone *to_zone)
{
	struct dmz_mblock *from_mblk, *to_mblk;
	sector_t chunk_block = 0;

	/* Get the zones bitmap blocks */
	while (chunk_block < zmd->zone_nr_blocks) {
		from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
		if (IS_ERR(from_mblk))
			return PTR_ERR(from_mblk);
		to_mblk = dmz_get_bitmap(zmd, to_zone, chunk_block);
		if (IS_ERR(to_mblk)) {
			dmz_release_mblock(zmd, from_mblk);
			return PTR_ERR(to_mblk);
		}

		memcpy(to_mblk->data, from_mblk->data, DMZ_BLOCK_SIZE);
		dmz_dirty_mblock(zmd, to_mblk);

		dmz_release_mblock(zmd, to_mblk);
		dmz_release_mblock(zmd, from_mblk);

		chunk_block += zmd->zone_bits_per_mblk;
	}

	to_zone->weight = from_zone->weight;

	return 0;
}
/*
 * Merge the valid blocks bitmap of from_zone into the bitmap of to_zone,
 * starting from chunk_block.
 */
int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
			   struct dm_zone *to_zone, sector_t chunk_block)
{
	unsigned int nr_blocks;
	int ret;

	/* Get the zones bitmap blocks */
	while (chunk_block < zmd->zone_nr_blocks) {
		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
		if (ret <= 0)
			break;

		nr_blocks = ret;
		ret = dmz_validate_blocks(zmd, to_zone, chunk_block, nr_blocks);
		if (ret)
			return ret;

		chunk_block += nr_blocks;
	}

	return 0;
}
2486 int dmz_validate_blocks(struct dmz_metadata
*zmd
, struct dm_zone
*zone
,
2487 sector_t chunk_block
, unsigned int nr_blocks
)
2489 unsigned int count
, bit
, nr_bits
;
2490 unsigned int zone_nr_blocks
= zmd
->zone_nr_blocks
;
2491 struct dmz_mblock
*mblk
;
2494 dmz_zmd_debug(zmd
, "=> VALIDATE zone %u, block %llu, %u blocks",
2495 zone
->id
, (unsigned long long)chunk_block
,
2498 WARN_ON(chunk_block
+ nr_blocks
> zone_nr_blocks
);
2501 /* Get bitmap block */
2502 mblk
= dmz_get_bitmap(zmd
, zone
, chunk_block
);
2504 return PTR_ERR(mblk
);
2507 bit
= chunk_block
& DMZ_BLOCK_MASK_BITS
;
2508 nr_bits
= min(nr_blocks
, zmd
->zone_bits_per_mblk
- bit
);
2510 count
= dmz_set_bits((unsigned long *)mblk
->data
, bit
, nr_bits
);
2512 dmz_dirty_mblock(zmd
, mblk
);
2515 dmz_release_mblock(zmd
, mblk
);
2517 nr_blocks
-= nr_bits
;
2518 chunk_block
+= nr_bits
;
2521 if (likely(zone
->weight
+ n
<= zone_nr_blocks
))
2524 dmz_zmd_warn(zmd
, "Zone %u: weight %u should be <= %u",
2525 zone
->id
, zone
->weight
,
2526 zone_nr_blocks
- n
);
2527 zone
->weight
= zone_nr_blocks
;
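/*
 * Illustrative note (assumed numbers): when a validated range crosses a
 * bitmap block boundary, the loop above splits it per bitmap block. With
 * zone_bits_per_mblk = 32768, validating 100 blocks starting at
 * chunk_block = 32700 first sets bits 32700..32767 of one bitmap block
 * (nr_bits = min(100, 32768 - 32700) = 68), then bits 0..31 of the next
 * one (nr_bits = 32), dirtying only the bitmap blocks whose content
 * actually changed.
 */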
/*
 * Clear nr_bits bits in bitmap starting from bit.
 * Return the number of bits cleared.
 */
static int dmz_clear_bits(unsigned long *bitmap, int bit, int nr_bits)
{
	unsigned long *addr;
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Try to clear whole word at once */
			addr = bitmap + BIT_WORD(bit);
			if (*addr == ULONG_MAX) {
				*addr = 0;
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (test_and_clear_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}
/*
 * Invalidate all the blocks in the range [block..block+nr_blocks-1].
 */
int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int count, bit, nr_bits;
	struct dmz_mblock *mblk;
	unsigned int n = 0;

	dmz_zmd_debug(zmd, "=> INVALIDATE zone %u, block %llu, %u blocks",
		      zone->id, (u64)chunk_block, nr_blocks);

	WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Clear bits */
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);

		count = dmz_clear_bits((unsigned long *)mblk->data,
				       bit, nr_bits);
		if (count) {
			dmz_dirty_mblock(zmd, mblk);
			n += count;
		}
		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	if (zone->weight >= n)
		zone->weight -= n;
	else {
		dmz_zmd_warn(zmd, "Zone %u: weight %u should be >= %u",
			     zone->id, zone->weight, n);
		zone->weight = 0;
	}

	return 0;
}
/*
 * Get a block bit value.
 */
static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t chunk_block)
{
	struct dmz_mblock *mblk;
	int ret;

	WARN_ON(chunk_block >= zmd->zone_nr_blocks);

	/* Get bitmap block */
	mblk = dmz_get_bitmap(zmd, zone, chunk_block);
	if (IS_ERR(mblk))
		return PTR_ERR(mblk);

	/* Get offset */
	ret = test_bit(chunk_block & DMZ_BLOCK_MASK_BITS,
		       (unsigned long *) mblk->data) != 0;

	dmz_release_mblock(zmd, mblk);

	return ret;
}
/*
 * Return the number of blocks from chunk_block to the first block with a bit
 * value specified by set. Search at most nr_blocks blocks from chunk_block.
 */
static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
				 sector_t chunk_block, unsigned int nr_blocks,
				 int set)
{
	struct dmz_mblock *mblk;
	unsigned int bit, set_bit, nr_bits;
	unsigned int zone_bits = zmd->zone_bits_per_mblk;
	unsigned long *bitmap;
	int n = 0;

	WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Get offset */
		bitmap = (unsigned long *) mblk->data;
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zone_bits - bit);
		if (set)
			set_bit = find_next_bit(bitmap, zone_bits, bit);
		else
			set_bit = find_next_zero_bit(bitmap, zone_bits, bit);
		dmz_release_mblock(zmd, mblk);

		n += set_bit - bit;
		if (set_bit < zone_bits)
			break;

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	return n;
}
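/*
 * Illustrative note (assumed numbers): dmz_to_next_set_block() accumulates
 * the distance across bitmap blocks. Searching for the next valid bit from
 * chunk_block = 32760 when the first set bit is at 32770 (with
 * zone_bits_per_mblk = 32768): the first bitmap block scan finds nothing
 * before the end of the block, adding 32768 - 32760 = 8 to n; the next
 * block's scan finds the bit at offset 2, adding 2, so the function returns
 * 10 = 32770 - 32760.
 */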
/*
 * Test if chunk_block is valid. If it is, the number of consecutive
 * valid blocks from chunk_block will be returned.
 */
int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
		    sector_t chunk_block)
{
	int valid;

	valid = dmz_test_block(zmd, zone, chunk_block);
	if (valid <= 0)
		return valid;

	/* The block is valid: get the number of valid blocks from block */
	return dmz_to_next_set_block(zmd, zone, chunk_block,
				     zmd->zone_nr_blocks - chunk_block, 0);
}
/*
 * Find the first valid block from @chunk_block in @zone.
 * If such a block is found, its number is returned using
 * @chunk_block and the total number of valid blocks from @chunk_block
 * is returned.
 */
int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t *chunk_block)
{
	sector_t start_block = *chunk_block;
	int ret;

	ret = dmz_to_next_set_block(zmd, zone, start_block,
				    zmd->zone_nr_blocks - start_block, 1);
	if (ret < 0)
		return ret;

	start_block += ret;
	*chunk_block = start_block;

	return dmz_to_next_set_block(zmd, zone, start_block,
				     zmd->zone_nr_blocks - start_block, 0);
}
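/*
 * Illustrative note (assumed numbers): if a zone's valid blocks are exactly
 * 10..19 and dmz_first_valid_block() is called with *chunk_block = 5, the
 * first dmz_to_next_set_block() pass (set == 1) returns 5, so *chunk_block
 * is advanced to 10; the second pass (set == 0) then returns 10, the length
 * of the valid run starting at that block.
 */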
/*
 * Count the number of bits set starting from bit up to bit + nr_bits - 1.
 */
static int dmz_count_bits(void *bitmap, int bit, int nr_bits)
{
	unsigned long *addr;
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			addr = (unsigned long *)bitmap + BIT_WORD(bit);
			if (*addr == ULONG_MAX) {
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (test_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}
/*
 * Get a zone weight.
 */
static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	struct dmz_mblock *mblk;
	sector_t chunk_block = 0;
	unsigned int bit, nr_bits;
	unsigned int nr_blocks = zmd->zone_nr_blocks;
	void *bitmap;
	int n = 0;

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk)) {
			n = 0;
			break;
		}

		/* Count bits in this block */
		bitmap = mblk->data;
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
		n += dmz_count_bits(bitmap, bit, nr_bits);

		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	zone->weight = n;
}
/*
 * Cleanup the zoned metadata resources.
 */
static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
{
	struct rb_root *root;
	struct dmz_mblock *mblk, *next;
	int i;

	/* Release zone mapping resources */
	if (zmd->map_mblk) {
		for (i = 0; i < zmd->nr_map_blocks; i++)
			dmz_release_mblock(zmd, zmd->map_mblk[i]);
		kfree(zmd->map_mblk);
		zmd->map_mblk = NULL;
	}

	/* Release super blocks */
	for (i = 0; i < 2; i++) {
		if (zmd->sb[i].mblk) {
			dmz_free_mblock(zmd, zmd->sb[i].mblk);
			zmd->sb[i].mblk = NULL;
		}
	}

	/* Free cached blocks */
	while (!list_empty(&zmd->mblk_dirty_list)) {
		mblk = list_first_entry(&zmd->mblk_dirty_list,
					struct dmz_mblock, link);
		dmz_zmd_warn(zmd, "mblock %llu still in dirty list (ref %u)",
			     (u64)mblk->no, mblk->ref);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
	}

	while (!list_empty(&zmd->mblk_lru_list)) {
		mblk = list_first_entry(&zmd->mblk_lru_list,
					struct dmz_mblock, link);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
	}

	/* Sanity checks: the mblock rbtree should now be empty */
	root = &zmd->mblk_rbtree;
	rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
		dmz_zmd_warn(zmd, "mblock %llu ref %u still in rbtree",
			     (u64)mblk->no, mblk->ref);
		RB_CLEAR_NODE(&mblk->node);
		dmz_free_mblock(zmd, mblk);
	}

	/* Free the zone descriptors */
	dmz_drop_zones(zmd);

	mutex_destroy(&zmd->mblk_flush_lock);
	mutex_destroy(&zmd->map_lock);
}
static void dmz_print_dev(struct dmz_metadata *zmd, int num)
{
	struct dmz_dev *dev = &zmd->dev[num];

	if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE)
		dmz_dev_info(dev, "Regular block device");
	else
		dmz_dev_info(dev, "Host-%s zoned block device",
			     bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
			     "aware" : "managed");
	if (zmd->sb_version > 1) {
		sector_t sector_offset =
			dev->zone_offset << zmd->zone_nr_sectors_shift;

		dmz_dev_info(dev, " %llu 512-byte logical sectors (offset %llu)",
			     (u64)dev->capacity, (u64)sector_offset);
		dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors (offset %llu)",
			     dev->nr_zones, (u64)zmd->zone_nr_sectors,
			     (u64)dev->zone_offset);
	} else {
		dmz_dev_info(dev, " %llu 512-byte logical sectors",
			     (u64)dev->capacity);
		dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors",
			     dev->nr_zones, (u64)zmd->zone_nr_sectors);
	}
}
/*
 * Initialize the zoned metadata.
 */
int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
		     struct dmz_metadata **metadata,
		     const char *devname)
{
	struct dmz_metadata *zmd;
	unsigned int i;
	struct dm_zone *zone;
	int ret;

	zmd = kzalloc(sizeof(struct dmz_metadata), GFP_KERNEL);
	if (!zmd)
		return -ENOMEM;

	strcpy(zmd->devname, devname);
	zmd->dev = dev;
	zmd->nr_devs = num_dev;
	zmd->mblk_rbtree = RB_ROOT;
	init_rwsem(&zmd->mblk_sem);
	mutex_init(&zmd->mblk_flush_lock);
	spin_lock_init(&zmd->mblk_lock);
	INIT_LIST_HEAD(&zmd->mblk_lru_list);
	INIT_LIST_HEAD(&zmd->mblk_dirty_list);

	mutex_init(&zmd->map_lock);

	atomic_set(&zmd->unmap_nr_cache, 0);
	INIT_LIST_HEAD(&zmd->unmap_cache_list);
	INIT_LIST_HEAD(&zmd->map_cache_list);

	atomic_set(&zmd->nr_reserved_seq_zones, 0);
	INIT_LIST_HEAD(&zmd->reserved_seq_zones_list);

	init_waitqueue_head(&zmd->free_wq);

	/* Initialize zone descriptors */
	ret = dmz_init_zones(zmd);
	if (ret)
		goto err;

	/* Get super block */
	ret = dmz_load_sb(zmd);
	if (ret)
		goto err;

	/* Set metadata zones starting from sb_zone */
	for (i = 0; i < zmd->nr_meta_zones << 1; i++) {
		zone = dmz_get(zmd, zmd->sb[0].zone->id + i);
		if (!zone) {
			dmz_zmd_err(zmd,
				    "metadata zone %u not present", i);
			ret = -ENXIO;
			goto err;
		}
		if (!dmz_is_rnd(zone) && !dmz_is_cache(zone)) {
			dmz_zmd_err(zmd,
				    "metadata zone %d is not random", i);
			ret = -ENXIO;
			goto err;
		}
		set_bit(DMZ_META, &zone->flags);
	}
	/* Load mapping table */
	ret = dmz_load_mapping(zmd);
	if (ret)
		goto err;

	/*
	 * Cache size boundaries: allow at least 2 super blocks, the chunk map
	 * blocks and enough blocks to be able to cache the bitmap blocks of
	 * up to 16 zones when idle (min_nr_mblks). Otherwise, if busy, allow
	 * the cache to add 512 more metadata blocks.
	 */
	zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16;
	zmd->max_nr_mblks = zmd->min_nr_mblks + 512;
	zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count;
	zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan;
	zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;

	/* Metadata cache shrinker */
	ret = register_shrinker(&zmd->mblk_shrinker);
	if (ret) {
		dmz_zmd_err(zmd, "Register metadata cache shrinker failed");
		goto err;
	}

	dmz_zmd_info(zmd, "DM-Zoned metadata version %d", zmd->sb_version);
	for (i = 0; i < zmd->nr_devs; i++)
		dmz_print_dev(zmd, i);

	dmz_zmd_info(zmd, " %u zones of %llu 512-byte logical sectors",
		     zmd->nr_zones, (u64)zmd->zone_nr_sectors);
	dmz_zmd_debug(zmd, " %u metadata zones",
		      zmd->nr_meta_zones * 2);
	dmz_zmd_debug(zmd, " %u data zones for %u chunks",
		      zmd->nr_data_zones, zmd->nr_chunks);
	dmz_zmd_debug(zmd, " %u cache zones (%u unmapped)",
		      zmd->nr_cache, atomic_read(&zmd->unmap_nr_cache));
	for (i = 0; i < zmd->nr_devs; i++) {
		dmz_zmd_debug(zmd, " %u random zones (%u unmapped)",
			      dmz_nr_rnd_zones(zmd, i),
			      dmz_nr_unmap_rnd_zones(zmd, i));
		dmz_zmd_debug(zmd, " %u sequential zones (%u unmapped)",
			      dmz_nr_seq_zones(zmd, i),
			      dmz_nr_unmap_seq_zones(zmd, i));
	}
	dmz_zmd_debug(zmd, " %u reserved sequential data zones",
		      zmd->nr_reserved_seq);
	dmz_zmd_debug(zmd, "Format:");
	dmz_zmd_debug(zmd, "%u metadata blocks per set (%u max cache)",
		      zmd->nr_meta_blocks, zmd->max_nr_mblks);
	dmz_zmd_debug(zmd, " %u data zone mapping blocks",
		      zmd->nr_map_blocks);
	dmz_zmd_debug(zmd, " %u bitmap blocks",
		      zmd->nr_bitmap_blocks);

	*metadata = zmd;

	return 0;
err:
	dmz_cleanup_metadata(zmd);
	kfree(zmd);
	*metadata = NULL;

	return ret;
}
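/*
 * Illustrative note on the cache bounds set in dmz_ctr_metadata() above
 * (assumed numbers): with nr_map_blocks = 8 and zone_nr_bitmap_blocks = 2,
 * min_nr_mblks = 2 + 8 + 2 * 16 = 42 cached metadata blocks are always kept,
 * and the cache is allowed to grow up to max_nr_mblks = 42 + 512 = 554
 * blocks while the target is busy, before the shrinker trims it back toward
 * the minimum.
 */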
/*
 * Cleanup the zoned metadata resources.
 */
void dmz_dtr_metadata(struct dmz_metadata *zmd)
{
	unregister_shrinker(&zmd->mblk_shrinker);
	dmz_cleanup_metadata(zmd);
	kfree(zmd);
}
/*
 * Check zone information on resume.
 */
int dmz_resume_metadata(struct dmz_metadata *zmd)
{
	struct dm_zone *zone;
	sector_t wp_block;
	unsigned int i;
	int ret;

	/* Check zones */
	for (i = 0; i < zmd->nr_zones; i++) {
		zone = dmz_get(zmd, i);
		if (!zone) {
			dmz_zmd_err(zmd, "Unable to get zone %u", i);
			return -EIO;
		}
		wp_block = zone->wp_block;

		ret = dmz_update_zone(zmd, zone);
		if (ret) {
			dmz_zmd_err(zmd, "Broken zone %u", i);
			return ret;
		}

		if (dmz_is_offline(zone)) {
			dmz_zmd_warn(zmd, "Zone %u is offline", i);
			continue;
		}

		/* Check write pointer */
		if (!dmz_is_seq(zone))
			zone->wp_block = 0;
		else if (zone->wp_block != wp_block) {
			dmz_zmd_err(zmd, "Zone %u: Invalid wp (%llu / %llu)",
				    i, (u64)zone->wp_block, (u64)wp_block);
			zone->wp_block = wp_block;
			dmz_invalidate_blocks(zmd, zone, zone->wp_block,
					      zmd->zone_nr_blocks - zone->wp_block);