// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define DM_MSG_PREFIX		"zoned reclaim"
struct dmz_reclaim {
	struct dmz_metadata	*metadata;

	struct delayed_work	work;
	struct workqueue_struct *wq;

	struct dm_kcopyd_client	*kc;
	struct dm_kcopyd_throttle kc_throttle;
	int			kc_err;

	int			dev_idx;

	unsigned long		flags;

	/* Last target access time */
	unsigned long		atime;
};
/*
 * Reclaim state flags.
 */
enum {
	DMZ_RECLAIM_KCOPY,
};
/*
 * Number of seconds of target BIO inactivity to consider the target idle.
 */
#define DMZ_IDLE_PERIOD			(10UL * HZ)
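/*
 * Note: HZ is the number of jiffies per second, so the value above is
 * 10 seconds expressed in jiffies, directly comparable to zrc->atime.
 */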
/*
 * Percentage of unmapped (free) random zones below which reclaim starts
 * even if the target is busy.
 */
#define DMZ_RECLAIM_LOW_UNMAP_ZONES	30

/*
 * Percentage of unmapped (free) random zones above which reclaim will
 * stop if the target is busy.
 */
#define DMZ_RECLAIM_HIGH_UNMAP_ZONES	50
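/*
 * With these defaults, a busy target starts reclaim only once the
 * percentage of free (unmapped) zones drops to the low watermark or
 * below; an idle target is reclaimed regardless of these watermarks
 * (see dmz_should_reclaim()).
 */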
/*
 * Align a sequential zone write pointer to chunk_block.
 */
static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
				sector_t block)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dmz_dev *dev = zone->dev;
	sector_t wp_block = zone->wp_block;
	unsigned int nr_blocks;
	int ret;

	if (wp_block == block)
		return 0;

	if (wp_block > block)
		return -EIO;

	/*
	 * Zero out the space between the write
	 * pointer and the requested position.
	 */
	nr_blocks = block - wp_block;
	ret = blkdev_issue_zeroout(dev->bdev,
				   dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
				   dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
	if (ret) {
		dmz_dev_err(dev,
			    "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
			    zone->id, (unsigned long long)wp_block,
			    (unsigned long long)block, nr_blocks, ret);
		dmz_check_bdev(dev);
		return ret;
	}

	zone->wp_block = block;

	return 0;
}
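/*
 * Example: if the zone write pointer is at block 100 and the next valid
 * block to copy is block 104, the 4-block hole at blocks 100-103 is
 * zeroed so the device-side write pointer advances to block 104 before
 * the copy is issued.
 */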
/*
 * dm_kcopyd_copy end notification.
 */
static void dmz_reclaim_kcopy_end(int read_err, unsigned long write_err,
				  void *context)
{
	struct dmz_reclaim *zrc = context;

	if (read_err || write_err)
		zrc->kc_err = -EIO;
	else
		zrc->kc_err = 0;

	clear_bit_unlock(DMZ_RECLAIM_KCOPY, &zrc->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zrc->flags, DMZ_RECLAIM_KCOPY);
}
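/*
 * The clear_bit_unlock()/smp_mb__after_atomic()/wake_up_bit() sequence
 * pairs with the wait_on_bit_io() call in dmz_reclaim_copy(): the
 * barrier guarantees the waiter observes kc_err and the cleared bit
 * before being woken.
 */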
/*
 * Copy valid blocks of src_zone into dst_zone.
 */
static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
			    struct dm_zone *src_zone, struct dm_zone *dst_zone)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_io_region src, dst;
	sector_t block = 0, end_block;
	sector_t nr_blocks;
	sector_t src_zone_block;
	sector_t dst_zone_block;
	unsigned long flags = 0;
	int ret;

	if (dmz_is_seq(src_zone))
		end_block = src_zone->wp_block;
	else
		end_block = dmz_zone_nr_blocks(zmd);
	src_zone_block = dmz_start_block(zmd, src_zone);
	dst_zone_block = dmz_start_block(zmd, dst_zone);

	if (dmz_is_seq(dst_zone))
		set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
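	/*
	 * DM_KCOPYD_WRITE_SEQ makes kcopyd issue its writes in sequential
	 * offset order, which a host-managed zone requires to keep its
	 * write pointer valid.
	 */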
	while (block < end_block) {
		if (src_zone->dev->flags & DMZ_BDEV_DYING)
			return -EIO;
		if (dst_zone->dev->flags & DMZ_BDEV_DYING)
			return -EIO;

		if (dmz_reclaim_should_terminate(src_zone))
			return -EINTR;

		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, src_zone, &block);
		if (ret <= 0)
			return ret;
		nr_blocks = ret;

		/*
		 * If we are writing in a sequential zone, we must make sure
		 * that writes are sequential, so zero out any hole between
		 * the write pointer and the block to copy.
		 */
		if (dmz_is_seq(dst_zone)) {
			ret = dmz_reclaim_align_wp(zrc, dst_zone, block);
			if (ret)
				return ret;
		}

		src.bdev = src_zone->dev->bdev;
		src.sector = dmz_blk2sect(src_zone_block + block);
		src.count = dmz_blk2sect(nr_blocks);

		dst.bdev = dst_zone->dev->bdev;
		dst.sector = dmz_blk2sect(dst_zone_block + block);
		dst.count = src.count;

		/* Copy the valid region */
		set_bit(DMZ_RECLAIM_KCOPY, &zrc->flags);
		dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags,
			       dmz_reclaim_kcopy_end, zrc);

		/* Wait for copy to complete */
		wait_on_bit_io(&zrc->flags, DMZ_RECLAIM_KCOPY,
			       TASK_UNINTERRUPTIBLE);
		if (zrc->kc_err)
			return zrc->kc_err;

		block += nr_blocks;
	}

	if (dmz_is_seq(dst_zone))
		dst_zone->wp_block = block;

	return 0;
}
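/*
 * Each iteration of the copy loop above moves one contiguous run of
 * valid blocks and waits for it synchronously, so at most one kcopyd
 * job per reclaim context is in flight at any time.
 */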
/*
 * Move valid blocks of dzone buffer zone into dzone (after its write pointer)
 * and free the buffer zone.
 */
static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dm_zone *bzone = dzone->bzone;
	sector_t chunk_block = dzone->wp_block;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	DMDEBUG("(%s/%u): Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
		dmz_metadata_label(zmd), zrc->dev_idx,
		dzone->chunk, bzone->id, dmz_weight(bzone),
		dzone->id, dmz_weight(dzone));

	/* Flush the buffer zone into the data zone */
	ret = dmz_reclaim_copy(zrc, bzone, dzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
	if (ret == 0) {
		/* Free the buffer zone */
		dmz_invalidate_blocks(zmd, bzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, bzone);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}
/*
 * Merge valid blocks of dzone into its buffer zone and free dzone.
 */
static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *bzone = dzone->bzone;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret = 0;

	DMDEBUG("(%s/%u): Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
		dmz_metadata_label(zmd), zrc->dev_idx,
		chunk, dzone->id, dmz_weight(dzone),
		bzone->id, dmz_weight(bzone));

	/* Flush the data zone into the buffer zone */
	ret = dmz_reclaim_copy(zrc, dzone, bzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, dzone, bzone, 0);
	if (ret == 0) {
		/*
		 * Free the data zone and remap the chunk to
		 * the buffer zone.
		 */
		dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, bzone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}
/*
 * Move valid blocks of the random data zone dzone into a free sequential zone.
 * Once blocks are moved, remap the zone chunk to the sequential zone.
 */
static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *szone = NULL;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;
	int alloc_flags = DMZ_ALLOC_SEQ;

	/* Get a free random or sequential zone */
	dmz_lock_map(zmd);
again:
	szone = dmz_alloc_zone(zmd, zrc->dev_idx,
			       alloc_flags | DMZ_ALLOC_RECLAIM);
	if (!szone && alloc_flags == DMZ_ALLOC_SEQ && dmz_nr_cache_zones(zmd)) {
		alloc_flags = DMZ_ALLOC_RND;
		goto again;
	}
	dmz_unlock_map(zmd);
	if (!szone)
		return -ENOSPC;

	DMDEBUG("(%s/%u): Chunk %u, move %s zone %u (weight %u) to %s zone %u",
		dmz_metadata_label(zmd), zrc->dev_idx, chunk,
		dmz_is_cache(dzone) ? "cache" : "rnd",
		dzone->id, dmz_weight(dzone),
		dmz_is_rnd(szone) ? "rnd" : "seq", szone->id);

	/* Flush the random data zone into the sequential zone */
	ret = dmz_reclaim_copy(zrc, dzone, szone);

	dmz_lock_flush(zmd);

	if (ret == 0) {
		/* Validate copied blocks */
		ret = dmz_copy_valid_blocks(zmd, dzone, szone);
	}
	if (ret) {
		/* Free the sequential zone */
		dmz_lock_map(zmd);
		dmz_free_zone(zmd, szone);
		dmz_unlock_map(zmd);
	} else {
		/* Free the data zone and remap the chunk */
		dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, szone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}
/*
 * Reclaim an empty zone.
 */
static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dmz_metadata *zmd = zrc->metadata;

	dmz_lock_flush(zmd);
	dmz_lock_map(zmd);
	dmz_unmap_zone(zmd, dzone);
	dmz_unlock_zone_reclaim(dzone);
	dmz_free_zone(zmd, dzone);
	dmz_unlock_map(zmd);
	dmz_unlock_flush(zmd);
}
/*
 * Test if the target device is idle.
 */
static inline int dmz_target_idle(struct dmz_reclaim *zrc)
{
	return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
}
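/*
 * time_is_before_jiffies() is wrap-safe: the target counts as idle once
 * more than DMZ_IDLE_PERIOD jiffies have elapsed since the last BIO was
 * accounted in zrc->atime by dmz_reclaim_bio_acc().
 */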
/*
 * Find a candidate zone for reclaim and process it.
 */
static int dmz_do_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_zone *dzone;
	struct dm_zone *rzone;
	unsigned long start;
	int ret;

	/* Get a data zone */
	dzone = dmz_get_zone_for_reclaim(zmd, zrc->dev_idx,
					 dmz_target_idle(zrc));
	if (!dzone) {
		DMDEBUG("(%s/%u): No zone found to reclaim",
			dmz_metadata_label(zmd), zrc->dev_idx);
		return -EBUSY;
	}
	rzone = dzone;

	start = jiffies;
	if (dmz_is_cache(dzone) || dmz_is_rnd(dzone)) {
		if (!dmz_weight(dzone)) {
			/* Empty zone */
			dmz_reclaim_empty(zrc, dzone);
			ret = 0;
		} else {
			/*
			 * Reclaim the random data zone by moving its
			 * valid data blocks to a free sequential zone.
			 */
			ret = dmz_reclaim_rnd_data(zrc, dzone);
		}
	} else {
		struct dm_zone *bzone = dzone->bzone;
		sector_t chunk_block = 0;

		ret = dmz_first_valid_block(zmd, bzone, &chunk_block);
		if (ret < 0)
			goto out;

		if (ret == 0 || chunk_block >= dzone->wp_block) {
			/*
			 * The buffer zone is empty or its valid blocks are
			 * after the data zone write pointer.
			 */
			ret = dmz_reclaim_buf(zrc, dzone);
			rzone = bzone;
		} else {
			/*
			 * Reclaim the data zone by merging it into the
			 * buffer zone so that the buffer zone itself can
			 * be later reclaimed.
			 */
			ret = dmz_reclaim_seq_data(zrc, dzone);
		}
	}
out:
	if (ret) {
		if (ret == -EINTR)
			DMDEBUG("(%s/%u): reclaim zone %u interrupted",
				dmz_metadata_label(zmd), zrc->dev_idx,
				rzone->id);
		else
			DMDEBUG("(%s/%u): Failed to reclaim zone %u, err %d",
				dmz_metadata_label(zmd), zrc->dev_idx,
				rzone->id, ret);
		dmz_unlock_zone_reclaim(dzone);
		return ret;
	}

	ret = dmz_flush_metadata(zrc->metadata);
	if (ret) {
		DMDEBUG("(%s/%u): Metadata flush for zone %u failed, err %d",
			dmz_metadata_label(zmd), zrc->dev_idx, rzone->id, ret);
		return ret;
	}

	DMDEBUG("(%s/%u): Reclaimed zone %u in %u ms",
		dmz_metadata_label(zmd), zrc->dev_idx,
		rzone->id, jiffies_to_msecs(jiffies - start));
	return 0;
}
static unsigned int dmz_reclaim_percentage(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_cache = dmz_nr_cache_zones(zmd);
	unsigned int nr_unmap, nr_zones;

	if (nr_cache) {
		nr_zones = nr_cache;
		nr_unmap = dmz_nr_unmap_cache_zones(zmd);
	} else {
		nr_zones = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
		nr_unmap = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
	}
	if (nr_unmap <= 1)
		return 0;
	return nr_unmap * 100 / nr_zones;
}
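/*
 * Example: with 40 cache zones of which 12 are unmapped, this returns
 * 12 * 100 / 40 = 30, i.e. exactly the DMZ_RECLAIM_LOW_UNMAP_ZONES
 * watermark at which a busy target still starts reclaim.
 */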
/*
 * Test if reclaim is necessary.
 */
static bool dmz_should_reclaim(struct dmz_reclaim *zrc, unsigned int p_unmap)
{
	unsigned int nr_reclaim;

	nr_reclaim = dmz_nr_rnd_zones(zrc->metadata, zrc->dev_idx);

	if (dmz_nr_cache_zones(zrc->metadata)) {
		/*
		 * The first device in a multi-device
		 * setup only contains cache zones, so
		 * never start reclaim there.
		 */
		if (zrc->dev_idx == 0)
			return false;
		nr_reclaim += dmz_nr_cache_zones(zrc->metadata);
	}

	/* Reclaim when idle */
	if (dmz_target_idle(zrc) && nr_reclaim)
		return true;

	/* If there are still plenty of free zones, do not reclaim */
	if (p_unmap >= DMZ_RECLAIM_HIGH_UNMAP_ZONES)
		return false;

	/*
	 * If the percentage of unmapped zones is low,
	 * reclaim even if the target is busy.
	 */
	return p_unmap <= DMZ_RECLAIM_LOW_UNMAP_ZONES;
}
/*
 * Reclaim work function.
 */
static void dmz_reclaim_work(struct work_struct *work)
{
	struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int p_unmap;
	int ret;

	if (dmz_dev_is_dying(zmd))
		return;

	p_unmap = dmz_reclaim_percentage(zrc);
	if (!dmz_should_reclaim(zrc, p_unmap)) {
		mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
		return;
	}

	/*
	 * We need to start reclaiming random zones: set up zone copy
	 * throttling to go fast if we are very low on random zones, and
	 * slower if some free random zones are still available, so that
	 * reclaim impacts the user workload as little as possible.
	 */
	if (dmz_target_idle(zrc) || p_unmap < DMZ_RECLAIM_LOW_UNMAP_ZONES / 2) {
		/* Idle or very low percentage: go fast */
		zrc->kc_throttle.throttle = 100;
	} else {
		/* Busy but we still have some free random zones: throttle */
		zrc->kc_throttle.throttle = min(75U, 100U - p_unmap / 2);
	}
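	/*
	 * Throttle arithmetic: for any p_unmap below the 50% high
	 * watermark, 100 - p_unmap / 2 is at least 75, so the min()
	 * effectively caps the busy-case copy bandwidth at 75%.
	 */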
	DMDEBUG("(%s/%u): Reclaim (%u): %s, %u%% free zones (%u/%u cache %u/%u random)",
		dmz_metadata_label(zmd), zrc->dev_idx,
		zrc->kc_throttle.throttle,
		(dmz_target_idle(zrc) ? "Idle" : "Busy"),
		p_unmap, dmz_nr_unmap_cache_zones(zmd),
		dmz_nr_cache_zones(zmd),
		dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx),
		dmz_nr_rnd_zones(zmd, zrc->dev_idx));

	ret = dmz_do_reclaim(zrc);
	if (ret && ret != -EINTR) {
		if (!dmz_check_dev(zmd))
			return;
	}

	dmz_schedule_reclaim(zrc);
}
551 * Initialize reclaim.
553 int dmz_ctr_reclaim(struct dmz_metadata
*zmd
,
554 struct dmz_reclaim
**reclaim
, int idx
)
556 struct dmz_reclaim
*zrc
;
559 zrc
= kzalloc(sizeof(struct dmz_reclaim
), GFP_KERNEL
);
564 zrc
->atime
= jiffies
;
567 /* Reclaim kcopyd client */
568 zrc
->kc
= dm_kcopyd_client_create(&zrc
->kc_throttle
);
569 if (IS_ERR(zrc
->kc
)) {
570 ret
= PTR_ERR(zrc
->kc
);
576 INIT_DELAYED_WORK(&zrc
->work
, dmz_reclaim_work
);
577 zrc
->wq
= alloc_ordered_workqueue("dmz_rwq_%s_%d", WQ_MEM_RECLAIM
,
578 dmz_metadata_label(zmd
), idx
);
585 queue_delayed_work(zrc
->wq
, &zrc
->work
, 0);
590 dm_kcopyd_client_destroy(zrc
->kc
);
/*
 * Terminate reclaim.
 */
void dmz_dtr_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
	destroy_workqueue(zrc->wq);
	dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);
}
/*
 * Suspend reclaim.
 */
void dmz_suspend_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
}
/*
 * Resume reclaim.
 */
void dmz_resume_reclaim(struct dmz_reclaim *zrc)
{
	queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
}
/*
 * BIO accounting.
 */
void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
{
	zrc->atime = jiffies;
}
/*
 * Start reclaim if necessary.
 */
void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
{
	unsigned int p_unmap = dmz_reclaim_percentage(zrc);

	if (dmz_should_reclaim(zrc, p_unmap))
		mod_delayed_work(zrc->wq, &zrc->work, 0);
}