// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define DM_MSG_PREFIX		"zoned reclaim"

struct dmz_reclaim {
	struct dmz_metadata	*metadata;
	struct dmz_dev		*dev;

	struct delayed_work	work;
	struct workqueue_struct *wq;

	struct dm_kcopyd_client	*kc;
	struct dm_kcopyd_throttle kc_throttle;
	int			kc_err;

	unsigned long		flags;

	/* Last target access time */
	unsigned long		atime;
};

/*
 * Reclaim state flags.
 */
enum {
	DMZ_RECLAIM_KCOPY,
};

/*
 * Number of seconds of target BIO inactivity to consider the target idle.
 */
#define DMZ_IDLE_PERIOD		(10UL * HZ)

/*
 * Percentage of unmapped (free) random zones below which reclaim starts
 * even if the target is busy.
 */
#define DMZ_RECLAIM_LOW_UNMAP_RND	30

/*
 * Percentage of unmapped (free) random zones above which reclaim will
 * stop if the target is busy.
 */
#define DMZ_RECLAIM_HIGH_UNMAP_RND	50
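
/*
 * Example: with 100 random zones on a busy target, reclaim runs only once
 * 30 or fewer of them are unmapped; the high watermark (50) short-circuits
 * the decision while free random zones are plentiful. An idle target is
 * reclaimed as soon as any random zone is mapped (see dmz_should_reclaim()).
 */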

/*
 * Align a sequential zone write pointer to chunk_block.
 */
static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
				sector_t block)
{
	struct dmz_metadata *zmd = zrc->metadata;
	sector_t wp_block = zone->wp_block;
	unsigned int nr_blocks;
	int ret;

	if (wp_block == block)
		return 0;

	if (wp_block > block)
		return -EIO;

	/*
	 * Zeroout the space between the write
	 * pointer and the requested position.
	 */
	nr_blocks = block - wp_block;
	ret = blkdev_issue_zeroout(zrc->dev->bdev,
				   dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
				   dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
	if (ret) {
		dmz_dev_err(zrc->dev,
			    "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
			    dmz_id(zmd, zone), (unsigned long long)wp_block,
			    (unsigned long long)block, nr_blocks, ret);
		dmz_check_bdev(zrc->dev);
		return ret;
	}

	zone->wp_block = block;

	return 0;
}

/*
 * dm_kcopyd_copy end notification.
 */
static void dmz_reclaim_kcopy_end(int read_err, unsigned long write_err,
				  void *context)
{
	struct dmz_reclaim *zrc = context;

	if (read_err || write_err)
		zrc->kc_err = -EIO;
	else
		zrc->kc_err = 0;

	clear_bit_unlock(DMZ_RECLAIM_KCOPY, &zrc->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zrc->flags, DMZ_RECLAIM_KCOPY);
}
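
/*
 * Note: the DMZ_RECLAIM_KCOPY bit acts as a completion latch:
 * dmz_reclaim_copy() sets it before issuing dm_kcopyd_copy() and sleeps
 * in wait_on_bit_io(); the callback above records any error in kc_err,
 * clears the bit with release semantics and wakes up the waiter.
 */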

/*
 * Copy valid blocks of src_zone into dst_zone.
 */
static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
			    struct dm_zone *src_zone, struct dm_zone *dst_zone)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dmz_dev *dev = zrc->dev;
	struct dm_io_region src, dst;
	sector_t block = 0, end_block;
	sector_t nr_blocks;
	sector_t src_zone_block;
	sector_t dst_zone_block;
	unsigned long flags = 0;
	int ret;

	if (dmz_is_seq(src_zone))
		end_block = src_zone->wp_block;
	else
		end_block = dev->zone_nr_blocks;
	src_zone_block = dmz_start_block(zmd, src_zone);
	dst_zone_block = dmz_start_block(zmd, dst_zone);

	if (dmz_is_seq(dst_zone))
		set_bit(DM_KCOPYD_WRITE_SEQ, &flags);

	while (block < end_block) {
		if (dev->flags & DMZ_BDEV_DYING)
			return -EIO;

		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, src_zone, &block);
		if (ret <= 0)
			return ret;
		nr_blocks = ret;

		/*
		 * If we are writing in a sequential zone, we must make sure
		 * that writes are sequential. So zero out any eventual hole
		 * between writes.
		 */
		if (dmz_is_seq(dst_zone)) {
			ret = dmz_reclaim_align_wp(zrc, dst_zone, block);
			if (ret)
				return ret;
		}

		src.bdev = dev->bdev;
		src.sector = dmz_blk2sect(src_zone_block + block);
		src.count = dmz_blk2sect(nr_blocks);

		dst.bdev = dev->bdev;
		dst.sector = dmz_blk2sect(dst_zone_block + block);
		dst.count = src.count;

		/* Copy the valid region */
		set_bit(DMZ_RECLAIM_KCOPY, &zrc->flags);
		dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags,
			       dmz_reclaim_kcopy_end, zrc);

		/* Wait for copy to complete */
		wait_on_bit_io(&zrc->flags, DMZ_RECLAIM_KCOPY,
			       TASK_UNINTERRUPTIBLE);
		if (zrc->kc_err)
			return zrc->kc_err;

		block += nr_blocks;
		if (dmz_is_seq(dst_zone))
			dst_zone->wp_block = block;
	}

	return 0;
}
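
/*
 * Note: DM_KCOPYD_WRITE_SEQ makes kcopyd issue its writes in order, which,
 * together with the write pointer alignment done above, keeps all writes
 * to a sequential destination zone strictly sequential.
 */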

/*
 * Move valid blocks of dzone buffer zone into dzone (after its write pointer)
 * and free the buffer zone.
 */
static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dm_zone *bzone = dzone->bzone;
	sector_t chunk_block = dzone->wp_block;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
		      dzone->chunk, dmz_id(zmd, bzone), dmz_weight(bzone),
		      dmz_id(zmd, dzone), dmz_weight(dzone));

	/* Flush the buffer zone into the data zone */
	ret = dmz_reclaim_copy(zrc, bzone, dzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
	if (ret == 0) {
		/* Free the buffer zone */
		dmz_invalidate_blocks(zmd, bzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, bzone);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Merge valid blocks of dzone into its buffer zone and free dzone.
 */
static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *bzone = dzone->bzone;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret = 0;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
		      chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
		      dmz_id(zmd, bzone), dmz_weight(bzone));

	/* Flush the data zone into the buffer zone */
	ret = dmz_reclaim_copy(zrc, dzone, bzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, dzone, bzone, 0);
	if (ret == 0) {
		/*
		 * Free the data zone and remap the chunk to
		 * the buffer zone.
		 */
		dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, bzone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Move valid blocks of the random data zone dzone into a free sequential zone.
 * Once blocks are moved, remap the zone chunk to the sequential zone.
 */
static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *szone = NULL;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	/* Get a free sequential zone */
	dmz_lock_map(zmd);
	szone = dmz_alloc_zone(zmd, DMZ_ALLOC_RECLAIM);
	dmz_unlock_map(zmd);
	if (!szone)
		return -ENOSPC;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move rnd zone %u (weight %u) to seq zone %u",
		      chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
		      dmz_id(zmd, szone));

	/* Flush the random data zone into the sequential zone */
	ret = dmz_reclaim_copy(zrc, dzone, szone);

	dmz_lock_flush(zmd);

	if (ret == 0) {
		/* Validate copied blocks */
		ret = dmz_copy_valid_blocks(zmd, dzone, szone);
	}
	if (ret) {
		/* Free the sequential zone */
		dmz_lock_map(zmd);
		dmz_free_zone(zmd, szone);
		dmz_unlock_map(zmd);
	} else {
		/* Free the data zone and remap the chunk */
		dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, szone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Reclaim an empty zone.
 */
static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dmz_metadata *zmd = zrc->metadata;

	dmz_lock_flush(zmd);
	dmz_lock_map(zmd);
	dmz_unmap_zone(zmd, dzone);
	dmz_unlock_zone_reclaim(dzone);
	dmz_free_zone(zmd, dzone);
	dmz_unlock_map(zmd);
	dmz_unlock_flush(zmd);
}

/*
 * Find a candidate zone for reclaim and process it.
 */
static int dmz_do_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_zone *dzone;
	struct dm_zone *rzone;
	unsigned long start;
	int ret;

	/* Get a data zone */
	dzone = dmz_get_zone_for_reclaim(zmd);
	if (IS_ERR(dzone))
		return PTR_ERR(dzone);

	start = jiffies;

	if (dmz_is_rnd(dzone)) {
		if (!dmz_weight(dzone)) {
			/* Empty zone */
			dmz_reclaim_empty(zrc, dzone);
			ret = 0;
		} else {
			/*
			 * Reclaim the random data zone by moving its
			 * valid data blocks to a free sequential zone.
			 */
			ret = dmz_reclaim_rnd_data(zrc, dzone);
		}
		rzone = dzone;
	} else {
		struct dm_zone *bzone = dzone->bzone;
		sector_t chunk_block = 0;

		ret = dmz_first_valid_block(zmd, bzone, &chunk_block);
		if (ret < 0)
			goto out;

		if (ret == 0 || chunk_block >= dzone->wp_block) {
			/*
			 * The buffer zone is empty or its valid blocks are
			 * after the data zone write pointer.
			 */
			ret = dmz_reclaim_buf(zrc, dzone);
			rzone = bzone;
		} else {
			/*
			 * Reclaim the data zone by merging it into the
			 * buffer zone so that the buffer zone itself can
			 * be later reclaimed.
			 */
			ret = dmz_reclaim_seq_data(zrc, dzone);
			rzone = dzone;
		}
	}
out:
	if (ret) {
		dmz_unlock_zone_reclaim(dzone);
		return ret;
	}

	ret = dmz_flush_metadata(zrc->metadata);
	if (ret) {
		dmz_dev_debug(zrc->dev,
			      "Metadata flush for zone %u failed, err %d\n",
			      dmz_id(zmd, rzone), ret);
		return ret;
	}

	dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
		      dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
	return 0;
}
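
/*
 * Summary of the cases handled above: an empty random zone is simply
 * unmapped and freed; a random zone holding data is moved to a free
 * sequential zone; a sequential data zone is reclaimed either by pulling
 * its buffer zone back in (buffer empty or valid blocks past the write
 * pointer) or by merging the data zone into its buffer zone.
 */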

/*
 * Test if the target device is idle.
 */
static inline int dmz_target_idle(struct dmz_reclaim *zrc)
{
	return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
}

/*
 * Test if reclaim is necessary.
 */
static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd = dmz_nr_rnd_zones(zmd);
	unsigned int nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	unsigned int p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;

	/* Reclaim when idle */
	if (dmz_target_idle(zrc) && nr_unmap_rnd < nr_rnd)
		return true;

	/* If there are still plenty of free random zones, do not reclaim */
	if (p_unmap_rnd >= DMZ_RECLAIM_HIGH_UNMAP_RND)
		return false;

	/*
	 * If the percentage of unmapped random zones is low,
	 * reclaim even if the target is busy.
	 */
	return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
}
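
/*
 * Decision table for dmz_should_reclaim(), with p the percentage of
 * unmapped random zones:
 *   idle and any random zone mapped  -> reclaim
 *   busy and p >= 50                 -> do not reclaim
 *   busy and p <= 30                 -> reclaim
 *   busy and 30 < p < 50             -> do not reclaim
 */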

/*
 * Reclaim work function.
 */
static void dmz_reclaim_work(struct work_struct *work)
{
	struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd, nr_unmap_rnd;
	unsigned int p_unmap_rnd;
	int ret;

	if (dmz_bdev_is_dying(zrc->dev))
		return;

	if (!dmz_should_reclaim(zrc)) {
		mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
		return;
	}

	/*
	 * We need to start reclaiming random zones: set up zone copy
	 * throttling to go fast if we are very low on random zones, and
	 * slower if some free random zones are still available, so that
	 * reclaim impacts the user workload as little as possible.
	 */
	nr_rnd = dmz_nr_rnd_zones(zmd);
	nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
	if (dmz_target_idle(zrc) || p_unmap_rnd < DMZ_RECLAIM_LOW_UNMAP_RND / 2) {
		/* Idle or very low percentage: go fast */
		zrc->kc_throttle.throttle = 100;
	} else {
		/* Busy but we still have some free random zones: throttle */
		zrc->kc_throttle.throttle = min(75U, 100U - p_unmap_rnd / 2);
	}
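
	/*
	 * Worked example: on a busy target with 20% of the random zones
	 * unmapped, 20 >= 30 / 2, so the copy rate is throttled to
	 * min(75, 100 - 20 / 2) = 75%. Below 15% unmapped, or when the
	 * target is idle, the copy runs at full speed.
	 */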

	dmz_dev_debug(zrc->dev,
		      "Reclaim (%u): %s, %u%% free rnd zones (%u/%u)",
		      zrc->kc_throttle.throttle,
		      (dmz_target_idle(zrc) ? "Idle" : "Busy"),
		      p_unmap_rnd, nr_unmap_rnd, nr_rnd);

	ret = dmz_do_reclaim(zrc);
	if (ret) {
		dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
		if (!dmz_check_bdev(zrc->dev))
			return;
	}

	dmz_schedule_reclaim(zrc);
}

/*
 * Initialize reclaim.
 */
int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
		    struct dmz_reclaim **reclaim)
{
	struct dmz_reclaim *zrc;
	int ret;

	zrc = kzalloc(sizeof(struct dmz_reclaim), GFP_KERNEL);
	if (!zrc)
		return -ENOMEM;

	zrc->dev = dev;
	zrc->metadata = zmd;
	zrc->atime = jiffies;

	/* Reclaim kcopyd client */
	zrc->kc = dm_kcopyd_client_create(&zrc->kc_throttle);
	if (IS_ERR(zrc->kc)) {
		ret = PTR_ERR(zrc->kc);
		zrc->kc = NULL;
		goto err;
	}

	/* Reclaim work */
	INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work);
	zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s", WQ_MEM_RECLAIM,
					  dev->name);
	if (!zrc->wq) {
		ret = -ENOMEM;
		goto err;
	}

	*reclaim = zrc;
	queue_delayed_work(zrc->wq, &zrc->work, 0);

	return 0;
err:
	if (zrc->kc)
		dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);

	return ret;
}
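
/*
 * Note: an ordered workqueue runs at most one work item at a time, so
 * executions of dmz_reclaim_work() are serialized and only a single zone
 * reclaim can be in flight at any time.
 */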

/*
 * Terminate reclaim.
 */
void dmz_dtr_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
	destroy_workqueue(zrc->wq);
	dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);
}

/*
 * Suspend reclaim.
 */
void dmz_suspend_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
}

/*
 * Resume reclaim.
 */
void dmz_resume_reclaim(struct dmz_reclaim *zrc)
{
	queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
}

/*
 * BIO accounting.
 */
void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
{
	zrc->atime = jiffies;
}

/*
 * Start reclaim if necessary.
 */
void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
{
	if (dmz_should_reclaim(zrc))
		mod_delayed_work(zrc->wq, &zrc->work, 0);
}