/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define	DM_MSG_PREFIX		"zoned reclaim"
struct dmz_reclaim {
	struct dmz_metadata	*metadata;
	struct dmz_dev		*dev;

	struct delayed_work	work;
	struct workqueue_struct *wq;

	struct dm_kcopyd_client	*kc;
	struct dm_kcopyd_throttle kc_throttle;
	int			kc_err;

	unsigned long		flags;

	/* Last target access time */
	unsigned long		atime;
};
/*
 * Reclaim state flags.
 */
enum {
	DMZ_RECLAIM_KCOPY,
};
/*
 * Number of seconds of target BIO inactivity to consider the target idle.
 */
#define DMZ_IDLE_PERIOD			(10UL * HZ)
/*
 * Percentage of unmapped (free) random zones below which reclaim starts
 * even if the target is busy.
 */
#define DMZ_RECLAIM_LOW_UNMAP_RND	30
/*
 * Percentage of unmapped (free) random zones above which reclaim will
 * stop if the target is busy.
 */
#define DMZ_RECLAIM_HIGH_UNMAP_RND	50
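
/*
 * Worked example of the two thresholds above: with 100 random zones, a
 * busy target starts reclaiming once 30 or fewer of them are unmapped
 * and keeps reclaiming until 50 are unmapped again, so the mapper and
 * reclaim do not thrash around a single cut-off. An idle target ignores
 * both thresholds and reclaims whenever any random zone is mapped (see
 * dmz_should_reclaim()).
 */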
/*
 * Align a sequential zone write pointer to chunk_block.
 */
static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
				sector_t block)
{
	struct dmz_metadata *zmd = zrc->metadata;
	sector_t wp_block = zone->wp_block;
	unsigned int nr_blocks;
	int ret;

	if (wp_block == block)
		return 0;

	if (wp_block > block)
		return -EIO;

	/*
	 * Zeroout the space between the write
	 * pointer and the requested position.
	 */
	nr_blocks = block - wp_block;
	ret = blkdev_issue_zeroout(zrc->dev->bdev,
				   dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
				   dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
	if (ret) {
		dmz_dev_err(zrc->dev,
			    "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
			    dmz_id(zmd, zone), (unsigned long long)wp_block,
			    (unsigned long long)block, nr_blocks, ret);
		dmz_check_bdev(zrc->dev);
		return ret;
	}

	zone->wp_block = block;

	return 0;
}
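
/*
 * A note on the zeroout above, assuming the usual blkdev_issue_zeroout()
 * behavior (write-zeroes command or a zero-page write fallback): real
 * writes are issued to the device, so the drive's zone write pointer
 * advances together with zone->wp_block and the next kcopyd write to the
 * zone still lands exactly at the write pointer.
 */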
/*
 * dm_kcopyd_copy end notification.
 */
static void dmz_reclaim_kcopy_end(int read_err, unsigned long write_err,
				  void *context)
{
	struct dmz_reclaim *zrc = context;

	if (read_err || write_err)
		zrc->kc_err = -EIO;
	else
		zrc->kc_err = 0;

	clear_bit_unlock(DMZ_RECLAIM_KCOPY, &zrc->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zrc->flags, DMZ_RECLAIM_KCOPY);
}
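
/*
 * The flag dance above pairs with dmz_reclaim_copy(): the issuer sets
 * DMZ_RECLAIM_KCOPY before calling dm_kcopyd_copy() and then sleeps in
 * wait_on_bit_io(). clear_bit_unlock() clears the bit with release
 * semantics, and smp_mb__after_atomic() orders that store against the
 * waitqueue check done inside wake_up_bit(), so the waiter cannot miss
 * the wakeup.
 */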
/*
 * Copy valid blocks of src_zone into dst_zone.
 */
static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
			    struct dm_zone *src_zone, struct dm_zone *dst_zone)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dmz_dev *dev = zrc->dev;
	struct dm_io_region src, dst;
	sector_t block = 0, end_block;
	sector_t nr_blocks;
	sector_t src_zone_block;
	sector_t dst_zone_block;
	unsigned long flags = 0;
	int ret;

	if (dmz_is_seq(src_zone))
		end_block = src_zone->wp_block;
	else
		end_block = dev->zone_nr_blocks;
	src_zone_block = dmz_start_block(zmd, src_zone);
	dst_zone_block = dmz_start_block(zmd, dst_zone);

	if (dmz_is_seq(dst_zone))
		set_bit(DM_KCOPYD_WRITE_SEQ, &flags);

	while (block < end_block) {
		if (dev->flags & DMZ_BDEV_DYING)
			return -EIO;

		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, src_zone, &block);
		if (ret <= 0)
			break;
		nr_blocks = ret;

		/*
		 * If we are writing in a sequential zone, writes must be
		 * issued in order, so zero out any hole between the zone
		 * write pointer and the next valid block.
		 */
		if (dmz_is_seq(dst_zone)) {
			ret = dmz_reclaim_align_wp(zrc, dst_zone, block);
			if (ret)
				return ret;
		}

		src.bdev = dev->bdev;
		src.sector = dmz_blk2sect(src_zone_block + block);
		src.count = dmz_blk2sect(nr_blocks);

		dst.bdev = dev->bdev;
		dst.sector = dmz_blk2sect(dst_zone_block + block);
		dst.count = src.count;

		/* Copy the valid region */
		set_bit(DMZ_RECLAIM_KCOPY, &zrc->flags);
		dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags,
			       dmz_reclaim_kcopy_end, zrc);

		/* Wait for copy to complete */
		wait_on_bit_io(&zrc->flags, DMZ_RECLAIM_KCOPY,
			       TASK_UNINTERRUPTIBLE);
		if (zrc->kc_err)
			return zrc->kc_err;

		block += nr_blocks;
	}

	if (dmz_is_seq(dst_zone))
		dst_zone->wp_block = block;

	return 0;
}
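
/*
 * A note on the loop above: dmz_first_valid_block() advances 'block' to
 * the start of the next valid extent and returns its length, so each
 * iteration hands kcopyd one contiguous extent and 'block' only moves
 * forward. That monotonic progress is what lets DM_KCOPYD_WRITE_SEQ and
 * dmz_reclaim_align_wp() keep destination writes strictly sequential.
 */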
/*
 * Move valid blocks of dzone buffer zone into dzone (after its write pointer)
 * and free the buffer zone.
 */
static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dm_zone *bzone = dzone->bzone;
	sector_t chunk_block = dzone->wp_block;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
		      dzone->chunk, dmz_id(zmd, bzone), dmz_weight(bzone),
		      dmz_id(zmd, dzone), dmz_weight(dzone));

	/* Flush the buffer zone into the data zone */
	ret = dmz_reclaim_copy(zrc, bzone, dzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
	if (ret == 0) {
		/* Free the buffer zone */
		dmz_invalidate_blocks(zmd, bzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, bzone);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}
/*
 * Merge valid blocks of dzone into its buffer zone and free dzone.
 */
static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *bzone = dzone->bzone;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret = 0;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
		      chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
		      dmz_id(zmd, bzone), dmz_weight(bzone));

	/* Flush data zone into the buffer zone */
	ret = dmz_reclaim_copy(zrc, dzone, bzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, dzone, bzone, 0);
	if (ret == 0) {
		/*
		 * Free the data zone and remap the chunk to
		 * the buffer zone.
		 */
		dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, bzone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}
/*
 * Move valid blocks of the random data zone dzone into a free sequential zone.
 * Once blocks are moved, remap the zone chunk to the sequential zone.
 */
static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *szone = NULL;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	/* Get a free sequential zone */
	dmz_lock_map(zmd);
	szone = dmz_alloc_zone(zmd, DMZ_ALLOC_RECLAIM);
	dmz_unlock_map(zmd);
	if (!szone)
		return -ENOSPC;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move rnd zone %u (weight %u) to seq zone %u",
		      chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
		      dmz_id(zmd, szone));

	/* Flush the random data zone into the sequential zone */
	ret = dmz_reclaim_copy(zrc, dzone, szone);

	dmz_lock_flush(zmd);

	if (ret == 0) {
		/* Validate copied blocks */
		ret = dmz_copy_valid_blocks(zmd, dzone, szone);
	}
	if (ret) {
		/* Free the sequential zone */
		dmz_lock_map(zmd);
		dmz_free_zone(zmd, szone);
		dmz_unlock_map(zmd);
	} else {
		/* Free the data zone and remap the chunk */
		dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, szone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}
/*
 * Reclaim an empty zone.
 */
static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dmz_metadata *zmd = zrc->metadata;

	dmz_lock_flush(zmd);
	dmz_lock_map(zmd);
	dmz_unmap_zone(zmd, dzone);
	dmz_unlock_zone_reclaim(dzone);
	dmz_free_zone(zmd, dzone);
	dmz_unlock_map(zmd);
	dmz_unlock_flush(zmd);
}
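
/*
 * Summary of the reclaim strategies implemented above, as selected by
 * dmz_do_reclaim() below: an empty random zone is simply unmapped and
 * freed; a random zone with valid data is copied into a fresh sequential
 * zone (dmz_reclaim_rnd_data()); a buffered sequential zone is either
 * completed from its buffer zone (dmz_reclaim_buf()) or merged into the
 * buffer zone so the pair collapses back to a single zone
 * (dmz_reclaim_seq_data()).
 */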
/*
 * Find a candidate zone for reclaim and process it.
 */
static int dmz_do_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_zone *dzone;
	struct dm_zone *rzone;
	unsigned long start;
	int ret;

	/* Get a data zone */
	dzone = dmz_get_zone_for_reclaim(zmd);
	if (IS_ERR(dzone))
		return PTR_ERR(dzone);

	start = jiffies;

	if (dmz_is_rnd(dzone)) {
		if (!dmz_weight(dzone)) {
			/* Empty zone */
			dmz_reclaim_empty(zrc, dzone);
			ret = 0;
		} else {
			/*
			 * Reclaim the random data zone by moving its
			 * valid data blocks to a free sequential zone.
			 */
			ret = dmz_reclaim_rnd_data(zrc, dzone);
		}
		rzone = dzone;
	} else {
		struct dm_zone *bzone = dzone->bzone;
		sector_t chunk_block = 0;

		ret = dmz_first_valid_block(zmd, bzone, &chunk_block);
		if (ret < 0)
			goto out;

		if (ret == 0 || chunk_block >= dzone->wp_block) {
			/*
			 * The buffer zone is empty or its valid blocks are
			 * after the data zone write pointer.
			 */
			ret = dmz_reclaim_buf(zrc, dzone);
			rzone = bzone;
		} else {
			/*
			 * Reclaim the data zone by merging it into the
			 * buffer zone so that the buffer zone itself can
			 * be later reclaimed.
			 */
			ret = dmz_reclaim_seq_data(zrc, dzone);
			rzone = dzone;
		}
	}
out:
	if (ret) {
		dmz_unlock_zone_reclaim(dzone);
		return ret;
	}

	ret = dmz_flush_metadata(zrc->metadata);
	if (ret) {
		dmz_dev_debug(zrc->dev,
			      "Metadata flush for zone %u failed, err %d\n",
			      dmz_id(zmd, rzone), ret);
		return ret;
	}

	dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
		      dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
	return 0;
}
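
/*
 * Note: 'rzone' above tracks the zone whose reclamation gets reported,
 * that is, the buffer zone when dmz_reclaim_buf() released it and the
 * data zone on the other paths. The metadata flush then persists the new
 * mapping and block validity state before the next reclaim pass runs.
 */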
/*
 * Test if the target device is idle.
 */
static inline int dmz_target_idle(struct dmz_reclaim *zrc)
{
	return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
}
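
/*
 * time_is_before_jiffies(a) is true once 'a' lies in the past, so the
 * target counts as idle when no BIO has been accounted through
 * dmz_reclaim_bio_acc() for at least DMZ_IDLE_PERIOD (10 seconds).
 */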
/*
 * Test if reclaim is necessary.
 */
static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd = dmz_nr_rnd_zones(zmd);
	unsigned int nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	unsigned int p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;

	/* Reclaim when idle */
	if (dmz_target_idle(zrc) && nr_unmap_rnd < nr_rnd)
		return true;

	/* If there are still plenty of random zones, do not reclaim */
	if (p_unmap_rnd >= DMZ_RECLAIM_HIGH_UNMAP_RND)
		return false;

	/*
	 * If the percentage of unmapped random zones is low,
	 * reclaim even if the target is busy.
	 */
	return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
}
/*
 * Reclaim work function.
 */
static void dmz_reclaim_work(struct work_struct *work)
{
	struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd, nr_unmap_rnd;
	unsigned int p_unmap_rnd;
	int ret;

	if (dmz_bdev_is_dying(zrc->dev))
		return;

	if (!dmz_should_reclaim(zrc)) {
		mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
		return;
	}

	/*
	 * We need to start reclaiming random zones: set up zone copy
	 * throttling to go fast if we are very low on random zones, and
	 * slower if some free random zones are still available, to avoid
	 * impacting the user workload as much as possible.
	 */
	nr_rnd = dmz_nr_rnd_zones(zmd);
	nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
	if (dmz_target_idle(zrc) || p_unmap_rnd < DMZ_RECLAIM_LOW_UNMAP_RND / 2) {
		/* Idle or very low percentage: go fast */
		zrc->kc_throttle.throttle = 100;
	} else {
		/* Busy but we still have some random zones: throttle */
		zrc->kc_throttle.throttle = min(75U, 100U - p_unmap_rnd / 2);
	}
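
	/*
	 * Worked example for the throttle above: given the thresholds in
	 * dmz_should_reclaim(), a busy target reaches the else branch with
	 * p_unmap_rnd in the 15..30 range, so 100 - p_unmap_rnd / 2 is at
	 * least 85 and the min() effectively caps the busy-case throttle
	 * at 75% of the copy bandwidth.
	 */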

	dmz_dev_debug(zrc->dev,
		      "Reclaim (%u): %s, %u%% free rnd zones (%u/%u)",
		      zrc->kc_throttle.throttle,
		      (dmz_target_idle(zrc) ? "Idle" : "Busy"),
		      p_unmap_rnd, nr_unmap_rnd, nr_rnd);

	ret = dmz_do_reclaim(zrc);
	if (ret) {
		dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
		if (!dmz_check_bdev(zrc->dev))
			return;
	}

	dmz_schedule_reclaim(zrc);
}
/*
 * Initialize reclaim.
 */
int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
		    struct dmz_reclaim **reclaim)
{
	struct dmz_reclaim *zrc;
	int ret;

	zrc = kzalloc(sizeof(struct dmz_reclaim), GFP_KERNEL);
	if (!zrc)
		return -ENOMEM;

	zrc->dev = dev;
	zrc->metadata = zmd;
	zrc->atime = jiffies;

	/* Reclaim kcopyd client */
	zrc->kc = dm_kcopyd_client_create(&zrc->kc_throttle);
	if (IS_ERR(zrc->kc)) {
		ret = PTR_ERR(zrc->kc);
		zrc->kc = NULL;
		goto err;
	}

	/* Reclaim work */
	INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work);
	zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s", WQ_MEM_RECLAIM,
					  dev->name);
	if (!zrc->wq) {
		ret = -ENOMEM;
		goto err;
	}

	*reclaim = zrc;
	queue_delayed_work(zrc->wq, &zrc->work, 0);

	return 0;
err:
	if (zrc->kc)
		dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);

	return ret;
}
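
/*
 * Note: the ordered workqueue above executes at most one work item at a
 * time, so two reclaim passes can never run concurrently, and
 * WQ_MEM_RECLAIM provides a rescuer thread so queued reclaim work can
 * still make forward progress under memory pressure.
 */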
/*
 * Terminate reclaim.
 */
void dmz_dtr_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
	destroy_workqueue(zrc->wq);
	dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);
}
/*
 * Suspend reclaim.
 */
void dmz_suspend_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
}
/*
 * Resume reclaim.
 */
void dmz_resume_reclaim(struct dmz_reclaim *zrc)
{
	queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
}
/*
 * BIO accounting.
 */
void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
{
	zrc->atime = jiffies;
}
/*
 * Start reclaim if necessary.
 */
void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
{
	if (dmz_should_reclaim(zrc))
		mod_delayed_work(zrc->wq, &zrc->work, 0);
}