/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define DM_MSG_PREFIX		"zoned reclaim"

struct dmz_reclaim {
	struct dmz_metadata	*metadata;
	struct dmz_dev		*dev;

	struct delayed_work	work;
	struct workqueue_struct *wq;

	struct dm_kcopyd_client	*kc;
	struct dm_kcopyd_throttle kc_throttle;
	int			kc_err;

	unsigned long		flags;

	/* Last target access time */
	unsigned long		atime;
};

/*
 * Reclaim state flags.
 */
enum {
	DMZ_RECLAIM_KCOPY,
};

/*
 * Number of seconds of target BIO inactivity to consider the target idle.
 */
#define DMZ_IDLE_PERIOD		(10UL * HZ)

/*
 * Percentage of unmapped (free) random zones below which reclaim starts
 * even if the target is busy.
 */
#define DMZ_RECLAIM_LOW_UNMAP_RND	30

/*
 * Percentage of unmapped (free) random zones above which reclaim will
 * stop if the target is busy.
 */
#define DMZ_RECLAIM_HIGH_UNMAP_RND	50

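/*
 * Example: with 100 random zones of which 25 are unmapped, the ratio
 * computed in dmz_should_reclaim() below is 25 * 100 / 100 = 25%,
 * which is at or below DMZ_RECLAIM_LOW_UNMAP_RND, so reclaim runs
 * even if the target is busy.
 */
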
/*
 * Align a sequential zone write pointer to chunk_block.
 */
static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
				sector_t block)
{
	struct dmz_metadata *zmd = zrc->metadata;
	sector_t wp_block = zone->wp_block;
	unsigned int nr_blocks;
	int ret;

	if (wp_block == block)
		return 0;

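	/*
	 * Writes to a sequential zone can only land at the zone write
	 * pointer, so a target position behind the current write
	 * pointer cannot be reached.
	 */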
	if (wp_block > block)
		return -EIO;

	/*
	 * Zeroout the space between the write
	 * pointer and the requested position.
	 */
	nr_blocks = block - wp_block;
	ret = blkdev_issue_zeroout(zrc->dev->bdev,
				   dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
				   dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
	if (ret) {
		dmz_dev_err(zrc->dev,
			    "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
			    dmz_id(zmd, zone), (unsigned long long)wp_block,
			    (unsigned long long)block, nr_blocks, ret);
		return ret;
	}

	zone->wp_block = block;

	return 0;
}

/*
 * dm_kcopyd_copy end notification.
 */
static void dmz_reclaim_kcopy_end(int read_err, unsigned long write_err,
				  void *context)
{
	struct dmz_reclaim *zrc = context;

	if (read_err || write_err)
		zrc->kc_err = -EIO;
	else
		zrc->kc_err = 0;

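	/*
	 * Pair with wait_on_bit_io() in dmz_reclaim_copy():
	 * clear_bit_unlock() orders the kc_err update before the bit is
	 * released, and the barrier orders the release before the wake up.
	 */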
	clear_bit_unlock(DMZ_RECLAIM_KCOPY, &zrc->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zrc->flags, DMZ_RECLAIM_KCOPY);
}

/*
 * Copy valid blocks of src_zone into dst_zone.
 */
static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
			    struct dm_zone *src_zone, struct dm_zone *dst_zone)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dmz_dev *dev = zrc->dev;
	struct dm_io_region src, dst;
	sector_t block = 0, end_block;
	sector_t nr_blocks;
	sector_t src_zone_block;
	sector_t dst_zone_block;
	unsigned long flags = 0;
	int ret;

	if (dmz_is_seq(src_zone))
		end_block = src_zone->wp_block;
	else
		end_block = dev->zone_nr_blocks;
	src_zone_block = dmz_start_block(zmd, src_zone);
	dst_zone_block = dmz_start_block(zmd, dst_zone);

	if (dmz_is_seq(dst_zone))
		set_bit(DM_KCOPYD_WRITE_SEQ, &flags);

	while (block < end_block) {
		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, src_zone, &block);
		if (ret <= 0)
			return ret;
		nr_blocks = ret;

		/*
		 * If we are writing in a sequential zone, we must make sure
		 * that writes are sequential. So zero out any hole between
		 * the write pointer and the start of this region.
		 */
		if (dmz_is_seq(dst_zone)) {
			ret = dmz_reclaim_align_wp(zrc, dst_zone, block);
			if (ret)
				return ret;
		}

		src.bdev = dev->bdev;
		src.sector = dmz_blk2sect(src_zone_block + block);
		src.count = dmz_blk2sect(nr_blocks);

		dst.bdev = dev->bdev;
		dst.sector = dmz_blk2sect(dst_zone_block + block);
		dst.count = src.count;

		/* Copy the valid region */
		set_bit(DMZ_RECLAIM_KCOPY, &zrc->flags);
		ret = dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags,
				     dmz_reclaim_kcopy_end, zrc);
		if (ret)
			return ret;

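		/*
		 * dm_kcopyd_copy() is asynchronous: completion is
		 * signaled by dmz_reclaim_kcopy_end() clearing the
		 * DMZ_RECLAIM_KCOPY bit, which makes the copy
		 * synchronous from this function's point of view.
		 */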
		/* Wait for copy to complete */
		wait_on_bit_io(&zrc->flags, DMZ_RECLAIM_KCOPY,
			       TASK_UNINTERRUPTIBLE);
		if (zrc->kc_err)
			return zrc->kc_err;

		block += nr_blocks;
		if (dmz_is_seq(dst_zone))
			dst_zone->wp_block = block;
	}

	return 0;
}

/*
 * Move valid blocks of dzone's buffer zone into dzone (after its write
 * pointer) and free the buffer zone.
 */
static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dm_zone *bzone = dzone->bzone;
	sector_t chunk_block = dzone->wp_block;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
		      dzone->chunk, dmz_id(zmd, bzone), dmz_weight(bzone),
		      dmz_id(zmd, dzone), dmz_weight(dzone));

	/* Flush the buffer zone into the data zone */
	ret = dmz_reclaim_copy(zrc, bzone, dzone);
	if (ret < 0)
		return ret;

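	/*
	 * Holding the flush lock across the bitmap update and zone
	 * remapping below presumably keeps them atomic with respect to
	 * a concurrent metadata flush (an assumption based on how
	 * dmz_lock_flush() is used throughout this file).
	 */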
	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
	if (ret == 0) {
		/* Free the buffer zone */
		dmz_invalidate_blocks(zmd, bzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, bzone);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return 0;
}

/*
 * Merge valid blocks of dzone into its buffer zone and free dzone.
 */
static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *bzone = dzone->bzone;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret = 0;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
		      chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
		      dmz_id(zmd, bzone), dmz_weight(bzone));

	/* Flush data zone into the buffer zone */
	ret = dmz_reclaim_copy(zrc, dzone, bzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, dzone, bzone, 0);
	if (ret == 0) {
		/*
		 * Free the data zone and remap the chunk to
		 * the buffer zone.
		 */
		dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, bzone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return 0;
}

/*
 * Move valid blocks of the random data zone dzone into a free sequential zone.
 * Once blocks are moved, remap the zone chunk to the sequential zone.
 */
static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *szone = NULL;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	/* Get a free sequential zone */
	dmz_lock_map(zmd);
	szone = dmz_alloc_zone(zmd, DMZ_ALLOC_RECLAIM);
	dmz_unlock_map(zmd);
	if (!szone)
		return -ENOSPC;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move rnd zone %u (weight %u) to seq zone %u",
		      chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
		      dmz_id(zmd, szone));

	/* Flush the random data zone into the sequential zone */
	ret = dmz_reclaim_copy(zrc, dzone, szone);

	dmz_lock_flush(zmd);

	if (ret == 0) {
		/* Validate copied blocks */
		ret = dmz_copy_valid_blocks(zmd, dzone, szone);
	}
	if (ret) {
		/* Free the sequential zone */
		dmz_lock_map(zmd);
		dmz_free_zone(zmd, szone);
		dmz_unlock_map(zmd);
	} else {
		/* Free the data zone and remap the chunk */
		dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, szone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return 0;
}

/*
 * Reclaim an empty zone.
 */
static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dmz_metadata *zmd = zrc->metadata;

	dmz_lock_flush(zmd);
	dmz_lock_map(zmd);
	dmz_unmap_zone(zmd, dzone);
	dmz_unlock_zone_reclaim(dzone);
	dmz_free_zone(zmd, dzone);
	dmz_unlock_map(zmd);
	dmz_unlock_flush(zmd);
}

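/*
 * Three cases are handled below: an empty random zone is simply
 * unmapped and freed; a random zone with valid blocks is moved to a
 * free sequential zone; and a sequential data zone with a buffer zone
 * is merged with its buffer, in whichever direction keeps writes to
 * the sequential zone sequential.
 */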
/*
 * Find a candidate zone for reclaim and process it.
 */
static void dmz_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_zone *dzone;
	struct dm_zone *rzone;
	unsigned long start;
	int ret;

	/* Get a data zone */
	dzone = dmz_get_zone_for_reclaim(zmd);
	if (!dzone)
		return;

	start = jiffies;

	if (dmz_is_rnd(dzone)) {
		if (!dmz_weight(dzone)) {
			/* Empty zone */
			dmz_reclaim_empty(zrc, dzone);
			ret = 0;
		} else {
			/*
			 * Reclaim the random data zone by moving its
			 * valid data blocks to a free sequential zone.
			 */
			ret = dmz_reclaim_rnd_data(zrc, dzone);
		}
		rzone = dzone;

	} else {
		struct dm_zone *bzone = dzone->bzone;
		sector_t chunk_block = 0;

		ret = dmz_first_valid_block(zmd, bzone, &chunk_block);
		if (ret < 0)
			goto out;

		if (ret == 0 || chunk_block >= dzone->wp_block) {
			/*
			 * The buffer zone is empty or its valid blocks are
			 * after the data zone write pointer.
			 */
			ret = dmz_reclaim_buf(zrc, dzone);
			rzone = bzone;
		} else {
			/*
			 * Reclaim the data zone by merging it into the
			 * buffer zone so that the buffer zone itself can
			 * be later reclaimed.
			 */
			ret = dmz_reclaim_seq_data(zrc, dzone);
			rzone = dzone;
		}
	}
out:
	if (ret) {
		dmz_unlock_zone_reclaim(dzone);
		return;
	}

	(void) dmz_flush_metadata(zrc->metadata);

	dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
		      dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
}

/*
 * Test if the target device is idle.
 */
static inline int dmz_target_idle(struct dmz_reclaim *zrc)
{
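	/*
	 * zrc->atime is refreshed by dmz_reclaim_bio_acc() on every
	 * target BIO, so the target is considered idle once
	 * DMZ_IDLE_PERIOD has elapsed without any I/O.
	 */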
	return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
}

/*
 * Test if reclaim is necessary.
 */
static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd = dmz_nr_rnd_zones(zmd);
	unsigned int nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	unsigned int p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;

	/* Reclaim when idle */
	if (dmz_target_idle(zrc) && nr_unmap_rnd < nr_rnd)
		return true;

	/* If there are still plenty of free random zones, do not reclaim */
	if (p_unmap_rnd >= DMZ_RECLAIM_HIGH_UNMAP_RND)
		return false;

	/*
	 * If the percentage of unmapped random zones is low,
	 * reclaim even if the target is busy.
	 */
	return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
}

/*
 * Reclaim work function.
 */
static void dmz_reclaim_work(struct work_struct *work)
{
	struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd, nr_unmap_rnd;
	unsigned int p_unmap_rnd;

	if (!dmz_should_reclaim(zrc)) {
		mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
		return;
	}

	/*
	 * We need to start reclaiming random zones: set up zone copy
	 * throttling to go fast if we are very low on free random zones,
	 * and slower if some free random zones remain, to limit the
	 * impact on the user workload as much as possible.
	 */
	nr_rnd = dmz_nr_rnd_zones(zmd);
	nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
	if (dmz_target_idle(zrc) || p_unmap_rnd < DMZ_RECLAIM_LOW_UNMAP_RND / 2) {
		/* Idle or very low percentage: go fast */
		zrc->kc_throttle.throttle = 100;
	} else {
		/* Busy but we still have some free random zones: throttle */
		zrc->kc_throttle.throttle = min(75U, 100U - p_unmap_rnd / 2);
	}

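	/*
	 * The kcopyd throttle is roughly a percentage of the time that
	 * kcopyd is allowed to stay active. For example, with
	 * p_unmap_rnd = 20%, the busy path yields
	 * min(75, 100 - 20 / 2) = 75, leaving at least a quarter of the
	 * copy bandwidth to user I/O.
	 */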
	dmz_dev_debug(zrc->dev,
		      "Reclaim (%u): %s, %u%% free rnd zones (%u/%u)",
		      zrc->kc_throttle.throttle,
		      (dmz_target_idle(zrc) ? "Idle" : "Busy"),
		      p_unmap_rnd, nr_unmap_rnd, nr_rnd);

	dmz_reclaim(zrc);

	dmz_schedule_reclaim(zrc);
}

/*
 * Initialize reclaim.
 */
int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
		    struct dmz_reclaim **reclaim)
{
	struct dmz_reclaim *zrc;
	int ret;

	zrc = kzalloc(sizeof(struct dmz_reclaim), GFP_KERNEL);
	if (!zrc)
		return -ENOMEM;

	zrc->dev = dev;
	zrc->metadata = zmd;
	zrc->atime = jiffies;

	/* Reclaim kcopyd client */
	zrc->kc = dm_kcopyd_client_create(&zrc->kc_throttle);
	if (IS_ERR(zrc->kc)) {
		ret = PTR_ERR(zrc->kc);
		zrc->kc = NULL;
		goto err;
	}

	/* Reclaim work */
	INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work);
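	/*
	 * Use an ordered workqueue to serialize reclaim work items;
	 * WQ_MEM_RECLAIM provides a rescuer thread so that reclaim can
	 * make forward progress under memory pressure.
	 */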
	zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s", WQ_MEM_RECLAIM,
					  dev->name);
	if (!zrc->wq) {
		ret = -ENOMEM;
		goto err;
	}

	*reclaim = zrc;
	queue_delayed_work(zrc->wq, &zrc->work, 0);

	return 0;
err:
	if (zrc->kc)
		dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);

	return ret;
}

/*
 * Terminate reclaim.
 */
void dmz_dtr_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
	destroy_workqueue(zrc->wq);
	dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);
}

/*
 * Suspend reclaim.
 */
void dmz_suspend_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
}

/*
 * Resume reclaim.
 */
void dmz_resume_reclaim(struct dmz_reclaim *zrc)
{
	queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
}

/*
 * BIO accounting.
 */
void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
{
	zrc->atime = jiffies;
}

/*
 * Start reclaim if necessary.
 */
void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
{
	if (dmz_should_reclaim(zrc))
		mod_delayed_work(zrc->wq, &zrc->work, 0);
}