// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-gc.c - pblk's garbage collector
 */

#include "pblk.h"
#include "pblk-trace.h"
#include <linux/delay.h>

static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
{
	if (gc_rq->data)
		vfree(gc_rq->data);
	kfree(gc_rq);
}
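
/*
 * Drain the staged GC write list: the whole list is detached in one step
 * under gc->w_lock, so the lock is not held while the requests are
 * replayed into the write cache. Returns 1 when no work is queued, so
 * the writer kthread can go back to sleep.
 */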
static int pblk_gc_write(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_gc_rq *gc_rq, *tgc_rq;
	LIST_HEAD(w_list);

	spin_lock(&gc->w_lock);
	if (list_empty(&gc->w_list)) {
		spin_unlock(&gc->w_lock);
		return 1;
	}

	list_cut_position(&w_list, &gc->w_list, gc->w_list.prev);
	gc->w_entries = 0;
	spin_unlock(&gc->w_lock);

	list_for_each_entry_safe(gc_rq, tgc_rq, &w_list, list) {
		pblk_write_gc_to_cache(pblk, gc_rq);
		list_del(&gc_rq->list);
		kref_put(&gc_rq->line->ref, pblk_line_put);
		pblk_gc_free_gc_rq(gc_rq);
	}

	return 0;
}

static void pblk_gc_writer_kick(struct pblk_gc *gc)
{
	wake_up_process(gc->gc_writer_ts);
}
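
/*
 * Return a line selected for GC back to the closed state and re-insert
 * it on the GC group list that matches its current invalid-sector count.
 * Used when a victim line cannot be collected after all.
 */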
void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list;

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	line->state = PBLK_LINESTATE_CLOSED;
	trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);

	/* We need to reset gc_group in order to ensure that
	 * pblk_line_gc_list() returns the proper move_list, since the
	 * line is currently not on any of the gc lists.
	 */
	line->gc_group = PBLK_LINEGC_NONE;
	move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);
	list_add_tail(&line->list, move_list);
	spin_unlock(&l_mg->gc_lock);
}
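
/*
 * Work item for the read side of GC: read the valid sectors of one GC
 * request from the victim line, then stage the request on the GC write
 * list (bounded by PBLK_GC_RQ_QD) for the writer kthread to consume.
 */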
static void pblk_gc_line_ws(struct work_struct *work)
{
	struct pblk_line_ws *gc_rq_ws = container_of(work,
						struct pblk_line_ws, ws);
	struct pblk *pblk = gc_rq_ws->pblk;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line = gc_rq_ws->line;
	struct pblk_gc_rq *gc_rq = gc_rq_ws->priv;
	int ret;

	up(&gc->gc_sem);

	/* Read from GC victim block */
	ret = pblk_submit_read_gc(pblk, gc_rq);
	if (ret) {
		line->w_err_gc->has_gc_err = 1;
		goto out;
	}

	if (!gc_rq->secs_to_gc)
		goto out;

retry:
	spin_lock(&gc->w_lock);
	if (gc->w_entries >= PBLK_GC_RQ_QD) {
		spin_unlock(&gc->w_lock);
		pblk_gc_writer_kick(&pblk->gc);
		usleep_range(128, 256);
		goto retry;
	}
	gc->w_entries++;
	list_add_tail(&gc_rq->list, &gc->w_list);
	spin_unlock(&gc->w_lock);

	pblk_gc_writer_kick(&pblk->gc);

	kfree(gc_rq_ws);
	return;

out:
	pblk_gc_free_gc_rq(gc_rq);
	kref_put(&line->ref, pblk_line_put);
	kfree(gc_rq_ws);
}
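
/*
 * Read the line's LBA list from its emeta region. Returns a buffer that
 * the caller must release with kvfree(), or NULL if emeta cannot be read
 * or fails the consistency check.
 */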
static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
				       struct pblk_line *line)
{
	struct line_emeta *emeta_buf;
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned int lba_list_size = lm->emeta_len[2];
	__le64 *lba_list;
	int ret;

	emeta_buf = kvmalloc(lm->emeta_len[0], GFP_KERNEL);
	if (!emeta_buf)
		return NULL;

	ret = pblk_line_emeta_read(pblk, line, emeta_buf);
	if (ret) {
		pblk_err(pblk, "line %d read emeta failed (%d)\n",
				line->id, ret);
		kvfree(emeta_buf);
		return NULL;
	}

	/* If this check fails, it means that emeta is corrupted.
	 * For now, leave the line untouched.
	 * TODO: Implement a recovery routine that scans and moves
	 * all sectors on the line.
	 */
	ret = pblk_recov_check_emeta(pblk, emeta_buf);
	if (ret) {
		pblk_err(pblk, "inconsistent emeta (line %d)\n",
				line->id);
		kvfree(emeta_buf);
		return NULL;
	}

	lba_list = kvmalloc(lba_list_size, GFP_KERNEL);

	if (lba_list)
		memcpy(lba_list, emeta_to_lbas(pblk, emeta_buf), lba_list_size);

	kvfree(emeta_buf);

	return lba_list;
}
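
/*
 * Work item that prepares a victim line for collection: snapshot the
 * line's invalid bitmap, recover the LBA list (from the write-error
 * context if the line failed on write, otherwise from emeta), and chop
 * the remaining valid sectors into GC requests of at most
 * pblk->max_write_pgs sectors each.
 */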
static void pblk_gc_line_prepare_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line_ws *gc_rq_ws;
	struct pblk_gc_rq *gc_rq;
	__le64 *lba_list;
	unsigned long *invalid_bitmap;
	int sec_left, nr_secs, bit;

	invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!invalid_bitmap)
		goto fail_free_ws;

	if (line->w_err_gc->has_write_err) {
		lba_list = line->w_err_gc->lba_list;
		line->w_err_gc->lba_list = NULL;
	} else {
		lba_list = get_lba_list_from_emeta(pblk, line);
		if (!lba_list) {
			pblk_err(pblk, "could not interpret emeta (line %d)\n",
					line->id);
			goto fail_free_invalid_bitmap;
		}
	}

	spin_lock(&line->lock);
	bitmap_copy(invalid_bitmap, line->invalid_bitmap, lm->sec_per_line);
	sec_left = pblk_line_vsc(line);
	spin_unlock(&line->lock);

	if (sec_left < 0) {
		pblk_err(pblk, "corrupted GC line (%d)\n", line->id);
		goto fail_free_lba_list;
	}

	bit = -1;
next_rq:
	gc_rq = kmalloc(sizeof(struct pblk_gc_rq), GFP_KERNEL);
	if (!gc_rq)
		goto fail_free_lba_list;

	nr_secs = 0;
	do {
		bit = find_next_zero_bit(invalid_bitmap, lm->sec_per_line,
								bit + 1);
		if (bit > line->emeta_ssec)
			break;

		gc_rq->paddr_list[nr_secs] = bit;
		gc_rq->lba_list[nr_secs++] = le64_to_cpu(lba_list[bit]);
	} while (nr_secs < pblk->max_write_pgs);

	if (unlikely(!nr_secs)) {
		kfree(gc_rq);
		goto out;
	}

	gc_rq->nr_secs = nr_secs;
	gc_rq->line = line;

	gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
	if (!gc_rq->data)
		goto fail_free_gc_rq;

	gc_rq_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
	if (!gc_rq_ws)
		goto fail_free_gc_data;

	gc_rq_ws->pblk = pblk;
	gc_rq_ws->line = line;
	gc_rq_ws->priv = gc_rq;

	/* The write GC path can be much slower than the read GC one due to
	 * the budget imposed by the rate-limiter. Balance in case we get
	 * back pressure from the write GC path.
	 */
	while (down_timeout(&gc->gc_sem, msecs_to_jiffies(30000)))
		io_schedule();

	kref_get(&line->ref);

	INIT_WORK(&gc_rq_ws->ws, pblk_gc_line_ws);
	queue_work(gc->gc_line_reader_wq, &gc_rq_ws->ws);

	sec_left -= nr_secs;
	if (sec_left > 0)
		goto next_rq;

out:
	kvfree(lba_list);
	kfree(line_ws);
	kfree(invalid_bitmap);

	kref_put(&line->ref, pblk_line_put);
	atomic_dec(&gc->read_inflight_gc);

	return;

fail_free_gc_data:
	vfree(gc_rq->data);
fail_free_gc_rq:
	kfree(gc_rq);
fail_free_lba_list:
	kvfree(lba_list);
fail_free_invalid_bitmap:
	kfree(invalid_bitmap);
fail_free_ws:
	kfree(line_ws);

	/* The line goes back to the closed state, so we cannot release the
	 * additional line reference here; that is only done on the
	 * gc-to-free line state transition.
	 */
	pblk_put_line_back(pblk, line);
	atomic_dec(&gc->read_inflight_gc);

	pblk_err(pblk, "failed to GC line %d\n", line->id);
}
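
/*
 * Queue one victim line for collection; the heavy lifting happens
 * asynchronously in pblk_gc_line_prepare_ws().
 */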
static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line_ws *line_ws;

	pblk_debug(pblk, "line '%d' being reclaimed for GC\n", line->id);

	line_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
	if (!line_ws)
		return -ENOMEM;

	line_ws->pblk = pblk;
	line_ws->line = line;

	atomic_inc(&gc->pipeline_gc);
	INIT_WORK(&line_ws->ws, pblk_gc_line_prepare_ws);
	queue_work(gc->gc_reader_wq, &line_ws->ws);

	return 0;
}

static void pblk_gc_reader_kick(struct pblk_gc *gc)
{
	wake_up_process(gc->gc_reader_ts);
}

static void pblk_gc_kick(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;

	pblk_gc_writer_kick(gc);
	pblk_gc_reader_kick(gc);

	/* If we're shutting down GC, let's not start it up again */
	if (gc->gc_enabled) {
		wake_up_process(gc->gc_ts);
		mod_timer(&gc->gc_timer,
			  jiffies + msecs_to_jiffies(GC_TIME_MSECS));
	}
}
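
/*
 * Pop the oldest line from the GC read list and start collecting it.
 * On failure the line is put back on the list for a later retry.
 * Returns 1 when the list is empty.
 */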
static int pblk_gc_read(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line;

	spin_lock(&gc->r_lock);
	if (list_empty(&gc->r_list)) {
		spin_unlock(&gc->r_lock);
		return 1;
	}

	line = list_first_entry(&gc->r_list, struct pblk_line, list);
	list_del(&line->list);
	spin_unlock(&gc->r_lock);

	pblk_gc_kick(pblk);

	if (pblk_gc_line(pblk, line)) {
		pblk_err(pblk, "failed to GC line %d\n", line->id);
		/* rollback */
		spin_lock(&gc->r_lock);
		list_add_tail(&line->list, &gc->r_list);
		spin_unlock(&gc->r_lock);
	}

	return 0;
}
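
/*
 * Pick the cheapest victim from a GC group list: the line with the
 * fewest valid sectors among those with no sector updates in flight.
 * Returns NULL if no eligible victim is found.
 */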
static struct pblk_line *pblk_gc_get_victim_line(struct pblk *pblk,
						 struct list_head *group_list)
{
	struct pblk_line *line, *victim;
	unsigned int line_vsc = ~0x0L, victim_vsc = ~0x0L;

	victim = list_first_entry(group_list, struct pblk_line, list);

	list_for_each_entry(line, group_list, list) {
		if (!atomic_read(&line->sec_to_update))
			line_vsc = le32_to_cpu(*line->vsc);
		if (line_vsc < victim_vsc) {
			victim = line;
			victim_vsc = le32_to_cpu(*victim->vsc);
		}
	}

	if (victim_vsc == ~0x0)
		return NULL;

	return victim;
}

static bool pblk_gc_should_run(struct pblk_gc *gc, struct pblk_rl *rl)
{
	unsigned int nr_blocks_free, nr_blocks_need;
	unsigned int werr_lines = atomic_read(&rl->werr_lines);

	nr_blocks_need = pblk_rl_high_thrs(rl);
	nr_blocks_free = pblk_rl_nr_free_blks(rl);

	/* This is not critical, no need to take lock here */
	return ((werr_lines > 0) ||
		((gc->gc_active) && (nr_blocks_need > nr_blocks_free)));
}
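
/*
 * Lines on the full list hold no valid sectors, so no data needs to be
 * moved: simply drop the line reference via pblk_line_put() so the line
 * can be returned to the free list.
 */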
void pblk_gc_free_full_lines(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line;

	do {
		spin_lock(&l_mg->gc_lock);
		if (list_empty(&l_mg->gc_full_list)) {
			spin_unlock(&l_mg->gc_lock);
			return;
		}

		line = list_first_entry(&l_mg->gc_full_list,
					struct pblk_line, list);

		spin_lock(&line->lock);
		WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
		line->state = PBLK_LINESTATE_GC;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		list_del(&line->list);
		spin_unlock(&l_mg->gc_lock);

		atomic_inc(&gc->pipeline_gc);
		kref_put(&line->ref, pblk_line_put);
	} while (1);
}

/*
 * Lines with no valid sectors will be returned to the free list immediately. If
 * GC is activated - either because the free block count is under the determined
 * threshold, or because it is being forced from user space - only lines with a
 * high count of invalid sectors will be recycled.
 */
static void pblk_gc_run(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line;
	struct list_head *group_list;
	bool run_gc;
	int read_inflight_gc, gc_group = 0, prev_group = 0;

	pblk_gc_free_full_lines(pblk);

	run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
	if (!run_gc || (atomic_read(&gc->read_inflight_gc) >= PBLK_GC_L_QD))
		return;

next_gc_group:
	group_list = l_mg->gc_lists[gc_group++];

	do {
		spin_lock(&l_mg->gc_lock);

		line = pblk_gc_get_victim_line(pblk, group_list);
		if (!line) {
			spin_unlock(&l_mg->gc_lock);
			break;
		}

		spin_lock(&line->lock);
		WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
		line->state = PBLK_LINESTATE_GC;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		list_del(&line->list);
		spin_unlock(&l_mg->gc_lock);

		spin_lock(&gc->r_lock);
		list_add_tail(&line->list, &gc->r_list);
		spin_unlock(&gc->r_lock);

		read_inflight_gc = atomic_inc_return(&gc->read_inflight_gc);
		pblk_gc_reader_kick(gc);

		prev_group = 1;

		/* No need to queue up more GC lines than we can handle */
		run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
		if (!run_gc || read_inflight_gc >= PBLK_GC_L_QD)
			break;
	} while (1);

	if (!prev_group && pblk->rl.rb_state > gc_group &&
	    gc_group < PBLK_GC_NR_LISTS)
		goto next_gc_group;
}
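
/*
 * The GC timer periodically kicks all GC kthreads; the main kthread
 * below re-evaluates pblk_gc_run() each time it is woken.
 */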
static void pblk_gc_timer(struct timer_list *t)
{
	struct pblk *pblk = from_timer(pblk, t, gc.gc_timer);

	pblk_gc_kick(pblk);
}

static int pblk_gc_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		pblk_gc_run(pblk);
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}

static int pblk_gc_writer_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_gc_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}

static int pblk_gc_reader_ts(void *data)
{
	struct pblk *pblk = data;
	struct pblk_gc *gc = &pblk->gc;

	while (!kthread_should_stop()) {
		if (!pblk_gc_read(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_info(pblk, "flushing gc pipeline, %d lines left\n",
			atomic_read(&gc->pipeline_gc));
#endif

	do {
		if (!atomic_read(&gc->pipeline_gc))
			break;

		schedule();
	} while (1);

	return 0;
}

static void pblk_gc_start(struct pblk *pblk)
{
	pblk->gc.gc_active = 1;
	pblk_debug(pblk, "gc start\n");
}

void pblk_gc_should_start(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;

	if (gc->gc_enabled && !gc->gc_active) {
		pblk_gc_start(pblk);
		pblk_gc_kick(pblk);
	}
}

void pblk_gc_should_stop(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;

	if (gc->gc_active && !gc->gc_forced)
		gc->gc_active = 0;
}

void pblk_gc_should_kick(struct pblk *pblk)
{
	pblk_rl_update_rates(&pblk->rl);
}

void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
			      int *gc_active)
{
	struct pblk_gc *gc = &pblk->gc;

	spin_lock(&gc->lock);
	*gc_enabled = gc->gc_enabled;
	*gc_active = gc->gc_active;
	spin_unlock(&gc->lock);
}

int pblk_gc_sysfs_force(struct pblk *pblk, int force)
{
	struct pblk_gc *gc = &pblk->gc;

	if (force < 0 || force > 1)
		return -EINVAL;

	spin_lock(&gc->lock);
	gc->gc_forced = force;

	if (force)
		gc->gc_enabled = 1;
	else
		gc->gc_enabled = 0;
	spin_unlock(&gc->lock);

	pblk_gc_should_start(pblk);

	return 0;
}
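
/*
 * Bring up the GC machinery: the main, writer and reader kthreads, the
 * kick timer, both workqueues, and the semaphore that bounds in-flight
 * GC requests to PBLK_GC_RQ_QD.
 */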
int pblk_gc_init(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;
	int ret;

	gc->gc_ts = kthread_create(pblk_gc_ts, pblk, "pblk-gc-ts");
	if (IS_ERR(gc->gc_ts)) {
		pblk_err(pblk, "could not allocate GC main kthread\n");
		return PTR_ERR(gc->gc_ts);
	}

	gc->gc_writer_ts = kthread_create(pblk_gc_writer_ts, pblk,
							"pblk-gc-writer-ts");
	if (IS_ERR(gc->gc_writer_ts)) {
		pblk_err(pblk, "could not allocate GC writer kthread\n");
		ret = PTR_ERR(gc->gc_writer_ts);
		goto fail_free_main_kthread;
	}

	gc->gc_reader_ts = kthread_create(pblk_gc_reader_ts, pblk,
							"pblk-gc-reader-ts");
	if (IS_ERR(gc->gc_reader_ts)) {
		pblk_err(pblk, "could not allocate GC reader kthread\n");
		ret = PTR_ERR(gc->gc_reader_ts);
		goto fail_free_writer_kthread;
	}

	timer_setup(&gc->gc_timer, pblk_gc_timer, 0);
	mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));

	gc->gc_active = 0;
	gc->gc_forced = 0;
	gc->gc_enabled = 1;
	gc->w_entries = 0;
	atomic_set(&gc->read_inflight_gc, 0);
	atomic_set(&gc->pipeline_gc, 0);

	/* Workqueue that reads valid sectors from a line and submits them to
	 * the GC writer to be recycled.
	 */
	gc->gc_line_reader_wq = alloc_workqueue("pblk-gc-line-reader-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_GC_MAX_READERS);
	if (!gc->gc_line_reader_wq) {
		pblk_err(pblk, "could not allocate GC line reader workqueue\n");
		ret = -ENOMEM;
		goto fail_free_reader_kthread;
	}

	/* Workqueue that prepares lines for GC */
	gc->gc_reader_wq = alloc_workqueue("pblk-gc-line_wq",
					WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!gc->gc_reader_wq) {
		pblk_err(pblk, "could not allocate GC reader workqueue\n");
		ret = -ENOMEM;
		goto fail_free_reader_line_wq;
	}

	spin_lock_init(&gc->lock);
	spin_lock_init(&gc->w_lock);
	spin_lock_init(&gc->r_lock);

	sema_init(&gc->gc_sem, PBLK_GC_RQ_QD);

	INIT_LIST_HEAD(&gc->w_list);
	INIT_LIST_HEAD(&gc->r_list);

	return 0;

fail_free_reader_line_wq:
	destroy_workqueue(gc->gc_line_reader_wq);
fail_free_reader_kthread:
	kthread_stop(gc->gc_reader_ts);
fail_free_writer_kthread:
	kthread_stop(gc->gc_writer_ts);
fail_free_main_kthread:
	kthread_stop(gc->gc_ts);

	return ret;
}
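
/*
 * Tear down GC in roughly the reverse order of pblk_gc_init(). When
 * graceful, the workqueues are flushed first so in-flight GC work
 * completes before they are destroyed.
 */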
void pblk_gc_exit(struct pblk *pblk, bool graceful)
{
	struct pblk_gc *gc = &pblk->gc;

	gc->gc_enabled = 0;
	del_timer_sync(&gc->gc_timer);
	gc->gc_active = 0;

	if (gc->gc_ts)
		kthread_stop(gc->gc_ts);

	if (gc->gc_reader_ts)
		kthread_stop(gc->gc_reader_ts);

	if (graceful) {
		flush_workqueue(gc->gc_reader_wq);
		flush_workqueue(gc->gc_line_reader_wq);
	}

	destroy_workqueue(gc->gc_reader_wq);
	destroy_workqueue(gc->gc_line_reader_wq);

	if (gc->gc_writer_ts)
		kthread_stop(gc->gc_writer_ts);
}