// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-gc.c - pblk's garbage collector
 */

#include "pblk.h"
#include "pblk-trace.h"
#include <linux/delay.h>

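/* Free a GC request along with its vmalloc'ed data buffer. */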
static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
{
	if (gc_rq->data)
		vfree(gc_rq->data);
	kfree(gc_rq);
}

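/* Drain the GC write list: hand each request's valid sectors to the
 * write cache and drop the line reference taken by the read path.
 * Returns 1 if the list was empty, 0 otherwise.
 */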
static int pblk_gc_write(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_gc_rq *gc_rq, *tgc_rq;
	LIST_HEAD(w_list);

	spin_lock(&gc->w_lock);
	if (list_empty(&gc->w_list)) {
		spin_unlock(&gc->w_lock);
		return 1;
	}

	list_cut_position(&w_list, &gc->w_list, gc->w_list.prev);
	gc->w_entries = 0;
	spin_unlock(&gc->w_lock);

	list_for_each_entry_safe(gc_rq, tgc_rq, &w_list, list) {
		pblk_write_gc_to_cache(pblk, gc_rq);
		list_del(&gc_rq->list);
		kref_put(&gc_rq->line->ref, pblk_line_put);
		pblk_gc_free_gc_rq(gc_rq);
	}

	return 0;
}

static void pblk_gc_writer_kick(struct pblk_gc *gc)
{
	wake_up_process(gc->gc_writer_ts);
}

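/* Return a line that failed GC to the closed state and re-insert it on
 * the GC list matching its current number of valid sectors.
 */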
void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list;

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	line->state = PBLK_LINESTATE_CLOSED;
	trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);

	/* We need to reset gc_group to ensure that pblk_line_gc_list
	 * returns the proper move_list, since right now the line is
	 * not on any of the gc lists.
	 */
	line->gc_group = PBLK_LINEGC_NONE;
	move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);
	list_add_tail(&line->list, move_list);
	spin_unlock(&l_mg->gc_lock);
}

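/* Read worker: read the valid sectors of a victim line into the GC
 * request and queue it for the GC writer. Backs off while the write
 * list is at its queue depth (PBLK_GC_RQ_QD).
 */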
static void pblk_gc_line_ws(struct work_struct *work)
{
	struct pblk_line_ws *gc_rq_ws = container_of(work,
						struct pblk_line_ws, ws);
	struct pblk *pblk = gc_rq_ws->pblk;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line = gc_rq_ws->line;
	struct pblk_gc_rq *gc_rq = gc_rq_ws->priv;
	int ret;

	up(&gc->gc_sem);

	/* Read from GC victim block */
	ret = pblk_submit_read_gc(pblk, gc_rq);
	if (ret) {
		line->w_err_gc->has_gc_err = 1;
		goto out;
	}

	if (!gc_rq->secs_to_gc)
		goto out;

retry:
	spin_lock(&gc->w_lock);
	if (gc->w_entries >= PBLK_GC_RQ_QD) {
		spin_unlock(&gc->w_lock);
		pblk_gc_writer_kick(&pblk->gc);
		usleep_range(128, 256);
		goto retry;
	}
	gc->w_entries++;
	list_add_tail(&gc_rq->list, &gc->w_list);
	spin_unlock(&gc->w_lock);

	pblk_gc_writer_kick(&pblk->gc);

	kfree(gc_rq_ws);
	return;

out:
	pblk_gc_free_gc_rq(gc_rq);
	kref_put(&line->ref, pblk_line_put);
	kfree(gc_rq_ws);
}

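/* Read and sanity-check the line's emeta; on success return a copy of
 * its LBA list (kvmalloc'ed, freed by the caller), or NULL on failure.
 */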
static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
				       struct pblk_line *line)
{
	struct line_emeta *emeta_buf;
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned int lba_list_size = lm->emeta_len[2];
	__le64 *lba_list;
	int ret;

	emeta_buf = kvmalloc(lm->emeta_len[0], GFP_KERNEL);
	if (!emeta_buf)
		return NULL;

	ret = pblk_line_emeta_read(pblk, line, emeta_buf);
	if (ret) {
		pblk_err(pblk, "line %d read emeta failed (%d)\n",
				line->id, ret);
		kvfree(emeta_buf);
		return NULL;
	}

	/* If this read fails, it means that emeta is corrupted.
	 * For now, leave the line untouched.
	 * TODO: Implement a recovery routine that scans and moves
	 * all sectors on the line.
	 */
	ret = pblk_recov_check_emeta(pblk, emeta_buf);
	if (ret) {
		pblk_err(pblk, "inconsistent emeta (line %d)\n",
				line->id);
		kvfree(emeta_buf);
		return NULL;
	}

	lba_list = kvmalloc(lba_list_size, GFP_KERNEL);

	if (lba_list)
		memcpy(lba_list, emeta_to_lbas(pblk, emeta_buf), lba_list_size);

	kvfree(emeta_buf);

	return lba_list;
}

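/* Prepare worker: map out the valid sectors of a victim line (from the
 * write-error LBA list or from emeta) and split them into GC requests
 * of at most max_write_pgs sectors, each queued for the read worker.
 */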
static void pblk_gc_line_prepare_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line_ws *gc_rq_ws;
	struct pblk_gc_rq *gc_rq;
	__le64 *lba_list;
	unsigned long *invalid_bitmap;
	int sec_left, nr_secs, bit;

	invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!invalid_bitmap)
		goto fail_free_ws;

	if (line->w_err_gc->has_write_err) {
		lba_list = line->w_err_gc->lba_list;
		line->w_err_gc->lba_list = NULL;
	} else {
		lba_list = get_lba_list_from_emeta(pblk, line);
		if (!lba_list) {
			pblk_err(pblk, "could not interpret emeta (line %d)\n",
					line->id);
			goto fail_free_invalid_bitmap;
		}
	}

	spin_lock(&line->lock);
	bitmap_copy(invalid_bitmap, line->invalid_bitmap, lm->sec_per_line);
	sec_left = pblk_line_vsc(line);
	spin_unlock(&line->lock);

	if (sec_left < 0) {
		pblk_err(pblk, "corrupted GC line (%d)\n", line->id);
		goto fail_free_lba_list;
	}

	bit = -1;
next_rq:
	gc_rq = kmalloc(sizeof(struct pblk_gc_rq), GFP_KERNEL);
	if (!gc_rq)
		goto fail_free_lba_list;

	nr_secs = 0;
	do {
		bit = find_next_zero_bit(invalid_bitmap, lm->sec_per_line,
								bit + 1);
		if (bit > line->emeta_ssec)
			break;

		gc_rq->paddr_list[nr_secs] = bit;
		gc_rq->lba_list[nr_secs++] = le64_to_cpu(lba_list[bit]);
	} while (nr_secs < pblk->max_write_pgs);

	if (unlikely(!nr_secs)) {
		kfree(gc_rq);
		goto out;
	}

	gc_rq->nr_secs = nr_secs;
	gc_rq->line = line;

	gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
	if (!gc_rq->data)
		goto fail_free_gc_rq;

	gc_rq_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
	if (!gc_rq_ws)
		goto fail_free_gc_data;

	gc_rq_ws->pblk = pblk;
	gc_rq_ws->line = line;
	gc_rq_ws->priv = gc_rq;

	/* The write GC path can be much slower than the read GC one due to
	 * the budget imposed by the rate-limiter. Balance in case that we get
	 * back pressure from the write GC path.
	 */
	while (down_timeout(&gc->gc_sem, msecs_to_jiffies(30000)))
		io_schedule();

	kref_get(&line->ref);

	INIT_WORK(&gc_rq_ws->ws, pblk_gc_line_ws);
	queue_work(gc->gc_line_reader_wq, &gc_rq_ws->ws);

	sec_left -= nr_secs;
	if (sec_left > 0)
		goto next_rq;

out:
	kvfree(lba_list);
	kfree(line_ws);
	kfree(invalid_bitmap);

	kref_put(&line->ref, pblk_line_put);
	atomic_dec(&gc->read_inflight_gc);

	return;

fail_free_gc_data:
	vfree(gc_rq->data);
fail_free_gc_rq:
	kfree(gc_rq);
fail_free_lba_list:
	kvfree(lba_list);
fail_free_invalid_bitmap:
	kfree(invalid_bitmap);
fail_free_ws:
	kfree(line_ws);

	/* The line goes back to the closed state, so we cannot release the
	 * additional line reference; we only do that on the gc-to-free line
	 * state transition.
	 */
	pblk_put_line_back(pblk, line);
	atomic_dec(&gc->read_inflight_gc);

	pblk_err(pblk, "failed to GC line %d\n", line->id);
}

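/* Kick off GC for one victim line by queueing its preparation work. */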
static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line_ws *line_ws;

	pblk_debug(pblk, "line '%d' being reclaimed for GC\n", line->id);

	line_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
	if (!line_ws)
		return -ENOMEM;

	line_ws->pblk = pblk;
	line_ws->line = line;

	atomic_inc(&gc->pipeline_gc);
	INIT_WORK(&line_ws->ws, pblk_gc_line_prepare_ws);
	queue_work(gc->gc_reader_wq, &line_ws->ws);

	return 0;
}

static void pblk_gc_reader_kick(struct pblk_gc *gc)
{
	wake_up_process(gc->gc_reader_ts);
}

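/* Wake up the GC threads and re-arm the GC timer (unless GC is being
 * shut down).
 */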
static void pblk_gc_kick(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;

	pblk_gc_writer_kick(gc);
	pblk_gc_reader_kick(gc);

	/* If we're shutting down GC, let's not start it up again */
	if (gc->gc_enabled) {
		wake_up_process(gc->gc_ts);
		mod_timer(&gc->gc_timer,
			  jiffies + msecs_to_jiffies(GC_TIME_MSECS));
	}
}

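/* Take one victim line off the read list and start GC on it; on
 * failure, put the line back so it can be retried.
 * Returns 1 if the list was empty, 0 otherwise.
 */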
static int pblk_gc_read(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line;

	spin_lock(&gc->r_lock);
	if (list_empty(&gc->r_list)) {
		spin_unlock(&gc->r_lock);
		return 1;
	}

	line = list_first_entry(&gc->r_list, struct pblk_line, list);
	list_del(&line->list);
	spin_unlock(&gc->r_lock);

	pblk_gc_kick(pblk);

	if (pblk_gc_line(pblk, line)) {
		pblk_err(pblk, "failed to GC line %d\n", line->id);
		/* rollback */
		spin_lock(&gc->r_lock);
		list_add_tail(&line->list, &gc->r_list);
		spin_unlock(&gc->r_lock);
	}

	return 0;
}

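/* Greedy victim selection: pick the line in the group with the fewest
 * valid sectors, skipping lines that still have sectors waiting to be
 * updated. Returns NULL if no suitable victim exists.
 */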
static struct pblk_line *pblk_gc_get_victim_line(struct pblk *pblk,
						 struct list_head *group_list)
{
	struct pblk_line *line, *victim;
	unsigned int line_vsc = ~0x0L, victim_vsc = ~0x0L;

	victim = list_first_entry(group_list, struct pblk_line, list);

	list_for_each_entry(line, group_list, list) {
		if (!atomic_read(&line->sec_to_update))
			line_vsc = le32_to_cpu(*line->vsc);
		if (line_vsc < victim_vsc) {
			victim = line;
			victim_vsc = le32_to_cpu(*victim->vsc);
		}
	}

	if (victim_vsc == ~0x0)
		return NULL;

	return victim;
}

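/* GC should run when there are write-error lines to recycle, or when
 * GC is active and the free block count is below the high threshold.
 */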
static bool pblk_gc_should_run(struct pblk_gc *gc, struct pblk_rl *rl)
{
	unsigned int nr_blocks_free, nr_blocks_need;
	unsigned int werr_lines = atomic_read(&rl->werr_lines);

	nr_blocks_need = pblk_rl_high_thrs(rl);
	nr_blocks_free = pblk_rl_nr_free_blks(rl);

	/* This is not critical, no need to take lock here */
	return ((werr_lines > 0) ||
		((gc->gc_active) && (nr_blocks_need > nr_blocks_free)));
}

void pblk_gc_free_full_lines(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line;

	do {
		spin_lock(&l_mg->gc_lock);
		if (list_empty(&l_mg->gc_full_list)) {
			spin_unlock(&l_mg->gc_lock);
			return;
		}

		line = list_first_entry(&l_mg->gc_full_list,
							struct pblk_line, list);

		spin_lock(&line->lock);
		WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
		line->state = PBLK_LINESTATE_GC;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		list_del(&line->list);
		spin_unlock(&l_mg->gc_lock);

		atomic_inc(&gc->pipeline_gc);
		kref_put(&line->ref, pblk_line_put);
	} while (1);
}

/*
 * Lines with no valid sectors will be returned to the free list immediately. If
 * GC is activated - either because the free block count is under the determined
 * threshold, or because it is being forced from user space - only lines with a
 * high count of invalid sectors will be recycled.
 */
static void pblk_gc_run(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line;
	struct list_head *group_list;
	bool run_gc;
	int read_inflight_gc, gc_group = 0, prev_group = 0;

	pblk_gc_free_full_lines(pblk);

	run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
	if (!run_gc || (atomic_read(&gc->read_inflight_gc) >= PBLK_GC_L_QD))
		return;

next_gc_group:
	group_list = l_mg->gc_lists[gc_group++];

	do {
		spin_lock(&l_mg->gc_lock);

		line = pblk_gc_get_victim_line(pblk, group_list);
		if (!line) {
			spin_unlock(&l_mg->gc_lock);
			break;
		}

		spin_lock(&line->lock);
		WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
		line->state = PBLK_LINESTATE_GC;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		list_del(&line->list);
		spin_unlock(&l_mg->gc_lock);

		spin_lock(&gc->r_lock);
		list_add_tail(&line->list, &gc->r_list);
		spin_unlock(&gc->r_lock);

		read_inflight_gc = atomic_inc_return(&gc->read_inflight_gc);
		pblk_gc_reader_kick(gc);

		prev_group = 1;

		/* No need to queue up more GC lines than we can handle */
		run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
		if (!run_gc || read_inflight_gc >= PBLK_GC_L_QD)
			break;
	} while (1);

	if (!prev_group && pblk->rl.rb_state > gc_group &&
						gc_group < PBLK_GC_NR_LISTS)
		goto next_gc_group;
}

static void pblk_gc_timer(struct timer_list *t)
{
	struct pblk *pblk = from_timer(pblk, t, gc.gc_timer);

	pblk_gc_kick(pblk);
}

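/* Main GC kthread: run a GC pass, then sleep until kicked. */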
static int pblk_gc_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		pblk_gc_run(pblk);
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}

static int pblk_gc_writer_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_gc_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}

static int pblk_gc_reader_ts(void *data)
{
	struct pblk *pblk = data;
	struct pblk_gc *gc = &pblk->gc;

	while (!kthread_should_stop()) {
		if (!pblk_gc_read(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_info(pblk, "flushing gc pipeline, %d lines left\n",
			atomic_read(&gc->pipeline_gc));
#endif

	do {
		if (!atomic_read(&gc->pipeline_gc))
			break;

		schedule();
	} while (1);

	return 0;
}

static void pblk_gc_start(struct pblk *pblk)
{
	pblk->gc.gc_active = 1;
	pblk_debug(pblk, "gc start\n");
}

void pblk_gc_should_start(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;

	if (gc->gc_enabled && !gc->gc_active) {
		pblk_gc_start(pblk);
		pblk_gc_kick(pblk);
	}
}

void pblk_gc_should_stop(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;

	if (gc->gc_active && !gc->gc_forced)
		gc->gc_active = 0;
}

void pblk_gc_should_kick(struct pblk *pblk)
{
	pblk_rl_update_rates(&pblk->rl);
}

void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
			      int *gc_active)
{
	struct pblk_gc *gc = &pblk->gc;

	spin_lock(&gc->lock);
	*gc_enabled = gc->gc_enabled;
	*gc_active = gc->gc_active;
	spin_unlock(&gc->lock);
}

int pblk_gc_sysfs_force(struct pblk *pblk, int force)
{
	struct pblk_gc *gc = &pblk->gc;

	if (force < 0 || force > 1)
		return -EINVAL;

	spin_lock(&gc->lock);
	gc->gc_forced = force;

	if (force)
		gc->gc_enabled = 1;
	else
		gc->gc_enabled = 0;
	spin_unlock(&gc->lock);

	pblk_gc_should_start(pblk);

	return 0;
}

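/* Set up the GC machinery: one main, one writer and one reader kthread,
 * the periodic GC timer, and the workqueues used by the reader path.
 */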
int pblk_gc_init(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;
	int ret;

	gc->gc_ts = kthread_create(pblk_gc_ts, pblk, "pblk-gc-ts");
	if (IS_ERR(gc->gc_ts)) {
		pblk_err(pblk, "could not allocate GC main kthread\n");
		return PTR_ERR(gc->gc_ts);
	}

	gc->gc_writer_ts = kthread_create(pblk_gc_writer_ts, pblk,
							"pblk-gc-writer-ts");
	if (IS_ERR(gc->gc_writer_ts)) {
		pblk_err(pblk, "could not allocate GC writer kthread\n");
		ret = PTR_ERR(gc->gc_writer_ts);
		goto fail_free_main_kthread;
	}

	gc->gc_reader_ts = kthread_create(pblk_gc_reader_ts, pblk,
							"pblk-gc-reader-ts");
	if (IS_ERR(gc->gc_reader_ts)) {
		pblk_err(pblk, "could not allocate GC reader kthread\n");
		ret = PTR_ERR(gc->gc_reader_ts);
		goto fail_free_writer_kthread;
	}

	timer_setup(&gc->gc_timer, pblk_gc_timer, 0);
	mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));

	gc->gc_active = 0;
	gc->gc_forced = 0;
	gc->gc_enabled = 1;
	gc->w_entries = 0;
	atomic_set(&gc->read_inflight_gc, 0);
	atomic_set(&gc->pipeline_gc, 0);

	/* Workqueue that reads valid sectors from a line and submits them to
	 * the GC writer to be recycled.
	 */
	gc->gc_line_reader_wq = alloc_workqueue("pblk-gc-line-reader-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_GC_MAX_READERS);
	if (!gc->gc_line_reader_wq) {
		pblk_err(pblk, "could not allocate GC line reader workqueue\n");
		ret = -ENOMEM;
		goto fail_free_reader_kthread;
	}

	/* Workqueue that prepares lines for GC */
	gc->gc_reader_wq = alloc_workqueue("pblk-gc-line_wq",
					WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!gc->gc_reader_wq) {
		pblk_err(pblk, "could not allocate GC reader workqueue\n");
		ret = -ENOMEM;
		goto fail_free_reader_line_wq;
	}

	spin_lock_init(&gc->lock);
	spin_lock_init(&gc->w_lock);
	spin_lock_init(&gc->r_lock);

	sema_init(&gc->gc_sem, PBLK_GC_RQ_QD);

	INIT_LIST_HEAD(&gc->w_list);
	INIT_LIST_HEAD(&gc->r_list);

	return 0;

fail_free_reader_line_wq:
	destroy_workqueue(gc->gc_line_reader_wq);
fail_free_reader_kthread:
	kthread_stop(gc->gc_reader_ts);
fail_free_writer_kthread:
	kthread_stop(gc->gc_writer_ts);
fail_free_main_kthread:
	kthread_stop(gc->gc_ts);

	return ret;
}

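/* Tear down GC. If @graceful, flush outstanding reader work first. */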
void pblk_gc_exit(struct pblk *pblk, bool graceful)
{
	struct pblk_gc *gc = &pblk->gc;

	gc->gc_enabled = 0;
	del_timer_sync(&gc->gc_timer);
	gc->gc_active = 0;

	if (gc->gc_ts)
		kthread_stop(gc->gc_ts);

	if (gc->gc_reader_ts)
		kthread_stop(gc->gc_reader_ts);

	if (graceful) {
		flush_workqueue(gc->gc_reader_wq);
		flush_workqueue(gc->gc_line_reader_wq);
	}

	destroy_workqueue(gc->gc_reader_wq);
	destroy_workqueue(gc->gc_line_reader_wq);

	if (gc->gc_writer_ts)
		kthread_stop(gc->gc_writer_ts);
}