/*
 * Copyright (C) 2016 CNEX Labs
 * Initial: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-recovery.c - pblk's recovery path
 */

#include "pblk.h"
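/*
 * Recovery work for a failed user write: rebuild a write bio from the
 * write buffer (rwb) entries that were not persisted and resubmit them.
 * Runs from a workqueue (note the work_struct argument); the pblk_rec_ctx
 * carries the original request, whose ppa_status bitmap selects the
 * sectors that still need to be written.
 */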
void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	int max_secs = nvm_max_phys_sects(dev);
	struct bio *bio;
	unsigned int nr_rec_secs;
	unsigned int pgs_read;
	int ret;

	nr_rec_secs = bitmap_weight((unsigned long int *)&rqd->ppa_status,
								max_secs);

	bio = bio_alloc(GFP_KERNEL, nr_rec_secs);
	if (!bio) {
		pr_err("pblk: not able to create recovery bio\n");
		return;
	}

	bio->bi_iter.bi_sector = 0;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;
	rqd->nr_ppas = nr_rec_secs;

	pgs_read = pblk_rb_read_to_bio_list(&pblk->rwb, bio, &recovery->failed,
								nr_rec_secs);
	if (pgs_read != nr_rec_secs) {
		pr_err("pblk: could not read recovery entries\n");
		goto err;
	}

	if (pblk_setup_w_rec_rq(pblk, rqd, c_ctx)) {
		pr_err("pblk: could not setup recovery request\n");
		goto err;
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_rec_secs, &pblk->recov_writes);
#endif

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		goto err;
	}

	mempool_free(recovery, pblk->rec_pool);
	return;

err:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, WRITE);
}
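/*
 * Prepare a recovery write request: comp_bits is the completion bitmap of
 * the original request and comp the number of entries that did complete.
 * The recovery context takes over the remaining valid/padded entries while
 * the original context is trimmed down to what was actually written.
 */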
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
			struct pblk_rec_ctx *recovery, u64 *comp_bits,
			unsigned int comp)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	int max_secs = nvm_max_phys_sects(dev);
	struct nvm_rq *rec_rqd;
	struct pblk_c_ctx *rec_ctx;
	int nr_entries = c_ctx->nr_valid + c_ctx->nr_padded;

	rec_rqd = pblk_alloc_rqd(pblk, WRITE);
	if (IS_ERR(rec_rqd)) {
		pr_err("pblk: could not create recovery req.\n");
		return -ENOMEM;
	}

	rec_ctx = nvm_rq_to_pdu(rec_rqd);

	/* Copy completion bitmap, but exclude the first X completed entries */
	bitmap_shift_right((unsigned long int *)&rec_rqd->ppa_status,
				(unsigned long int *)comp_bits,
				comp, max_secs);

	/* Save the context for the entries that need to be re-written and
	 * update current context with the completed entries.
	 */
	rec_ctx->sentry = pblk_rb_wrap_pos(&pblk->rwb, c_ctx->sentry + comp);
	if (comp >= c_ctx->nr_valid) {
		rec_ctx->nr_valid = 0;
		rec_ctx->nr_padded = nr_entries - comp;

		c_ctx->nr_padded = comp - c_ctx->nr_valid;
	} else {
		rec_ctx->nr_valid = c_ctx->nr_valid - comp;
		rec_ctx->nr_padded = c_ctx->nr_padded;

		c_ctx->nr_valid = comp;
		c_ctx->nr_padded = 0;
	}

	recovery->rqd = rec_rqd;
	recovery->pblk = pblk;

	return 0;
}
__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta_buf)
{
	u32 crc;

	crc = pblk_calc_emeta_crc(pblk, emeta_buf);
	if (le32_to_cpu(emeta_buf->crc) != crc)
		return NULL;

	if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
		return NULL;

	return emeta_to_lbas(pblk, emeta_buf);
}
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	__le64 *lba_list;
	int data_start;
	int nr_data_lbas, nr_valid_lbas, nr_lbas = 0;
	int i;

	lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
	if (!lba_list)
		return 1;

	data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
	nr_data_lbas = lm->sec_per_line - lm->emeta_sec[0];
	nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);

	for (i = data_start; i < nr_data_lbas && nr_lbas < nr_valid_lbas; i++) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_pblk_ppa(pblk, i, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		/* Do not update bad blocks */
		if (test_bit(pos, line->blk_bitmap))
			continue;

		if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
			spin_lock(&line->lock);
			if (test_and_set_bit(i, line->invalid_bitmap))
				WARN_ONCE(1, "pblk: rec. double invalidate:\n");
			else
				le32_add_cpu(line->vsc, -1);
			spin_unlock(&line->lock);

			continue;
		}

		pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
		nr_lbas++;
	}

	if (nr_valid_lbas != nr_lbas)
		pr_err("pblk: line %d - inconsistent lba list(%llu/%d)\n",
				line->id, emeta_buf->nr_valid_lbas, nr_lbas);

	line->left_msecs = 0;

	return 0;
}
static int pblk_calc_sec_in_line(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);

	return lm->sec_per_line - lm->smeta_sec - lm->emeta_sec[0] -
				nr_bb * geo->sec_per_blk;
}
struct pblk_recov_alloc {
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	void *data;
	dma_addr_t dma_ppa_list;
	dma_addr_t dma_meta_list;
};
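/*
 * Read back the sectors between r_ptr and the line's current write pointer,
 * skipping bad blocks, and use the lba stored in each sector's OOB metadata
 * to repopulate the L2P table. Used after padding a partially written line
 * (see pblk_recov_scan_all_oob below).
 */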
static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
			       struct pblk_recov_alloc p, u64 r_ptr)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	u64 r_ptr_int;
	int left_ppas;
	int rq_ppas, rq_len;
	int i, j;
	int ret = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	left_ppas = line->cur_sec - r_ptr;
	if (!left_ppas)
		return 0;

	r_ptr_int = r_ptr;

next_read_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->sec_size;

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	rqd->end_io = pblk_end_io_sync;
	rqd->private = &wait;

	if (pblk_io_aligned(pblk, rq_ppas))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
		pos = pblk_dev_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			r_ptr_int += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
			pos = pblk_dev_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, r_ptr_int++)
			rqd->ppa_list[i] =
				addr_to_gen_ppa(pblk, r_ptr_int, line->id);
	}

	/* If read fails, more padding is needed */
	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		return ret;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: L2P recovery read timed out\n");
		return -EINTR;
	}
	atomic_dec(&pblk->inflight_io);
	reinit_completion(&wait);

	/* At this point, the read should not fail. If it does, it is a problem
	 * we cannot recover from here. Need FTL log.
	 */
	if (rqd->error) {
		pr_err("pblk: L2P recovery failed (%d)\n", rqd->error);
		return -EINTR;
	}

	for (i = 0; i < rqd->nr_ppas; i++) {
		u64 lba = le64_to_cpu(meta_list[i].lba);

		if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
			continue;

		pblk_update_map(pblk, lba, rqd->ppa_list[i]);
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_read_rq;

	return 0;
}
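/*
 * Completion path for padding writes: every in-flight pad request holds a
 * reference on the shared pblk_pad_rq, and the completion in
 * pblk_recov_complete fires once the last reference is dropped.
 */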
static void pblk_recov_complete(struct kref *ref)
{
	struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);

	complete(&pad_rq->wait);
}

static void pblk_end_io_recov(struct nvm_rq *rqd)
{
	struct pblk_pad_rq *pad_rq = rqd->private;
	struct pblk *pblk = pad_rq->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;

	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
	pblk_free_rqd(pblk, rqd, WRITE);

	atomic_dec(&pblk->inflight_io);
	kref_put(&pad_rq->ref, pblk_recov_complete);
}
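/*
 * Pad up to left_ppas sectors of a partially written line with empty
 * (ADDR_EMPTY) sectors, in min_write_pgs-sized chunks, skipping bad blocks.
 * The padded sectors are invalidated in the L2P and marked empty in the
 * line's emeta lba list so that the line can be read back afterwards.
 */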
static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
			      int left_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct pblk_pad_rq *pad_rq;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	u64 w_ptr = line->cur_sec;
	int left_line_ppas, rq_ppas, rq_len;
	int i, j;
	int ret = 0;

	spin_lock(&line->lock);
	left_line_ppas = line->left_msecs;
	spin_unlock(&line->lock);

	pad_rq = kmalloc(sizeof(struct pblk_pad_rq), GFP_KERNEL);
	if (!pad_rq)
		return -ENOMEM;

	data = vzalloc(pblk->max_write_pgs * geo->sec_size);
	if (!data) {
		ret = -ENOMEM;
		goto free_rq;
	}

	pad_rq->pblk = pblk;
	init_completion(&pad_rq->wait);
	kref_init(&pad_rq->ref);

next_pad_rq:
	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (rq_ppas < pblk->min_write_pgs) {
		pr_err("pblk: corrupted pad line %d\n", line->id);
		goto fail_free_pad;
	}

	rq_len = rq_ppas * geo->sec_size;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
	if (!meta_list) {
		ret = -ENOMEM;
		goto fail_free_pad;
	}

	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

	rqd = pblk_alloc_rqd(pblk, WRITE);
	if (IS_ERR(rqd)) {
		ret = PTR_ERR(rqd);
		goto fail_free_meta;
	}

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
						PBLK_VMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PWRITE;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	rqd->end_io = pblk_end_io_recov;
	rqd->private = pad_rq;

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			w_ptr += pblk->min_write_pgs;
			ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
			struct ppa_addr dev_ppa;
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);

			pblk_map_invalidate(pblk, dev_ppa);
			lba_list[w_ptr] = meta_list[i].lba = addr_empty;
			rqd->ppa_list[i] = dev_ppa;
		}
	}

	kref_get(&pad_rq->ref);
	pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
		goto fail_free_bio;
	}

	left_line_ppas -= rq_ppas;
	left_ppas -= rq_ppas;
	if (left_ppas && left_line_ppas)
		goto next_pad_rq;

	kref_put(&pad_rq->ref, pblk_recov_complete);

	if (!wait_for_completion_io_timeout(&pad_rq->wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: pad write timed out\n");
		ret = -ETIME;
	}

	if (!pblk_line_is_full(line))
		pr_err("pblk: corrupted padded line: %d\n", line->id);

	vfree(data);
free_rq:
	kfree(pad_rq);
	return ret;

fail_free_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, WRITE);
fail_free_meta:
	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
fail_free_pad:
	kfree(pad_rq);
	vfree(data);
	return ret;
}
/* When this function is called, it means that not all upper pages have been
 * written in a page that contains valid data. In order to recover this data,
 * we first find the write pointer on the device, then we pad all necessary
 * sectors, and finally attempt to read the valid data.
 */
static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_recov_alloc p)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	u64 w_ptr = 0, r_ptr;
	int rq_ppas, rq_len;
	int i, j;
	int ret = 0;
	int rec_round;
	int left_ppas = pblk_calc_sec_in_line(pblk, line) - line->cur_sec;
	DECLARE_COMPLETION_ONSTACK(wait);

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	/* we could recover up until the line write pointer */
	r_ptr = line->cur_sec;
	rec_round = 0;

next_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->sec_size;

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	rqd->end_io = pblk_end_io_sync;
	rqd->private = &wait;

	if (pblk_io_aligned(pblk, rq_ppas))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
		pos = pblk_dev_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			w_ptr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
			pos = pblk_dev_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++)
			rqd->ppa_list[i] =
				addr_to_gen_ppa(pblk, w_ptr, line->id);
	}

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		return ret;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: L2P recovery read timed out\n");
	}
	atomic_dec(&pblk->inflight_io);
	reinit_completion(&wait);

	/* This should not happen since the read failed during normal recovery,
	 * but the media works funny sometimes...
	 */
	if (!rec_round++ && !rqd->error) {
		for (i = 0; i < rqd->nr_ppas; i++, r_ptr++) {
			u64 lba = le64_to_cpu(meta_list[i].lba);

			if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
				continue;

			pblk_update_map(pblk, lba, rqd->ppa_list[i]);
		}
	}

	/* Reached the end of the written line */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		int pad_secs, nr_error_bits, bit;

		bit = find_first_bit((void *)&rqd->ppa_status, rqd->nr_ppas);
		nr_error_bits = rqd->nr_ppas - bit;

		/* Roll back failed sectors */
		line->cur_sec -= nr_error_bits;
		line->left_msecs += nr_error_bits;
		bitmap_clear(line->map_bitmap, line->cur_sec, nr_error_bits);

		pad_secs = pblk_pad_distance(pblk);
		if (pad_secs > line->left_msecs)
			pad_secs = line->left_msecs;

		ret = pblk_recov_pad_oob(pblk, line, pad_secs);
		if (ret)
			pr_err("pblk: OOB padding failed (err:%d)\n", ret);

		ret = pblk_recov_read_oob(pblk, line, p, r_ptr);
		if (ret)
			pr_err("pblk: OOB read failed (err:%d)\n", ret);

		left_ppas = 0;
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_rq;

	return ret;
}
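/*
 * First recovery pass over a line: walk the mapped sectors in write order,
 * read their OOB lba metadata and rebuild the L2P. If the read stops on an
 * error other than an empty page, *done is cleared so the caller falls back
 * to pblk_recov_scan_all_oob(), which pads the line before re-reading it.
 */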
static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
			       struct pblk_recov_alloc p, int *done)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	u64 paddr;
	int rq_ppas, rq_len;
	int i, j;
	int ret = 0;
	int left_ppas = pblk_calc_sec_in_line(pblk, line);
	DECLARE_COMPLETION_ONSTACK(wait);

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	*done = 1;

next_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->sec_size;

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	rqd->end_io = pblk_end_io_sync;
	rqd->private = &wait;

	if (pblk_io_aligned(pblk, rq_ppas))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		paddr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_gen_ppa(pblk, paddr, line->id);
		pos = pblk_dev_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			paddr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, paddr, line->id);
			pos = pblk_dev_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, paddr++)
			rqd->ppa_list[i] =
				addr_to_gen_ppa(pblk, paddr, line->id);
	}

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		bio_put(bio);
		return ret;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: L2P recovery read timed out\n");
	}
	atomic_dec(&pblk->inflight_io);
	reinit_completion(&wait);

	/* Reached the end of the written line */
	if (rqd->error) {
		int nr_error_bits, bit;

		bit = find_first_bit((void *)&rqd->ppa_status, rqd->nr_ppas);
		nr_error_bits = rqd->nr_ppas - bit;

		/* Roll back failed sectors */
		line->cur_sec -= nr_error_bits;
		line->left_msecs += nr_error_bits;
		bitmap_clear(line->map_bitmap, line->cur_sec, nr_error_bits);

		left_ppas = 0;
		rqd->nr_ppas = bit;

		if (rqd->error != NVM_RSP_ERR_EMPTYPAGE)
			*done = 0;
	}

	for (i = 0; i < rqd->nr_ppas; i++) {
		u64 lba = le64_to_cpu(meta_list[i].lba);

		if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
			continue;

		pblk_update_map(pblk, lba, rqd->ppa_list[i]);
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_rq;

	return ret;
}
/* Scan line for lbas on out of bound area */
static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_rq *rqd;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct pblk_recov_alloc p;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int done, ret = 0;

	rqd = pblk_alloc_rqd(pblk, READ);
	if (IS_ERR(rqd))
		return PTR_ERR(rqd);

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
	if (!meta_list) {
		ret = -ENOMEM;
		goto free_rqd;
	}

	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

	data = kcalloc(pblk->max_write_pgs, geo->sec_size, GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto free_meta_list;
	}

	p.ppa_list = ppa_list;
	p.meta_list = meta_list;
	p.rqd = rqd;
	p.data = data;
	p.dma_ppa_list = dma_ppa_list;
	p.dma_meta_list = dma_meta_list;

	ret = pblk_recov_scan_oob(pblk, line, p, &done);
	if (ret) {
		pr_err("pblk: could not recover L2P from OOB\n");
		goto out;
	}

	if (!done) {
		ret = pblk_recov_scan_all_oob(pblk, line, p);
		if (ret) {
			pr_err("pblk: could not recover L2P from OOB\n");
			goto out;
		}
	}

	if (pblk_line_is_full(line))
		pblk_line_recov_close(pblk, line);

out:
	kfree(data);
free_meta_list:
	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
free_rqd:
	pblk_free_rqd(pblk, rqd, READ);

	return ret;
}
/* Insert lines ordered by sequence number (seq_num) on list */
static void pblk_recov_line_add_ordered(struct list_head *head,
					struct pblk_line *line)
{
	struct pblk_line *t = NULL;

	list_for_each_entry(t, head, list)
		if (t->seq_nr > line->seq_nr)
			break;

	__list_add(&line->list, t->list.prev, &t->list);
}
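/*
 * Scan-based L2P recovery: read and validate smeta for every line, order the
 * written lines by sequence number, then recover each line's mappings from
 * emeta when it is intact or from the per-sector OOB area otherwise. Returns
 * the still-open data line (if any) so that writing can resume on it.
 */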
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line, *tline, *data_line = NULL;
	struct pblk_smeta *smeta;
	struct pblk_emeta *emeta;
	struct line_smeta *smeta_buf;
	int found_lines = 0, recovered_lines = 0, open_lines = 0;
	int is_next = 0;
	int meta_line;
	int i, valid_uuid = 0;
	LIST_HEAD(recov_list);

	/* TODO: Implement FTL snapshot */

	/* Scan recovery - takes place when FTL snapshot fails */
	spin_lock(&l_mg->free_lock);
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	set_bit(meta_line, &l_mg->meta_bitmap);
	smeta = l_mg->sline_meta[meta_line];
	emeta = l_mg->eline_meta[meta_line];
	smeta_buf = (struct line_smeta *)smeta;
	spin_unlock(&l_mg->free_lock);

	/* Order data lines using their sequence number */
	for (i = 0; i < l_mg->nr_lines; i++) {
		u32 crc;

		line = &pblk->lines[i];

		memset(smeta, 0, lm->smeta_len);
		line->smeta = smeta;
		line->lun_bitmap = ((void *)(smeta_buf)) +
						sizeof(struct line_smeta);

		/* Lines that cannot be read are assumed as not written here */
		if (pblk_line_read_smeta(pblk, line))
			continue;

		crc = pblk_calc_smeta_crc(pblk, smeta_buf);
		if (le32_to_cpu(smeta_buf->crc) != crc)
			continue;

		if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
			continue;

		if (le16_to_cpu(smeta_buf->header.version) != 1) {
			pr_err("pblk: found incompatible line version %u\n",
					smeta_buf->header.version);
			return ERR_PTR(-EINVAL);
		}

		/* The first valid instance uuid is used for initialization */
		if (!valid_uuid) {
			memcpy(pblk->instance_uuid, smeta_buf->header.uuid, 16);
			valid_uuid = 1;
		}

		if (memcmp(pblk->instance_uuid, smeta_buf->header.uuid, 16)) {
			pr_debug("pblk: ignore line %u due to uuid mismatch\n",
					i);
			continue;
		}

		/* Update line metadata */
		spin_lock(&line->lock);
		line->id = le32_to_cpu(smeta_buf->header.id);
		line->type = le16_to_cpu(smeta_buf->header.type);
		line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
		spin_unlock(&line->lock);

		/* Update general metadata */
		spin_lock(&l_mg->free_lock);
		if (line->seq_nr >= l_mg->d_seq_nr)
			l_mg->d_seq_nr = line->seq_nr + 1;
		l_mg->nr_free_lines--;
		spin_unlock(&l_mg->free_lock);

		if (pblk_line_recov_alloc(pblk, line))
			goto out;

		pblk_recov_line_add_ordered(&recov_list, line);
		found_lines++;
		pr_debug("pblk: recovering data line %d, seq:%llu\n",
						line->id, smeta_buf->seq_nr);
	}

	if (!found_lines) {
		pblk_setup_uuid(pblk);

		spin_lock(&l_mg->free_lock);
		WARN_ON_ONCE(!test_and_clear_bit(meta_line,
							&l_mg->meta_bitmap));
		spin_unlock(&l_mg->free_lock);

		goto out;
	}

	/* Verify closed blocks and recover this portion of L2P table */
	list_for_each_entry_safe(line, tline, &recov_list, list) {
		int off, nr_bb;

		recovered_lines++;

		/* Calculate where emeta starts based on the line bb */
		off = lm->sec_per_line - lm->emeta_sec[0];
		nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
		off -= nr_bb * geo->sec_per_pl;

		line->emeta_ssec = off;
		line->emeta = emeta;
		memset(line->emeta->buf, 0, lm->emeta_len[0]);

		if (pblk_line_read_emeta(pblk, line, line->emeta->buf)) {
			pblk_recov_l2p_from_oob(pblk, line);
			goto next;
		}

		if (pblk_recov_l2p_from_emeta(pblk, line))
			pblk_recov_l2p_from_oob(pblk, line);

next:
		if (pblk_line_is_full(line)) {
			struct list_head *move_list;

			spin_lock(&line->lock);
			line->state = PBLK_LINESTATE_CLOSED;
			move_list = pblk_line_gc_list(pblk, line);
			spin_unlock(&line->lock);

			spin_lock(&l_mg->gc_lock);
			list_move_tail(&line->list, move_list);
			spin_unlock(&l_mg->gc_lock);

			mempool_free(line->map_bitmap, pblk->line_meta_pool);
			line->map_bitmap = NULL;
		} else {
			if (open_lines > 1)
				pr_err("pblk: failed to recover L2P\n");

			open_lines++;
			line->meta_line = meta_line;
			data_line = line;
		}
	}

	spin_lock(&l_mg->free_lock);
	if (!open_lines) {
		WARN_ON_ONCE(!test_and_clear_bit(meta_line,
							&l_mg->meta_bitmap));
		pblk_line_replace_data(pblk);
	} else {
		/* Allocate next line for preparation */
		l_mg->data_next = pblk_line_get(pblk);
		if (l_mg->data_next) {
			l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
			l_mg->data_next->type = PBLK_LINETYPE_DATA;
			is_next = 1;
		}
	}
	spin_unlock(&l_mg->free_lock);

	if (is_next) {
		pblk_line_erase(pblk, l_mg->data_next);
		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
	}

out:
	if (found_lines != recovered_lines)
		pr_err("pblk: failed to recover all found lines %d/%d\n",
				found_lines, recovered_lines);

	return data_line;
}
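/*
 * Tear-down helper: pad whatever is left of the open data line and then
 * close its metadata (pblk_line_close_meta) so the line is consistent the
 * next time the target is brought up.
 */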
int pblk_recov_pad(struct pblk *pblk)
{
	struct pblk_line *line;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int left_msecs;
	int ret = 0;

	spin_lock(&l_mg->free_lock);
	line = l_mg->data_line;
	left_msecs = line->left_msecs;
	spin_unlock(&l_mg->free_lock);

	ret = pblk_recov_pad_oob(pblk, line, left_msecs);
	if (ret) {
		pr_err("pblk: Tear down padding failed (%d)\n", ret);
		return ret;
	}

	pblk_line_close_meta(pblk, line);
	return ret;
}