/*
 * Copyright (C) 2016 CNEX Labs
 * Initial: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-recovery.c - pblk's recovery path
 */

#include "pblk.h"
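/*
 * Recovery rebuilds the L2P table after an unclean shutdown: each line's
 * start metadata (smeta) is scanned and validated, written lines are ordered
 * by sequence number, and the mapping is then reconstructed either from the
 * end metadata (emeta) of closed lines or from the per-sector OOB area of
 * lines that were still open.
 */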
void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	int max_secs = nvm_max_phys_sects(dev);
	struct bio *bio;
	unsigned int nr_rec_secs;
	unsigned int pgs_read;
	int ret;

	nr_rec_secs = bitmap_weight((unsigned long int *)&rqd->ppa_status,
								max_secs);

	bio = bio_alloc(GFP_KERNEL, nr_rec_secs);

	bio->bi_iter.bi_sector = 0;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd->bio = bio;
	rqd->nr_ppas = nr_rec_secs;

	pgs_read = pblk_rb_read_to_bio_list(&pblk->rwb, bio, &recovery->failed,
								nr_rec_secs);
	if (pgs_read != nr_rec_secs) {
		pr_err("pblk: could not read recovery entries\n");
		goto err;
	}

	if (pblk_setup_w_rec_rq(pblk, rqd, c_ctx)) {
		pr_err("pblk: could not setup recovery request\n");
		goto err;
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_rec_secs, &pblk->recov_writes);
#endif

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		goto err;
	}

	mempool_free(recovery, pblk->rec_pool);
	return;

err:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
}
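/*
 * Prepare a new write request for the entries of a failed write that still
 * need to be persisted: the completion bitmap is shifted past the entries
 * that already completed, and the original write context is trimmed to the
 * completed part while the recovery context takes over the rest.
 */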
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
			struct pblk_rec_ctx *recovery, u64 *comp_bits,
			unsigned int comp)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	int max_secs = nvm_max_phys_sects(dev);
	struct nvm_rq *rec_rqd;
	struct pblk_c_ctx *rec_ctx;
	int nr_entries = c_ctx->nr_valid + c_ctx->nr_padded;

	rec_rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
	rec_ctx = nvm_rq_to_pdu(rec_rqd);

	/* Copy completion bitmap, but exclude the first X completed entries */
	bitmap_shift_right((unsigned long int *)&rec_rqd->ppa_status,
				(unsigned long int *)comp_bits,
				comp, max_secs);

	/* Save the context for the entries that need to be re-written and
	 * update current context with the completed entries.
	 */
	rec_ctx->sentry = pblk_rb_wrap_pos(&pblk->rwb, c_ctx->sentry + comp);
	if (comp >= c_ctx->nr_valid) {
		rec_ctx->nr_valid = 0;
		rec_ctx->nr_padded = nr_entries - comp;

		c_ctx->nr_padded = comp - c_ctx->nr_valid;
	} else {
		rec_ctx->nr_valid = c_ctx->nr_valid - comp;
		rec_ctx->nr_padded = c_ctx->nr_padded;

		c_ctx->nr_valid = comp;
		c_ctx->nr_padded = 0;
	}

	recovery->rqd = rec_rqd;
	recovery->pblk = pblk;

	return 0;
}
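/*
 * Sanity-check end metadata read from the device: both the CRC and the
 * magic identifier must match before the emeta lba list is trusted.
 */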
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
	u32 crc;

	crc = pblk_calc_emeta_crc(pblk, emeta_buf);
	if (le32_to_cpu(emeta_buf->crc) != crc)
		return 1;

	if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
		return 1;

	return 0;
}
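/*
 * Rebuild the L2P entries of a closed line from the lba list stored in its
 * end metadata. Bad blocks are skipped and empty entries are accounted as
 * invalidated sectors.
 */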
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	__le64 *lba_list;
	u64 data_start, data_end;
	u64 nr_valid_lbas, nr_lbas = 0;
	u64 i;

	lba_list = emeta_to_lbas(pblk, emeta_buf);
	if (!lba_list)
		return 1;

	data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
	data_end = line->emeta_ssec;
	nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);

	for (i = data_start; i < data_end; i++) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_gen_ppa(pblk, i, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		/* Do not update bad blocks */
		if (test_bit(pos, line->blk_bitmap))
			continue;

		if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
			spin_lock(&line->lock);
			if (test_and_set_bit(i, line->invalid_bitmap))
				WARN_ONCE(1, "pblk: rec. double invalidate:\n");
			else
				le32_add_cpu(line->vsc, -1);
			spin_unlock(&line->lock);

			continue;
		}

		pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
		nr_lbas++;
	}

	if (nr_valid_lbas != nr_lbas)
		pr_err("pblk: line %d - inconsistent lba list(%llu/%llu)\n",
				line->id, nr_valid_lbas, nr_lbas);

	line->left_msecs = 0;

	return 0;
}
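/* Number of data sectors in a line, excluding smeta, emeta and bad blocks */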
static int pblk_calc_sec_in_line(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);

	return lm->sec_per_line - lm->smeta_sec - lm->emeta_sec[0] -
				nr_bb * geo->sec_per_chk;
}
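/* Resources shared by the OOB scan and recovery helpers below */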
struct pblk_recov_alloc {
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	void *data;
	dma_addr_t dma_ppa_list;
	dma_addr_t dma_meta_list;
};
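/*
 * Re-read the sectors between r_ptr and the line's current write pointer
 * and update the L2P table from the lbas stored in their OOB metadata.
 * Reads are issued synchronously in min_write_pgs-sized chunks, skipping
 * bad blocks.
 */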
static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
			       struct pblk_recov_alloc p, u64 r_ptr)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	u64 r_ptr_int;
	int left_ppas;
	int rq_ppas, rq_len;
	int i, j;
	int ret = 0;

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	left_ppas = line->cur_sec - r_ptr;
	if (!left_ppas)
		return 0;

	r_ptr_int = r_ptr;

next_read_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->sec_size;

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;

	if (pblk_io_aligned(pblk, rq_ppas))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			r_ptr_int += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, r_ptr_int++)
			rqd->ppa_list[i] =
				addr_to_gen_ppa(pblk, r_ptr_int, line->id);
	}

	/* If read fails, more padding is needed */
	ret = pblk_submit_io_sync(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		return ret;
	}

	atomic_dec(&pblk->inflight_io);

	/* At this point, the read should not fail. If it does, it is a problem
	 * we cannot recover from here. Need FTL log.
	 */
	if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
		pr_err("pblk: L2P recovery failed (%d)\n", rqd->error);
		return -EINTR;
	}

	for (i = 0; i < rqd->nr_ppas; i++) {
		u64 lba = le64_to_cpu(meta_list[i].lba);

		if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
			continue;

		pblk_update_map(pblk, lba, rqd->ppa_list[i]);
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_read_rq;

	return 0;
}
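/*
 * Padding writes are tracked with a kref on the shared pblk_pad_rq; the
 * last completion drops the final reference and wakes up the waiter in
 * pblk_recov_pad_oob().
 */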
static void pblk_recov_complete(struct kref *ref)
{
	struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);

	complete(&pad_rq->wait);
}
static void pblk_end_io_recov(struct nvm_rq *rqd)
{
	struct pblk_pad_rq *pad_rq = rqd->private;
	struct pblk *pblk = pad_rq->pblk;

	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

	atomic_dec(&pblk->inflight_io);
	kref_put(&pad_rq->ref, pblk_recov_complete);
}
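/*
 * Pad left_ppas sectors of the line with empty entries: the padded sectors
 * are mapped as invalid and their lbas recorded as ADDR_EMPTY both in the
 * emeta lba list and in the per-sector OOB metadata.
 */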
static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
			      int left_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct pblk_pad_rq *pad_rq;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	u64 w_ptr = line->cur_sec;
	int left_line_ppas, rq_ppas, rq_len;
	int i, j;
	int ret = 0;

	spin_lock(&line->lock);
	left_line_ppas = line->left_msecs;
	spin_unlock(&line->lock);

	pad_rq = kmalloc(sizeof(struct pblk_pad_rq), GFP_KERNEL);
	if (!pad_rq)
		return -ENOMEM;

	data = vzalloc(pblk->max_write_pgs * geo->sec_size);
	if (!data) {
		ret = -ENOMEM;
		goto free_rq;
	}

	pad_rq->pblk = pblk;
	init_completion(&pad_rq->wait);
	kref_init(&pad_rq->ref);

next_pad_rq:
	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (rq_ppas < pblk->min_write_pgs) {
		pr_err("pblk: corrupted pad line %d\n", line->id);
		goto fail_free_pad;
	}

	rq_len = rq_ppas * geo->sec_size;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
	if (!meta_list) {
		ret = -ENOMEM;
		goto fail_free_pad;
	}

	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
						PBLK_VMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto fail_free_meta;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PWRITE;
	rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	rqd->end_io = pblk_end_io_recov;
	rqd->private = pad_rq;

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			w_ptr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
			struct ppa_addr dev_ppa;
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);

			pblk_map_invalidate(pblk, dev_ppa);
			lba_list[w_ptr] = meta_list[i].lba = addr_empty;
			rqd->ppa_list[i] = dev_ppa;
		}
	}

	kref_get(&pad_rq->ref);
	pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
		goto fail_free_bio;
	}

	left_line_ppas -= rq_ppas;
	left_ppas -= rq_ppas;
	if (left_ppas && left_line_ppas)
		goto next_pad_rq;

	kref_put(&pad_rq->ref, pblk_recov_complete);

	if (!wait_for_completion_io_timeout(&pad_rq->wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: pad write timed out\n");
		ret = -ETIME;
	}

	if (!pblk_line_is_full(line))
		pr_err("pblk: corrupted padded line: %d\n", line->id);

	vfree(data);
free_rq:
	kfree(pad_rq);
	return ret;

fail_free_bio:
	bio_put(bio);
fail_free_meta:
	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
fail_free_pad:
	kfree(pad_rq);
	vfree(data);
	return ret;
}
/* When this function is called, it means that not all upper pages have been
 * written in a page that contains valid data. In order to recover this data,
 * we first find the write pointer on the device, then we pad all necessary
 * sectors, and finally attempt to read the valid data.
 */
static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_recov_alloc p)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	u64 w_ptr = 0, r_ptr;
	int rq_ppas, rq_len;
	int i, j;
	int ret = 0;
	int rec_round;
	int left_ppas = pblk_calc_sec_in_line(pblk, line) - line->cur_sec;

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	/* we could recover up until the line write pointer */
	r_ptr = line->cur_sec;
	rec_round = 0;

next_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->sec_size;

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;

	if (pblk_io_aligned(pblk, rq_ppas))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			w_ptr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++)
			rqd->ppa_list[i] =
				addr_to_gen_ppa(pblk, w_ptr, line->id);
	}

	ret = pblk_submit_io_sync(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		return ret;
	}

	atomic_dec(&pblk->inflight_io);

	/* This should not happen since the read failed during normal recovery,
	 * but the media works funny sometimes...
	 */
	if (!rec_round++ && !rqd->error) {
		for (i = 0; i < rqd->nr_ppas; i++, r_ptr++) {
			u64 lba = le64_to_cpu(meta_list[i].lba);

			if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
				continue;

			pblk_update_map(pblk, lba, rqd->ppa_list[i]);
		}
	}

	/* Reached the end of the written line */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		int pad_secs, nr_error_bits, bit;

		bit = find_first_bit((void *)&rqd->ppa_status, rqd->nr_ppas);
		nr_error_bits = rqd->nr_ppas - bit;

		/* Roll back failed sectors */
		line->cur_sec -= nr_error_bits;
		line->left_msecs += nr_error_bits;
		bitmap_clear(line->map_bitmap, line->cur_sec, nr_error_bits);

		pad_secs = pblk_pad_distance(pblk);
		if (pad_secs > line->left_msecs)
			pad_secs = line->left_msecs;

		ret = pblk_recov_pad_oob(pblk, line, pad_secs);
		if (ret)
			pr_err("pblk: OOB padding failed (err:%d)\n", ret);

		ret = pblk_recov_read_oob(pblk, line, p, r_ptr);
		if (ret)
			pr_err("pblk: OOB read failed (err:%d)\n", ret);

		left_ppas = 0;
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_rq;

	return ret;
}
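/*
 * First recovery pass: scan the line's OOB metadata from the start of the
 * data area. On a read error the failed sectors are rolled back; *done is
 * cleared when the error is not an empty-page error, so that the caller
 * falls back to pblk_recov_scan_all_oob().
 */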
static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
			       struct pblk_recov_alloc p, int *done)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	u64 paddr;
	int rq_ppas, rq_len;
	int i, j;
	int ret = 0;
	int left_ppas = pblk_calc_sec_in_line(pblk, line);

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	*done = 1;

next_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->sec_size;

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;

	if (pblk_io_aligned(pblk, rq_ppas))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		paddr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_gen_ppa(pblk, paddr, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			paddr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, paddr, line->id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, paddr++)
			rqd->ppa_list[i] =
				addr_to_gen_ppa(pblk, paddr, line->id);
	}

	ret = pblk_submit_io_sync(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		bio_put(bio);
		return ret;
	}

	atomic_dec(&pblk->inflight_io);

	/* Reached the end of the written line */
	if (rqd->error) {
		int nr_error_bits, bit;

		bit = find_first_bit((void *)&rqd->ppa_status, rqd->nr_ppas);
		nr_error_bits = rqd->nr_ppas - bit;

		/* Roll back failed sectors */
		line->cur_sec -= nr_error_bits;
		line->left_msecs += nr_error_bits;
		bitmap_clear(line->map_bitmap, line->cur_sec, nr_error_bits);

		left_ppas = 0;
		rqd->nr_ppas = bit;

		if (rqd->error != NVM_RSP_ERR_EMPTYPAGE)
			*done = 0;
	}

	for (i = 0; i < rqd->nr_ppas; i++) {
		u64 lba = le64_to_cpu(meta_list[i].lba);

		if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
			continue;

		pblk_update_map(pblk, lba, rqd->ppa_list[i]);
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_rq;

	return ret;
}
/* Scan line for lbas on out of bound area */
static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_rq *rqd;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct pblk_recov_alloc p;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int done, ret = 0;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
	if (!meta_list)
		return -ENOMEM;

	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

	data = kcalloc(pblk->max_write_pgs, geo->sec_size, GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto free_meta_list;
	}

	rqd = pblk_alloc_rqd(pblk, PBLK_READ);

	p.ppa_list = ppa_list;
	p.meta_list = meta_list;
	p.rqd = rqd;
	p.data = data;
	p.dma_ppa_list = dma_ppa_list;
	p.dma_meta_list = dma_meta_list;

	ret = pblk_recov_scan_oob(pblk, line, p, &done);
	if (ret) {
		pr_err("pblk: could not recover L2P from OOB\n");
		goto out;
	}

	if (!done) {
		ret = pblk_recov_scan_all_oob(pblk, line, p);
		if (ret) {
			pr_err("pblk: could not recover L2P from OOB\n");
			goto out;
		}
	}

	if (pblk_line_is_full(line))
		pblk_line_recov_close(pblk, line);

out:
	kfree(data);
free_meta_list:
	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);

	return ret;
}
/* Insert lines ordered by sequence number (seq_nr) on list */
static void pblk_recov_line_add_ordered(struct list_head *head,
					struct pblk_line *line)
{
	struct pblk_line *t = NULL;

	list_for_each_entry(t, head, list)
		if (t->seq_nr > line->seq_nr)
			break;

	__list_add(&line->list, t->list.prev, &t->list);
}
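/*
 * Compute the first sector of the line reserved for end metadata, walking
 * backwards from the end of the line and skipping bad blocks.
 */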
static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned int emeta_secs;
	u64 emeta_start;
	struct ppa_addr ppa;
	int pos;

	emeta_secs = lm->emeta_sec[0];
	emeta_start = lm->sec_per_line;

	while (emeta_secs) {
		emeta_start--;
		ppa = addr_to_gen_ppa(pblk, emeta_start, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);
		if (!test_bit(pos, line->blk_bitmap))
			emeta_secs--;
	}

	return emeta_start;
}
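/*
 * Main recovery entry point. Scans all lines' start metadata, orders the
 * written lines by sequence number and recovers their L2P mappings from
 * emeta when possible, falling back to the OOB area otherwise. Returns the
 * open data line to resume writing on, or NULL if no line was left open.
 */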
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line, *tline, *data_line = NULL;
	struct pblk_smeta *smeta;
	struct pblk_emeta *emeta;
	struct line_smeta *smeta_buf;
	int found_lines = 0, recovered_lines = 0, open_lines = 0;
	int is_next = 0;
	int meta_line;
	int i, valid_uuid = 0;
	LIST_HEAD(recov_list);

	/* TODO: Implement FTL snapshot */

	/* Scan recovery - takes place when FTL snapshot fails */
	spin_lock(&l_mg->free_lock);
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	set_bit(meta_line, &l_mg->meta_bitmap);
	smeta = l_mg->sline_meta[meta_line];
	emeta = l_mg->eline_meta[meta_line];
	smeta_buf = (struct line_smeta *)smeta;
	spin_unlock(&l_mg->free_lock);

	/* Order data lines using their sequence number */
	for (i = 0; i < l_mg->nr_lines; i++) {
		u32 crc;

		line = &pblk->lines[i];

		memset(smeta, 0, lm->smeta_len);
		line->smeta = smeta;
		line->lun_bitmap = ((void *)(smeta_buf)) +
						sizeof(struct line_smeta);

		/* Lines that cannot be read are assumed as not written here */
		if (pblk_line_read_smeta(pblk, line))
			continue;

		crc = pblk_calc_smeta_crc(pblk, smeta_buf);
		if (le32_to_cpu(smeta_buf->crc) != crc)
			continue;

		if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
			continue;

		if (smeta_buf->header.version != SMETA_VERSION) {
			pr_err("pblk: found incompatible line version %u\n",
					le16_to_cpu(smeta_buf->header.version));
			return ERR_PTR(-EINVAL);
		}

		/* The first valid instance uuid is used for initialization */
		if (!valid_uuid) {
			memcpy(pblk->instance_uuid, smeta_buf->header.uuid, 16);
			valid_uuid = 1;
		}

		if (memcmp(pblk->instance_uuid, smeta_buf->header.uuid, 16)) {
			pr_debug("pblk: ignore line %u due to uuid mismatch\n",
					i);
			continue;
		}

		/* Update line metadata */
		spin_lock(&line->lock);
		line->id = le32_to_cpu(smeta_buf->header.id);
		line->type = le16_to_cpu(smeta_buf->header.type);
		line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
		spin_unlock(&line->lock);

		/* Update general metadata */
		spin_lock(&l_mg->free_lock);
		if (line->seq_nr >= l_mg->d_seq_nr)
			l_mg->d_seq_nr = line->seq_nr + 1;
		l_mg->nr_free_lines--;
		spin_unlock(&l_mg->free_lock);

		if (pblk_line_recov_alloc(pblk, line))
			goto out;

		pblk_recov_line_add_ordered(&recov_list, line);
		found_lines++;
		pr_debug("pblk: recovering data line %d, seq:%llu\n",
						line->id, smeta_buf->seq_nr);
	}

	if (!found_lines) {
		pblk_setup_uuid(pblk);

		spin_lock(&l_mg->free_lock);
		WARN_ON_ONCE(!test_and_clear_bit(meta_line,
							&l_mg->meta_bitmap));
		spin_unlock(&l_mg->free_lock);

		goto out;
	}

	/* Verify closed blocks and recover this portion of L2P table*/
	list_for_each_entry_safe(line, tline, &recov_list, list) {
		recovered_lines++;

		line->emeta_ssec = pblk_line_emeta_start(pblk, line);
		line->emeta = emeta;
		memset(line->emeta->buf, 0, lm->emeta_len[0]);

		if (pblk_line_read_emeta(pblk, line, line->emeta->buf)) {
			pblk_recov_l2p_from_oob(pblk, line);
			goto next;
		}

		if (pblk_recov_check_emeta(pblk, line->emeta->buf)) {
			pblk_recov_l2p_from_oob(pblk, line);
			goto next;
		}

		if (pblk_recov_l2p_from_emeta(pblk, line))
			pblk_recov_l2p_from_oob(pblk, line);

next:
		if (pblk_line_is_full(line)) {
			struct list_head *move_list;

			spin_lock(&line->lock);
			line->state = PBLK_LINESTATE_CLOSED;
			move_list = pblk_line_gc_list(pblk, line);
			spin_unlock(&line->lock);

			spin_lock(&l_mg->gc_lock);
			list_move_tail(&line->list, move_list);
			spin_unlock(&l_mg->gc_lock);

			kfree(line->map_bitmap);
			line->map_bitmap = NULL;
			line->smeta = NULL;
			line->emeta = NULL;
		} else {
			if (open_lines > 1)
				pr_err("pblk: failed to recover L2P\n");

			open_lines++;
			line->meta_line = meta_line;
			data_line = line;
		}
	}

	spin_lock(&l_mg->free_lock);
	if (!open_lines) {
		WARN_ON_ONCE(!test_and_clear_bit(meta_line,
							&l_mg->meta_bitmap));
		pblk_line_replace_data(pblk);
	} else {
		/* Allocate next line for preparation */
		l_mg->data_next = pblk_line_get(pblk);
		if (l_mg->data_next) {
			l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
			l_mg->data_next->type = PBLK_LINETYPE_DATA;
			is_next = 1;
		}
	}
	spin_unlock(&l_mg->free_lock);

	if (is_next)
		pblk_line_erase(pblk, l_mg->data_next);

out:
	if (found_lines != recovered_lines)
		pr_err("pblk: failed to recover all found lines %d/%d\n",
				found_lines, recovered_lines);

	return data_line;
}
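/*
 * Pad the current data line on tear down so that it can be closed and its
 * end metadata written out.
 */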
int pblk_recov_pad(struct pblk *pblk)
{
	struct pblk_line *line;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int left_msecs;
	int ret = 0;

	spin_lock(&l_mg->free_lock);
	line = l_mg->data_line;
	left_msecs = line->left_msecs;
	spin_unlock(&l_mg->free_lock);

	ret = pblk_recov_pad_oob(pblk, line, left_msecs);
	if (ret) {
		pr_err("pblk: Tear down padding failed (%d)\n", ret);
		return ret;
	}

	pblk_line_close_meta(pblk, line);
	return ret;
}