// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-recovery.c - pblk's recovery path
 *
 * The L2P recovery path is single threaded as the L2P table is updated in order
 * following the line sequence ID.
 */

#include "pblk.h"
#include "pblk-trace.h"
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
	u32 crc;

	crc = pblk_calc_emeta_crc(pblk, emeta_buf);
	if (le32_to_cpu(emeta_buf->crc) != crc)
		return 1;

	if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
		return 1;

	return 0;
}
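
/*
 * Rebuild the portion of the L2P table covered by a closed line from the
 * lba list stored in the line's emeta. Bad blocks are skipped; sectors
 * marked ADDR_EMPTY are invalidated instead of mapped.
 */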
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	__le64 *lba_list;
	u64 data_start, data_end;
	u64 nr_valid_lbas, nr_lbas = 0;
	u64 i;

	lba_list = emeta_to_lbas(pblk, emeta_buf);
	if (!lba_list)
		return 1;

	data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
	data_end = line->emeta_ssec;
	nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);

	for (i = data_start; i < data_end; i++) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_gen_ppa(pblk, i, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		/* Do not update bad blocks */
		if (test_bit(pos, line->blk_bitmap))
			continue;

		if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
			spin_lock(&line->lock);
			if (test_and_set_bit(i, line->invalid_bitmap))
				WARN_ONCE(1, "pblk: rec. double invalidate:\n");
			else
				le32_add_cpu(line->vsc, -1);
			spin_unlock(&line->lock);

			continue;
		}

		pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
		nr_lbas++;
	}

	if (nr_valid_lbas != nr_lbas)
		pblk_err(pblk, "line %d - inconsistent lba list(%llu/%llu)\n",
				line->id, nr_valid_lbas, nr_lbas);

	line->left_msecs = 0;

	return 0;
}
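
/*
 * Replay page allocation for the sectors found written on the device and
 * adjust the count of sectors the line has left to map.
 */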
static void pblk_update_line_wp(struct pblk *pblk, struct pblk_line *line,
				u64 written_secs)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int i;

	for (i = 0; i < written_secs; i += pblk->min_write_pgs)
		__pblk_alloc_page(pblk, line, pblk->min_write_pgs);

	spin_lock(&l_mg->free_lock);
	if (written_secs > line->left_msecs) {
		/*
		 * We have all data sectors written
		 * and some emeta sectors written too.
		 */
		line->left_msecs = 0;
	} else {
		/* We have only some data sectors written. */
		line->left_msecs -= written_secs;
	}
	spin_unlock(&l_mg->free_lock);
}
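
/*
 * Count the sectors written to an open line by summing the write pointers
 * of all its usable chunks, and sync the in-memory write pointer with it.
 */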
static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	u64 written_secs = 0;
	int valid_chunks = 0;
	int i;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct nvm_chk_meta *chunk = &line->chks[i];

		if (chunk->state & NVM_CHK_ST_OFFLINE)
			continue;

		written_secs += chunk->wp;
		valid_chunks++;
	}

	if (lm->blk_per_line - nr_bb != valid_chunks)
		pblk_err(pblk, "recovery line %d is bad\n", line->id);

	pblk_update_line_wp(pblk, line, written_secs - lm->smeta_sec);

	return written_secs;
}
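
/*
 * Resources for the OOB scan; allocated once and reused for every read
 * request issued while recovering a line.
 */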
struct pblk_recov_alloc {
	struct ppa_addr *ppa_list;
	void *meta_list;
	struct nvm_rq *rqd;
	void *data;
	dma_addr_t dma_ppa_list;
	dma_addr_t dma_meta_list;
};
static void pblk_recov_complete(struct kref *ref)
{
	struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);

	complete(&pad_rq->wait);
}
static void pblk_end_io_recov(struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	struct pblk_pad_rq *pad_rq = rqd->private;
	struct pblk *pblk = pad_rq->pblk;

	pblk_up_chunk(pblk, ppa_list[0]);

	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

	atomic_dec(&pblk->inflight_io);
	kref_put(&pad_rq->ref, pblk_recov_complete);
}
/* pad line using line bitmap. */
static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
			       int left_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	void *meta_list;
	struct pblk_pad_rq *pad_rq;
	struct nvm_rq *rqd;
	struct bio *bio;
	struct ppa_addr *ppa_list;
	void *data;
	__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	u64 w_ptr = line->cur_sec;
	int left_line_ppas, rq_ppas, rq_len;
	int i, j;
	int ret = 0;

	spin_lock(&line->lock);
	left_line_ppas = line->left_msecs;
	spin_unlock(&line->lock);

	pad_rq = kmalloc(sizeof(struct pblk_pad_rq), GFP_KERNEL);
	if (!pad_rq)
		return -ENOMEM;

	data = vzalloc(array_size(pblk->max_write_pgs, geo->csecs));
	if (!data) {
		ret = -ENOMEM;
		goto free_rq;
	}

	pad_rq->pblk = pblk;
	init_completion(&pad_rq->wait);
	kref_init(&pad_rq->ref);

next_pad_rq:
	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
	if (rq_ppas < pblk->min_write_pgs) {
		pblk_err(pblk, "corrupted pad line %d\n", line->id);
		goto fail_complete;
	}

	rq_len = rq_ppas * geo->csecs;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
						PBLK_VMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto fail_complete;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	ret = pblk_alloc_rqd_meta(pblk, rqd);
	if (ret) {
		pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
		bio_put(bio);
		goto fail_complete;
	}

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PWRITE;
	rqd->is_seq = 1;
	rqd->nr_ppas = rq_ppas;
	rqd->end_io = pblk_end_io_recov;
	rqd->private = pad_rq;

	ppa_list = nvm_rq_to_ppa_list(rqd);
	meta_list = rqd->meta_list;

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			w_ptr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
			struct ppa_addr dev_ppa;
			struct pblk_sec_meta *meta;
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);

			pblk_map_invalidate(pblk, dev_ppa);
			lba_list[w_ptr] = addr_empty;
			meta = pblk_get_meta(pblk, meta_list, i);
			meta->lba = addr_empty;
			ppa_list[i] = dev_ppa;
		}
	}

	kref_get(&pad_rq->ref);
	pblk_down_chunk(pblk, ppa_list[0]);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pblk_err(pblk, "I/O submission failed: %d\n", ret);
		pblk_up_chunk(pblk, ppa_list[0]);
		kref_put(&pad_rq->ref, pblk_recov_complete);
		pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
		bio_put(bio);
		goto fail_complete;
	}

	left_line_ppas -= rq_ppas;
	left_ppas -= rq_ppas;
	if (left_ppas && left_line_ppas)
		goto next_pad_rq;

fail_complete:
	kref_put(&pad_rq->ref, pblk_recov_complete);
	wait_for_completion(&pad_rq->wait);

	if (!pblk_line_is_full(line))
		pblk_err(pblk, "corrupted padded line: %d\n", line->id);

	vfree(data);
free_rq:
	kfree(pad_rq);
	return ret;
}
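
/*
 * Number of sectors to pad after a failed read: one optimal write size
 * (ws_opt) per cached write unit (mw_cunits) on every LUN, capped at the
 * sectors the line has left.
 */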
static int pblk_pad_distance(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int distance = geo->mw_cunits * geo->all_luns * geo->ws_opt;

	return (distance > line->left_msecs) ? line->left_msecs : distance;
}
/* Return a chunk belonging to a line by stripe (write order) index */
static struct nvm_chk_meta *pblk_get_stripe_chunk(struct pblk *pblk,
						  struct pblk_line *line,
						  int index)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	struct ppa_addr ppa;
	int pos;

	rlun = &pblk->luns[index];
	ppa = rlun->bppa;
	pos = pblk_ppa_to_pos(geo, ppa);

	return &line->chks[pos];
}
static int pblk_line_wps_are_unbalanced(struct pblk *pblk,
					struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int blk_in_line = lm->blk_per_line;
	struct nvm_chk_meta *chunk;
	u64 max_wp, min_wp;
	int i;

	i = find_first_zero_bit(line->blk_bitmap, blk_in_line);

	/* If there is one or zero good chunks in the line,
	 * the write pointers can't be unbalanced.
	 */
	if (i >= (blk_in_line - 1))
		return 0;

	chunk = pblk_get_stripe_chunk(pblk, line, i);
	max_wp = chunk->wp;
	if (max_wp > pblk->max_write_pgs)
		min_wp = max_wp - pblk->max_write_pgs;
	else
		min_wp = 0;

	i = find_next_zero_bit(line->blk_bitmap, blk_in_line, i + 1);
	while (i < blk_in_line) {
		chunk = pblk_get_stripe_chunk(pblk, line, i);
		if (chunk->wp > max_wp || chunk->wp < min_wp)
			return 1;

		i = find_next_zero_bit(line->blk_bitmap, blk_in_line, i + 1);
	}

	return 0;
}
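
/*
 * Scan an open line and rebuild its L2P mappings from the lbas stored in
 * the sectors' out-of-band metadata. A failed read is handled by padding
 * the line and retrying the request once.
 */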
static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
			       struct pblk_recov_alloc p)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	void *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	__le64 *lba_list;
	u64 paddr = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
	bool padded = false;
	int rq_ppas, rq_len;
	int i, j;
	int ret;
	u64 left_ppas = pblk_sec_in_open_line(pblk, line) - lm->smeta_sec;

	if (pblk_line_wps_are_unbalanced(pblk, line))
		pblk_warn(pblk, "recovering unbalanced line (%d)\n", line->id);

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	lba_list = emeta_to_lbas(pblk, line->emeta->buf);

next_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->csecs;

retry_rq:
	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	ppa_list = nvm_rq_to_ppa_list(rqd);

	if (pblk_io_aligned(pblk, rq_ppas))
		rqd->is_seq = 1;

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_gen_ppa(pblk, paddr, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			paddr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, paddr, line->id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++)
			ppa_list[i] =
				addr_to_gen_ppa(pblk, paddr + j, line->id);
	}

	ret = pblk_submit_io_sync(pblk, rqd);
	if (ret) {
		pblk_err(pblk, "I/O submission failed: %d\n", ret);
		bio_put(bio);
		return ret;
	}

	atomic_dec(&pblk->inflight_io);

	/* If a read fails, do a best effort by padding the line and retrying */
	if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
		int pad_distance, ret;

		if (padded) {
			pblk_log_read_err(pblk, rqd);
			return -EINTR;
		}

		pad_distance = pblk_pad_distance(pblk, line);
		ret = pblk_recov_pad_line(pblk, line, pad_distance);
		if (ret)
			return ret;

		padded = true;
		goto retry_rq;
	}

	pblk_get_packed_meta(pblk, rqd);

	for (i = 0; i < rqd->nr_ppas; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
		u64 lba = le64_to_cpu(meta->lba);

		lba_list[paddr++] = cpu_to_le64(lba);

		if (lba == ADDR_EMPTY || lba >= pblk->capacity)
			continue;

		line->nr_valid_lbas++;
		pblk_update_map(pblk, lba, ppa_list[i]);
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_rq;

#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ON(padded && !pblk_line_is_full(line));
#endif

	return 0;
}
/* Scan line for lbas on out of bound area */
static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_rq *rqd;
	struct ppa_addr *ppa_list;
	void *meta_list;
	struct pblk_recov_alloc p;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int ret = 0;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
	if (!meta_list)
		return -ENOMEM;

	ppa_list = (void *)(meta_list) + pblk_dma_meta_size(pblk);
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);

	data = kcalloc(pblk->max_write_pgs, geo->csecs, GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto free_meta_list;
	}

	rqd = mempool_alloc(&pblk->r_rq_pool, GFP_KERNEL);
	memset(rqd, 0, pblk_g_rq_size);

	p.ppa_list = ppa_list;
	p.meta_list = meta_list;
	p.rqd = rqd;
	p.data = data;
	p.dma_ppa_list = dma_ppa_list;
	p.dma_meta_list = dma_meta_list;

	ret = pblk_recov_scan_oob(pblk, line, p);
	if (ret) {
		pblk_err(pblk, "could not recover L2P from OOB\n");
		goto out;
	}

	if (pblk_line_is_full(line))
		pblk_line_recov_close(pblk, line);

out:
	mempool_free(rqd, &pblk->r_rq_pool);
	kfree(data);
free_meta_list:
	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);

	return ret;
}
/* Insert lines ordered by sequence number (seq_nr) on list */
static void pblk_recov_line_add_ordered(struct list_head *head,
					struct pblk_line *line)
{
	struct pblk_line *t = NULL;

	list_for_each_entry(t, head, list)
		if (t->seq_nr > line->seq_nr)
			break;

	__list_add(&line->list, t->list.prev, &t->list);
}
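
/*
 * Find the first emeta sector by walking backwards from the end of the
 * line, counting only sectors that do not fall on bad blocks.
 */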
static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned int emeta_secs;
	u64 emeta_start;
	struct ppa_addr ppa;
	int pos;

	emeta_secs = lm->emeta_sec[0];
	emeta_start = lm->sec_per_line;

	while (emeta_secs) {
		emeta_start--;
		ppa = addr_to_gen_ppa(pblk, emeta_start, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);
		if (!test_bit(pos, line->blk_bitmap))
			emeta_secs--;
	}

	return emeta_start;
}
static int pblk_recov_check_line_version(struct pblk *pblk,
					 struct line_emeta *emeta)
{
	struct line_header *header = &emeta->header;

	if (header->version_major != EMETA_VERSION_MAJOR) {
		pblk_err(pblk, "line major version mismatch: %d, expected: %d\n",
			 header->version_major, EMETA_VERSION_MAJOR);
		return 1;
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	if (header->version_minor > EMETA_VERSION_MINOR)
		pblk_info(pblk, "newer line minor version found: %d\n",
			  header->version_minor);
#endif

	return 0;
}
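
/* Restore the write amplification counters from the copy kept in emeta. */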
static void pblk_recov_wa_counters(struct pblk *pblk,
				   struct line_emeta *emeta)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct line_header *header = &emeta->header;
	struct wa_counters *wa = emeta_to_wa(lm, emeta);

	/* WA counters were introduced in emeta version 0.2 */
	if (header->version_major > 0 || header->version_minor >= 2) {
		u64 user = le64_to_cpu(wa->user);
		u64 pad = le64_to_cpu(wa->pad);
		u64 gc = le64_to_cpu(wa->gc);

		atomic64_set(&pblk->user_wa, user);
		atomic64_set(&pblk->pad_wa, pad);
		atomic64_set(&pblk->gc_wa, gc);

		pblk->user_rst_wa = user;
		pblk->pad_rst_wa = pad;
		pblk->gc_rst_wa = gc;
	}
}
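
/*
 * A line is considered written if the chunk holding its smeta is closed,
 * or open with the write pointer at or beyond the smeta sectors.
 */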
static int pblk_line_was_written(struct pblk_line *line,
				 struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_chk_meta *chunk;
	struct ppa_addr bppa;
	int smeta_blk;

	if (line->state == PBLK_LINESTATE_BAD)
		return 0;

	smeta_blk = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (smeta_blk >= lm->blk_per_line)
		return 0;

	bppa = pblk->luns[smeta_blk].bppa;
	chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];

	if (chunk->state & NVM_CHK_ST_CLOSED ||
	    (chunk->state & NVM_CHK_ST_OPEN
	     && chunk->wp >= lm->smeta_sec))
		return 1;

	return 0;
}
static bool pblk_line_is_open(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int i;

	for (i = 0; i < lm->blk_per_line; i++)
		if (line->chks[i].state & NVM_CHK_ST_OPEN)
			return true;

	return false;
}
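
/*
 * Scan recovery entry point: collect all written lines ordered by sequence
 * number, then rebuild the L2P table line by line, preferring emeta and
 * falling back to the OOB area. Returns the open data line to resume host
 * writes on (NULL if none), or an ERR_PTR on incompatible line versions.
 */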
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line, *tline, *data_line = NULL;
	struct pblk_smeta *smeta;
	struct pblk_emeta *emeta;
	struct line_smeta *smeta_buf;
	int found_lines = 0, recovered_lines = 0, open_lines = 0;
	int is_next = 0;
	int meta_line;
	int i, valid_uuid = 0;
	LIST_HEAD(recov_list);

	/* TODO: Implement FTL snapshot */

	/* Scan recovery - takes place when FTL snapshot fails */
	spin_lock(&l_mg->free_lock);
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	set_bit(meta_line, &l_mg->meta_bitmap);
	smeta = l_mg->sline_meta[meta_line];
	emeta = l_mg->eline_meta[meta_line];
	smeta_buf = (struct line_smeta *)smeta;
	spin_unlock(&l_mg->free_lock);

	/* Order data lines using their sequence number */
	for (i = 0; i < l_mg->nr_lines; i++) {
		u32 crc;

		line = &pblk->lines[i];

		memset(smeta, 0, lm->smeta_len);
		line->smeta = smeta;
		line->lun_bitmap = ((void *)(smeta_buf)) +
						sizeof(struct line_smeta);

		if (!pblk_line_was_written(line, pblk))
			continue;

		/* Lines that cannot be read are assumed as not written here */
		if (pblk_line_smeta_read(pblk, line))
			continue;

		crc = pblk_calc_smeta_crc(pblk, smeta_buf);
		if (le32_to_cpu(smeta_buf->crc) != crc)
			continue;

		if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
			continue;

		if (smeta_buf->header.version_major != SMETA_VERSION_MAJOR) {
			pblk_err(pblk, "found incompatible line version %u\n",
					smeta_buf->header.version_major);
			return ERR_PTR(-EINVAL);
		}

		/* The first valid instance uuid is used for initialization */
		if (!valid_uuid) {
			guid_copy(&pblk->instance_uuid,
				  (guid_t *)&smeta_buf->header.uuid);
			valid_uuid = 1;
		}

		if (!guid_equal(&pblk->instance_uuid,
				(guid_t *)&smeta_buf->header.uuid)) {
			pblk_debug(pblk, "ignore line %u due to uuid mismatch\n",
					i);
			continue;
		}

		/* Update line metadata */
		spin_lock(&line->lock);
		line->id = le32_to_cpu(smeta_buf->header.id);
		line->type = le16_to_cpu(smeta_buf->header.type);
		line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
		spin_unlock(&line->lock);

		/* Update general metadata */
		spin_lock(&l_mg->free_lock);
		if (line->seq_nr >= l_mg->d_seq_nr)
			l_mg->d_seq_nr = line->seq_nr + 1;
		l_mg->nr_free_lines--;
		spin_unlock(&l_mg->free_lock);

		if (pblk_line_recov_alloc(pblk, line))
			goto out;

		pblk_recov_line_add_ordered(&recov_list, line);
		found_lines++;
		pblk_debug(pblk, "recovering data line %d, seq:%llu\n",
				line->id, smeta_buf->seq_nr);
	}

	if (!found_lines) {
		guid_gen(&pblk->instance_uuid);

		spin_lock(&l_mg->free_lock);
		WARN_ON_ONCE(!test_and_clear_bit(meta_line,
							&l_mg->meta_bitmap));
		spin_unlock(&l_mg->free_lock);

		goto out;
	}

	/* Verify closed blocks and recover this portion of L2P table */
	list_for_each_entry_safe(line, tline, &recov_list, list) {
		recovered_lines++;

		line->emeta_ssec = pblk_line_emeta_start(pblk, line);
		line->emeta = emeta;
		memset(line->emeta->buf, 0, lm->emeta_len[0]);

		if (pblk_line_is_open(pblk, line)) {
			pblk_recov_l2p_from_oob(pblk, line);
			goto next;
		}

		if (pblk_line_emeta_read(pblk, line, line->emeta->buf)) {
			pblk_recov_l2p_from_oob(pblk, line);
			goto next;
		}

		if (pblk_recov_check_emeta(pblk, line->emeta->buf)) {
			pblk_recov_l2p_from_oob(pblk, line);
			goto next;
		}

		if (pblk_recov_check_line_version(pblk, line->emeta->buf))
			return ERR_PTR(-EINVAL);

		pblk_recov_wa_counters(pblk, line->emeta->buf);

		if (pblk_recov_l2p_from_emeta(pblk, line))
			pblk_recov_l2p_from_oob(pblk, line);

next:
		if (pblk_line_is_full(line)) {
			struct list_head *move_list;

			spin_lock(&line->lock);
			line->state = PBLK_LINESTATE_CLOSED;
			trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
			move_list = pblk_line_gc_list(pblk, line);
			spin_unlock(&line->lock);

			spin_lock(&l_mg->gc_lock);
			list_move_tail(&line->list, move_list);
			spin_unlock(&l_mg->gc_lock);

			mempool_free(line->map_bitmap, l_mg->bitmap_pool);
			line->map_bitmap = NULL;
			line->smeta = NULL;
			line->emeta = NULL;
		} else {
			spin_lock(&line->lock);
			line->state = PBLK_LINESTATE_OPEN;
			spin_unlock(&line->lock);

			line->emeta->mem = 0;
			atomic_set(&line->emeta->sync, 0);

			trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);

			data_line = line;
			line->meta_line = meta_line;

			open_lines++;
		}
	}

	if (!open_lines) {
		spin_lock(&l_mg->free_lock);
		WARN_ON_ONCE(!test_and_clear_bit(meta_line,
							&l_mg->meta_bitmap));
		spin_unlock(&l_mg->free_lock);
	} else {
		spin_lock(&l_mg->free_lock);
		l_mg->data_line = data_line;
		/* Allocate next line for preparation */
		l_mg->data_next = pblk_line_get(pblk);
		if (l_mg->data_next) {
			l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
			l_mg->data_next->type = PBLK_LINETYPE_DATA;
			is_next = 1;
		}
		spin_unlock(&l_mg->free_lock);
	}

	if (is_next)
		pblk_line_erase(pblk, l_mg->data_next);

out:
	if (found_lines != recovered_lines)
		pblk_err(pblk, "failed to recover all found lines %d/%d\n",
				found_lines, recovered_lines);

	return data_line;
}
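
/*
 * Pad the current data line out to its full size so that it can be closed
 * cleanly on tear down.
 */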
int pblk_recov_pad(struct pblk *pblk)
{
	struct pblk_line *line;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int left_msecs;
	int ret = 0;

	spin_lock(&l_mg->free_lock);
	line = l_mg->data_line;
	left_msecs = line->left_msecs;
	spin_unlock(&l_mg->free_lock);

	ret = pblk_recov_pad_line(pblk, line, left_msecs);
	if (ret) {
		pblk_err(pblk, "tear down padding failed (%d)\n", ret);
		return ret;
	}

	pblk_line_close_meta(pblk, line);
	return ret;
}