// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-recovery.c - pblk's recovery path
 *
 * The L2P recovery path is single threaded as the L2P table is updated in order
 * following the line sequence ID.
 */

#include "pblk.h"
#include "pblk-trace.h"

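/*
 * Sanity-check end-of-line metadata read from the device: both the CRC and
 * the magic identifier must match before the emeta LBA list is trusted.
 */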
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
	u32 crc;

	crc = pblk_calc_emeta_crc(pblk, emeta_buf);
	if (le32_to_cpu(emeta_buf->crc) != crc)
		return 1;

	if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
		return 1;

	return 0;
}

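/*
 * Rebuild the L2P entries of a fully written line from the LBA list stored in
 * its end-of-line metadata. Bad blocks are skipped, and sectors marked
 * ADDR_EMPTY are accounted as invalidated instead of being mapped.
 */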
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	__le64 *lba_list;
	u64 data_start, data_end;
	u64 nr_valid_lbas, nr_lbas = 0;
	u64 i;

	lba_list = emeta_to_lbas(pblk, emeta_buf);
	if (!lba_list)
		return 1;

	data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
	data_end = line->emeta_ssec;
	nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);

	for (i = data_start; i < data_end; i++) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_gen_ppa(pblk, i, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		/* Do not update bad blocks */
		if (test_bit(pos, line->blk_bitmap))
			continue;

		if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
			spin_lock(&line->lock);
			if (test_and_set_bit(i, line->invalid_bitmap))
				WARN_ONCE(1, "pblk: rec. double invalidate:\n");
			else
				le32_add_cpu(line->vsc, -1);
			spin_unlock(&line->lock);

			continue;
		}

		pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
		nr_lbas++;
	}

	if (nr_valid_lbas != nr_lbas)
		pblk_err(pblk, "line %d - inconsistent lba list(%llu/%llu)\n",
				line->id, nr_valid_lbas, nr_lbas);

	line->left_msecs = 0;

	return 0;
}

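/*
 * Replay the line's in-memory write pointer by allocating pages up to the
 * number of sectors found written on the device, and adjust left_msecs
 * accordingly.
 */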
static void pblk_update_line_wp(struct pblk *pblk, struct pblk_line *line,
				u64 written_secs)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int i;

	for (i = 0; i < written_secs; i += pblk->min_write_pgs)
		__pblk_alloc_page(pblk, line, pblk->min_write_pgs);

	spin_lock(&l_mg->free_lock);
	if (written_secs > line->left_msecs) {
		/*
		 * We have all data sectors written
		 * and some emeta sectors written too.
		 */
		line->left_msecs = 0;
	} else {
		/* We have only some data sectors written. */
		line->left_msecs -= written_secs;
	}
	spin_unlock(&l_mg->free_lock);
}

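/*
 * Sum the write pointers of all usable chunks in an open line to find how
 * many sectors were persisted before the crash, then fold that count (minus
 * the smeta sectors) into the line's in-memory write pointer.
 */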
static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	u64 written_secs = 0;
	int valid_chunks = 0;
	int i;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct nvm_chk_meta *chunk = &line->chks[i];

		if (chunk->state & NVM_CHK_ST_OFFLINE)
			continue;

		written_secs += chunk->wp;
		valid_chunks++;
	}

	if (lm->blk_per_line - nr_bb != valid_chunks)
		pblk_err(pblk, "recovery line %d is bad\n", line->id);

	pblk_update_line_wp(pblk, line, written_secs - lm->smeta_sec);

	return written_secs;
}

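/*
 * Preallocated resources shared by all OOB scan requests of a line, so the
 * recovery read loop does not have to allocate per request.
 */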
struct pblk_recov_alloc {
	struct ppa_addr *ppa_list;
	void *meta_list;
	struct nvm_rq *rqd;
	void *data;
	dma_addr_t dma_ppa_list;
	dma_addr_t dma_meta_list;
};

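/*
 * Padding requests are tracked with a kref; the last completion wakes up the
 * waiter in pblk_recov_pad_line().
 */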
static void pblk_recov_complete(struct kref *ref)
{
	struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);

	complete(&pad_rq->wait);
}

static void pblk_end_io_recov(struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	struct pblk_pad_rq *pad_rq = rqd->private;
	struct pblk *pblk = pad_rq->pblk;

	pblk_up_chunk(pblk, ppa_list[0]);

	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

	atomic_dec(&pblk->inflight_io);
	kref_put(&pad_rq->ref, pblk_recov_complete);
}

/*
 * Pad the line using the line bitmap: up to left_ppas sectors are written as
 * empty (ADDR_EMPTY) so that the affected chunks can be closed.
 */
static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
			       int left_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	void *meta_list;
	struct pblk_pad_rq *pad_rq;
	struct nvm_rq *rqd;
	struct ppa_addr *ppa_list;
	void *data;
	__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	u64 w_ptr = line->cur_sec;
	int left_line_ppas, rq_ppas;
	int i, j;
	int ret = 0;

	spin_lock(&line->lock);
	left_line_ppas = line->left_msecs;
	spin_unlock(&line->lock);

	pad_rq = kmalloc(sizeof(struct pblk_pad_rq), GFP_KERNEL);
	if (!pad_rq)
		return -ENOMEM;

	data = vzalloc(array_size(pblk->max_write_pgs, geo->csecs));
	if (!data) {
		ret = -ENOMEM;
		goto free_rq;
	}

	pad_rq->pblk = pblk;
	init_completion(&pad_rq->wait);
	kref_init(&pad_rq->ref);

next_pad_rq:
	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
	if (rq_ppas < pblk->min_write_pgs) {
		pblk_err(pblk, "corrupted pad line %d\n", line->id);
		goto fail_complete;
	}

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	ret = pblk_alloc_rqd_meta(pblk, rqd);
	if (ret) {
		pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
		goto fail_complete;
	}

	rqd->opcode = NVM_OP_PWRITE;
	rqd->is_seq = 1;
	rqd->nr_ppas = rq_ppas;
	rqd->end_io = pblk_end_io_recov;
	rqd->private = pad_rq;

	ppa_list = nvm_rq_to_ppa_list(rqd);
	meta_list = rqd->meta_list;

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			w_ptr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
			struct ppa_addr dev_ppa;
			struct pblk_sec_meta *meta;
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);

			pblk_map_invalidate(pblk, dev_ppa);
			lba_list[w_ptr] = addr_empty;
			meta = pblk_get_meta(pblk, meta_list, i);
			meta->lba = addr_empty;
			ppa_list[i] = dev_ppa;
		}
	}

	kref_get(&pad_rq->ref);
	pblk_down_chunk(pblk, ppa_list[0]);

	ret = pblk_submit_io(pblk, rqd, data);
	if (ret) {
		pblk_err(pblk, "I/O submission failed: %d\n", ret);
		pblk_up_chunk(pblk, ppa_list[0]);
		kref_put(&pad_rq->ref, pblk_recov_complete);
		pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
		goto fail_complete;
	}

	left_line_ppas -= rq_ppas;
	left_ppas -= rq_ppas;
	if (left_ppas && left_line_ppas)
		goto next_pad_rq;

fail_complete:
	kref_put(&pad_rq->ref, pblk_recov_complete);
	wait_for_completion(&pad_rq->wait);

	if (!pblk_line_is_full(line))
		pblk_err(pblk, "corrupted padded line: %d\n", line->id);

	vfree(data);
free_rq:
	kfree(pad_rq);
	return ret;
}

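/*
 * Number of sectors to pad after a failed read: roughly enough to advance the
 * write pointers past the device's minimum-write cache window (mw_cunits)
 * across all LUNs, but never more than what is left in the line.
 */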
static int pblk_pad_distance(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int distance = geo->mw_cunits * geo->all_luns * geo->ws_opt;

	return (distance > line->left_msecs) ? line->left_msecs : distance;
}

/* Return a chunk belonging to a line by stripe (write order) index */
static struct nvm_chk_meta *pblk_get_stripe_chunk(struct pblk *pblk,
						  struct pblk_line *line,
						  int index)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	struct ppa_addr ppa;
	int pos;

	rlun = &pblk->luns[index];
	ppa = rlun->bppa;
	pos = pblk_ppa_to_pos(geo, ppa);

	return &line->chks[pos];
}

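/*
 * After a crash, the write pointers of the good chunks in a line are expected
 * to stay within pblk->max_write_pgs of the first good chunk's write pointer;
 * a larger spread marks the line as unbalanced and triggers a warning in the
 * caller.
 */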
static int pblk_line_wps_are_unbalanced(struct pblk *pblk,
					struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int blk_in_line = lm->blk_per_line;
	struct nvm_chk_meta *chunk;
	u64 max_wp, min_wp;
	int i;

	i = find_first_zero_bit(line->blk_bitmap, blk_in_line);

	/* If there is one or zero good chunks in the line,
	 * the write pointers can't be unbalanced.
	 */
	if (i >= (blk_in_line - 1))
		return 0;

	chunk = pblk_get_stripe_chunk(pblk, line, i);
	max_wp = chunk->wp;
	if (max_wp > pblk->max_write_pgs)
		min_wp = max_wp - pblk->max_write_pgs;
	else
		min_wp = 0;

	i = find_next_zero_bit(line->blk_bitmap, blk_in_line, i + 1);
	while (i < blk_in_line) {
		chunk = pblk_get_stripe_chunk(pblk, line, i);
		if (chunk->wp > max_wp || chunk->wp < min_wp)
			return 1;

		i = find_next_zero_bit(line->blk_bitmap, blk_in_line, i + 1);
	}

	return 0;
}

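/*
 * Walk an open line sequentially and rebuild L2P entries from the per-sector
 * OOB metadata. If a read fails, the line is padded once and the request is
 * retried before giving up.
 */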
static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
			       struct pblk_recov_alloc p)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	void *meta_list;
	struct nvm_rq *rqd;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	__le64 *lba_list;
	u64 paddr = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
	bool padded = false;
	int rq_ppas;
	int i, j;
	int ret;
	u64 left_ppas = pblk_sec_in_open_line(pblk, line) - lm->smeta_sec;

	if (pblk_line_wps_are_unbalanced(pblk, line))
		pblk_warn(pblk, "recovering unbalanced line (%d)\n", line->id);

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	lba_list = emeta_to_lbas(pblk, line->emeta->buf);

next_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;

retry_rq:
	rqd->opcode = NVM_OP_PREAD;
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	ppa_list = nvm_rq_to_ppa_list(rqd);

	if (pblk_io_aligned(pblk, rq_ppas))
		rqd->is_seq = 1;

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_gen_ppa(pblk, paddr, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			paddr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, paddr, line->id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++)
			ppa_list[i] =
				addr_to_gen_ppa(pblk, paddr + j, line->id);
	}

	ret = pblk_submit_io_sync(pblk, rqd, data);
	if (ret) {
		pblk_err(pblk, "I/O submission failed: %d\n", ret);
		return ret;
	}

	atomic_dec(&pblk->inflight_io);

	/* If a read fails, do a best effort by padding the line and retrying */
	if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
		int pad_distance, ret;

		if (padded) {
			pblk_log_read_err(pblk, rqd);
			return -EINTR;
		}

		pad_distance = pblk_pad_distance(pblk, line);
		ret = pblk_recov_pad_line(pblk, line, pad_distance);
		if (ret)
			return ret;

		padded = true;
		goto retry_rq;
	}

	pblk_get_packed_meta(pblk, rqd);

	for (i = 0; i < rqd->nr_ppas; i++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
		u64 lba = le64_to_cpu(meta->lba);

		lba_list[paddr++] = cpu_to_le64(lba);

		if (lba == ADDR_EMPTY || lba >= pblk->capacity)
			continue;

		line->nr_valid_lbas++;
		pblk_update_map(pblk, lba, ppa_list[i]);
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_rq;

#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ON(padded && !pblk_line_is_full(line));
#endif

	return 0;
}

/* Scan line for lbas on out of bound area */
static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_rq *rqd;
	struct ppa_addr *ppa_list;
	void *meta_list;
	struct pblk_recov_alloc p;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int ret = 0;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
	if (!meta_list)
		return -ENOMEM;

	ppa_list = (void *)(meta_list) + pblk_dma_meta_size(pblk);
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);

	data = kcalloc(pblk->max_write_pgs, geo->csecs, GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto free_meta_list;
	}

	rqd = mempool_alloc(&pblk->r_rq_pool, GFP_KERNEL);
	memset(rqd, 0, pblk_g_rq_size);

	p.ppa_list = ppa_list;
	p.meta_list = meta_list;
	p.rqd = rqd;
	p.data = data;
	p.dma_ppa_list = dma_ppa_list;
	p.dma_meta_list = dma_meta_list;

	ret = pblk_recov_scan_oob(pblk, line, p);
	if (ret) {
		pblk_err(pblk, "could not recover L2P from OOB\n");
		goto out;
	}

	if (pblk_line_is_full(line))
		pblk_line_recov_close(pblk, line);

out:
	mempool_free(rqd, &pblk->r_rq_pool);
	kfree(data);
free_meta_list:
	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);

	return ret;
}

/* Insert lines ordered by sequence number (seq_nr) on list */
static void pblk_recov_line_add_ordered(struct list_head *head,
					struct pblk_line *line)
{
	struct pblk_line *t = NULL;

	list_for_each_entry(t, head, list)
		if (t->seq_nr > line->seq_nr)
			break;

	__list_add(&line->list, t->list.prev, &t->list);
}

static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned int emeta_secs;
	u64 emeta_start;
	struct ppa_addr ppa;
	int pos;

	emeta_secs = lm->emeta_sec[0];
	emeta_start = lm->sec_per_line;

	while (emeta_secs) {
		emeta_start--;
		ppa = addr_to_gen_ppa(pblk, emeta_start, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);
		if (!test_bit(pos, line->blk_bitmap))
			emeta_secs--;
	}

	return emeta_start;
}

static int pblk_recov_check_line_version(struct pblk *pblk,
					 struct line_emeta *emeta)
{
	struct line_header *header = &emeta->header;

	if (header->version_major != EMETA_VERSION_MAJOR) {
		pblk_err(pblk, "line major version mismatch: %d, expected: %d\n",
			 header->version_major, EMETA_VERSION_MAJOR);
		return 1;
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	if (header->version_minor > EMETA_VERSION_MINOR)
		pblk_info(pblk, "newer line minor version found: %d\n",
				header->version_minor);
#endif

	return 0;
}

static void pblk_recov_wa_counters(struct pblk *pblk,
				   struct line_emeta *emeta)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct line_header *header = &emeta->header;
	struct wa_counters *wa = emeta_to_wa(lm, emeta);

	/* WA counters were introduced in emeta version 0.2 */
	if (header->version_major > 0 || header->version_minor >= 2) {
		u64 user = le64_to_cpu(wa->user);
		u64 pad = le64_to_cpu(wa->pad);
		u64 gc = le64_to_cpu(wa->gc);

		atomic64_set(&pblk->user_wa, user);
		atomic64_set(&pblk->pad_wa, pad);
		atomic64_set(&pblk->gc_wa, gc);

		pblk->user_rst_wa = user;
		pblk->pad_rst_wa = pad;
		pblk->gc_rst_wa = gc;
	}
}

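/*
 * A line is considered written if the chunk holding its start-of-line
 * metadata is closed, or open with a write pointer past the smeta sectors.
 */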
static int pblk_line_was_written(struct pblk_line *line,
				 struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_chk_meta *chunk;
	struct ppa_addr bppa;
	int smeta_blk;

	if (line->state == PBLK_LINESTATE_BAD)
		return 0;

	smeta_blk = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (smeta_blk >= lm->blk_per_line)
		return 0;

	bppa = pblk->luns[smeta_blk].bppa;
	chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];

	if (chunk->state & NVM_CHK_ST_CLOSED ||
	    (chunk->state & NVM_CHK_ST_OPEN
	     && chunk->wp >= lm->smeta_sec))
		return 1;

	return 0;
}

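/* A line is open if any of its chunks is still in the open state. */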
static bool pblk_line_is_open(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int i;

	for (i = 0; i < lm->blk_per_line; i++)
		if (line->chks[i].state & NVM_CHK_ST_OPEN)
			return true;

	return false;
}

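/*
 * Scan-based recovery entry point: collect written lines ordered by sequence
 * number, rebuild the L2P table from emeta where possible (falling back to
 * the OOB scan otherwise) and return the line left open for new writes, if
 * any.
 */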
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line, *tline, *data_line = NULL;
	struct pblk_smeta *smeta;
	struct pblk_emeta *emeta;
	struct line_smeta *smeta_buf;
	int found_lines = 0, recovered_lines = 0, open_lines = 0;
	int is_next = 0;
	int meta_line;
	int i, valid_uuid = 0;
	LIST_HEAD(recov_list);

	/* TODO: Implement FTL snapshot */

	/* Scan recovery - takes place when FTL snapshot fails */
	spin_lock(&l_mg->free_lock);
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	set_bit(meta_line, &l_mg->meta_bitmap);
	smeta = l_mg->sline_meta[meta_line];
	emeta = l_mg->eline_meta[meta_line];
	smeta_buf = (struct line_smeta *)smeta;
	spin_unlock(&l_mg->free_lock);

	/* Order data lines using their sequence number */
	for (i = 0; i < l_mg->nr_lines; i++) {
		u32 crc;

		line = &pblk->lines[i];

		memset(smeta, 0, lm->smeta_len);
		line->smeta = smeta;
		line->lun_bitmap = ((void *)(smeta_buf)) +
						sizeof(struct line_smeta);

		if (!pblk_line_was_written(line, pblk))
			continue;

		/* Lines that cannot be read are assumed as not written here */
		if (pblk_line_smeta_read(pblk, line))
			continue;

		crc = pblk_calc_smeta_crc(pblk, smeta_buf);
		if (le32_to_cpu(smeta_buf->crc) != crc)
			continue;

		if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
			continue;

		if (smeta_buf->header.version_major != SMETA_VERSION_MAJOR) {
			pblk_err(pblk, "found incompatible line version %u\n",
					smeta_buf->header.version_major);
			return ERR_PTR(-EINVAL);
		}

		/* The first valid instance uuid is used for initialization */
		if (!valid_uuid) {
			guid_copy(&pblk->instance_uuid,
				  (guid_t *)&smeta_buf->header.uuid);
			valid_uuid = 1;
		}

		if (!guid_equal(&pblk->instance_uuid,
				(guid_t *)&smeta_buf->header.uuid)) {
			pblk_debug(pblk, "ignore line %u due to uuid mismatch\n",
					i);
			continue;
		}

		/* Update line metadata */
		spin_lock(&line->lock);
		line->id = le32_to_cpu(smeta_buf->header.id);
		line->type = le16_to_cpu(smeta_buf->header.type);
		line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
		spin_unlock(&line->lock);

		/* Update general metadata */
		spin_lock(&l_mg->free_lock);
		if (line->seq_nr >= l_mg->d_seq_nr)
			l_mg->d_seq_nr = line->seq_nr + 1;
		l_mg->nr_free_lines--;
		spin_unlock(&l_mg->free_lock);

		if (pblk_line_recov_alloc(pblk, line))
			goto out;

		pblk_recov_line_add_ordered(&recov_list, line);
		found_lines++;
		pblk_debug(pblk, "recovering data line %d, seq:%llu\n",
						line->id, smeta_buf->seq_nr);
	}

	if (!found_lines) {
		guid_gen(&pblk->instance_uuid);

		spin_lock(&l_mg->free_lock);
		WARN_ON_ONCE(!test_and_clear_bit(meta_line,
							&l_mg->meta_bitmap));
		spin_unlock(&l_mg->free_lock);

		goto out;
	}

	/* Verify closed blocks and recover this portion of L2P table */
	list_for_each_entry_safe(line, tline, &recov_list, list) {
		recovered_lines++;

		line->emeta_ssec = pblk_line_emeta_start(pblk, line);
		line->emeta = emeta;
		memset(line->emeta->buf, 0, lm->emeta_len[0]);

		if (pblk_line_is_open(pblk, line)) {
			pblk_recov_l2p_from_oob(pblk, line);
			goto next;
		}

		if (pblk_line_emeta_read(pblk, line, line->emeta->buf)) {
			pblk_recov_l2p_from_oob(pblk, line);
			goto next;
		}

		if (pblk_recov_check_emeta(pblk, line->emeta->buf)) {
			pblk_recov_l2p_from_oob(pblk, line);
			goto next;
		}

		if (pblk_recov_check_line_version(pblk, line->emeta->buf))
			return ERR_PTR(-EINVAL);

		pblk_recov_wa_counters(pblk, line->emeta->buf);

		if (pblk_recov_l2p_from_emeta(pblk, line))
			pblk_recov_l2p_from_oob(pblk, line);

next:
		if (pblk_line_is_full(line)) {
			struct list_head *move_list;

			spin_lock(&line->lock);
			line->state = PBLK_LINESTATE_CLOSED;
			trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
			move_list = pblk_line_gc_list(pblk, line);
			spin_unlock(&line->lock);

			spin_lock(&l_mg->gc_lock);
			list_move_tail(&line->list, move_list);
			spin_unlock(&l_mg->gc_lock);

			mempool_free(line->map_bitmap, l_mg->bitmap_pool);
			line->map_bitmap = NULL;
			line->smeta = NULL;
			line->emeta = NULL;
		} else {
			spin_lock(&line->lock);
			line->state = PBLK_LINESTATE_OPEN;
			spin_unlock(&line->lock);

			line->emeta->mem = 0;
			atomic_set(&line->emeta->sync, 0);

			trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);

			data_line = line;
			line->meta_line = meta_line;

			open_lines++;
		}
	}

	if (!open_lines) {
		spin_lock(&l_mg->free_lock);
		WARN_ON_ONCE(!test_and_clear_bit(meta_line,
							&l_mg->meta_bitmap));
		spin_unlock(&l_mg->free_lock);
	} else {
		spin_lock(&l_mg->free_lock);
		l_mg->data_line = data_line;
		/* Allocate next line for preparation */
		l_mg->data_next = pblk_line_get(pblk);
		if (l_mg->data_next) {
			l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
			l_mg->data_next->type = PBLK_LINETYPE_DATA;
			is_next = 1;
		}
		spin_unlock(&l_mg->free_lock);
	}

	if (is_next)
		pblk_line_erase(pblk, l_mg->data_next);

out:
	if (found_lines != recovered_lines)
		pblk_err(pblk, "failed to recover all found lines %d/%d\n",
				found_lines, recovered_lines);

	return data_line;
}

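/*
 * Pad the currently open data line on tear down so that its end-of-line
 * metadata can be written and the line closed.
 */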
int pblk_recov_pad(struct pblk *pblk)
{
	struct pblk_line *line;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int left_msecs;
	int ret = 0;

	spin_lock(&l_mg->free_lock);
	line = l_mg->data_line;
	left_msecs = line->left_msecs;
	spin_unlock(&l_mg->free_lock);

	ret = pblk_recov_pad_line(pblk, line, left_msecs);
	if (ret) {
		pblk_err(pblk, "tear down padding failed (%d)\n", ret);
		return ret;
	}

	pblk_line_close_meta(pblk, line);
	return ret;
}