// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 */

#define CREATE_TRACE_POINTS

#include "pblk.h"
#include "pblk-trace.h"

static void pblk_line_mark_bb(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct ppa_addr *ppa = line_ws->priv;
	int ret;

	ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
	if (ret) {
		struct pblk_line *line;
		int pos;

		line = pblk_ppa_to_line(pblk, *ppa);
		pos = pblk_ppa_to_pos(&dev->geo, *ppa);

		pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
				line->id, pos);
	}

	kfree(ppa);
	mempool_free(line_ws, &pblk->gen_ws_pool);
}

static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
			 struct ppa_addr ppa_addr)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa;
	int pos = pblk_ppa_to_pos(geo, ppa_addr);

	pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
	atomic_long_inc(&pblk->erase_failed);

	atomic_dec(&line->blk_in_line);
	if (test_and_set_bit(pos, line->blk_bitmap))
		pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
							line->id, pos);

	/* Not necessary to mark bad blocks on 2.0 spec. */
	if (geo->version == NVM_OCSSD_SPEC_20)
		return;

	ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
	if (!ppa)
		return;

	*ppa = ppa_addr;
	pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
						GFP_ATOMIC, pblk->bb_wq);
}

static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_chk_meta *chunk;
	struct pblk_line *line;
	int pos;

	line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
	pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
	chunk = &line->chks[pos];

	atomic_dec(&line->left_seblks);

	if (rqd->error) {
		trace_pblk_chunk_reset(pblk_disk_name(pblk),
				&rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED);

		chunk->state = NVM_CHK_ST_OFFLINE;
		pblk_mark_bb(pblk, line, rqd->ppa_addr);
	} else {
		trace_pblk_chunk_reset(pblk_disk_name(pblk),
				&rqd->ppa_addr, PBLK_CHUNK_RESET_DONE);

		chunk->state = NVM_CHK_ST_FREE;
	}

	trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr,
				chunk->state);

	atomic_dec(&pblk->inflight_io);
}

/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;

	__pblk_end_io_erase(pblk, rqd);
	mempool_free(rqd, &pblk->e_rq_pool);
}

/*
 * Get information for all chunks from the device.
 *
 * The caller is responsible for freeing (vmalloc) the returned structure
 */
struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_chk_meta *meta;
	struct ppa_addr ppa;
	unsigned long len;
	int ret;

	ppa.ppa = 0;

	len = geo->all_chunks * sizeof(*meta);
	meta = vzalloc(len);
	if (!meta)
		return ERR_PTR(-ENOMEM);

	ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
	if (ret) {
		vfree(meta);
		return ERR_PTR(-EIO);
	}

	return meta;
}

struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
					      struct nvm_chk_meta *meta,
					      struct ppa_addr ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
	int lun_off = ppa.m.pu * geo->num_chk;
	int chk_off = ppa.m.chk;

	return meta + ch_off + lun_off + chk_off;
}

void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
			   u64 paddr)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;

	/* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
	 * table is modified with reclaimed sectors, a check is done to ensure
	 * that newer updates are not overwritten.
	 */
	spin_lock(&line->lock);
	WARN_ON(line->state == PBLK_LINESTATE_FREE);

	if (test_and_set_bit(paddr, line->invalid_bitmap)) {
		WARN_ONCE(1, "pblk: double invalidate\n");
		spin_unlock(&line->lock);
		return;
	}

	le32_add_cpu(line->vsc, -1);

	if (line->state == PBLK_LINESTATE_CLOSED)
		move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);

	if (move_list) {
		spin_lock(&l_mg->gc_lock);
		spin_lock(&line->lock);
		/* Prevent moving a line that has just been chosen for GC */
		if (line->state == PBLK_LINESTATE_GC) {
			spin_unlock(&line->lock);
			spin_unlock(&l_mg->gc_lock);
			return;
		}
		spin_unlock(&line->lock);

		list_move_tail(&line->list, move_list);
		spin_unlock(&l_mg->gc_lock);
	}
}

void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
	struct pblk_line *line;
	u64 paddr;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
	BUG_ON(pblk_ppa_empty(ppa));
#endif

	line = pblk_ppa_to_line(pblk, ppa);
	paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

	__pblk_map_invalidate(pblk, line, paddr);
}

static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
				  unsigned int nr_secs)
{
	sector_t lba;

	spin_lock(&pblk->trans_lock);
	for (lba = slba; lba < slba + nr_secs; lba++) {
		struct ppa_addr ppa;

		ppa = pblk_trans_map_get(pblk, lba);

		if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);

		pblk_ppa_set_empty(&ppa);
		pblk_trans_map_set(pblk, lba, ppa);
	}
	spin_unlock(&pblk->trans_lock);
}

int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list)
		return -ENOMEM;

	if (rqd->nr_ppas == 1)
		return 0;

	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);

	return 0;
}
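
/*
 * Note on the layout set up by pblk_alloc_rqd_meta() above: a single DMA
 * buffer is allocated, the OOB metadata list sits at its start and the PPA
 * list is placed pblk_dma_meta_size(pblk) bytes into it.  For single-sector
 * requests only rqd->ppa_addr is used, so the PPA list is left unset.
 */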

void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

	if (rqd->meta_list)
		nvm_dev_dma_free(dev->parent, rqd->meta_list,
				rqd->dma_meta_list);
}

/* Caller must guarantee that the request is a valid type */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
{
	mempool_t *pool;
	struct nvm_rq *rqd;
	int rq_size;

	switch (type) {
	case PBLK_WRITE:
	case PBLK_WRITE_INT:
		pool = &pblk->w_rq_pool;
		rq_size = pblk_w_rq_size;
		break;
	case PBLK_READ:
		pool = &pblk->r_rq_pool;
		rq_size = pblk_g_rq_size;
		break;
	default:
		pool = &pblk->e_rq_pool;
		rq_size = pblk_g_rq_size;
	}

	rqd = mempool_alloc(pool, GFP_KERNEL);
	memset(rqd, 0, rq_size);

	return rqd;
}

/* Typically used on completion path. Cannot guarantee request consistency */
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
{
	mempool_t *pool;

	switch (type) {
	case PBLK_WRITE:
		kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
		/* fall through */
	case PBLK_WRITE_INT:
		pool = &pblk->w_rq_pool;
		break;
	case PBLK_READ:
		pool = &pblk->r_rq_pool;
		break;
	case PBLK_ERASE:
		pool = &pblk->e_rq_pool;
		break;
	default:
		pblk_err(pblk, "trying to free unknown rqd type\n");
		return;
	}

	pblk_free_rqd_meta(pblk, rqd);
	mempool_free(rqd, pool);
}

void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages)
{
	struct bio_vec *bv;
	struct page *page;
	int i, e, nbv = 0;

	for (i = 0; i < bio->bi_vcnt; i++) {
		bv = &bio->bi_io_vec[i];
		page = bv->bv_page;
		for (e = 0; e < bv->bv_len; e += PBLK_EXPOSED_PAGE_SIZE, nbv++)
			if (nbv >= off)
				mempool_free(page++, &pblk->page_bio_pool);
	}
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages)
{
	struct request_queue *q = pblk->dev->q;
	struct page *page;
	int i, ret;

	for (i = 0; i < nr_pages; i++) {
		page = mempool_alloc(&pblk->page_bio_pool, flags);

		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
			pblk_err(pblk, "could not add page to bio\n");
			mempool_free(page, &pblk->page_bio_pool);
			goto err;
		}
	}

	return 0;
err:
	pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
	return -1;
}

void pblk_write_kick(struct pblk *pblk)
{
	wake_up_process(pblk->writer_ts);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(struct timer_list *t)
{
	struct pblk *pblk = from_timer(pblk, t, wtimer);

	/* kick the write thread every tick to flush outstanding data */
	pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

	if (secs_avail >= pblk->min_write_pgs_data)
		pblk_write_kick(pblk);
}

static void pblk_wait_for_meta(struct pblk *pblk)
{
	do {
		if (!atomic_read(&pblk->inflight_io))
			break;

		schedule();
	} while (1);
}

static void pblk_flush_writer(struct pblk *pblk)
{
	pblk_rb_flush(&pblk->rwb);
	do {
		if (!pblk_rb_sync_count(&pblk->rwb))
			break;

		pblk_write_kick(pblk);
		schedule();
	} while (1);
}

struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;
	int packed_meta = (le32_to_cpu(*line->vsc) / pblk->min_write_pgs_data)
			* (pblk->min_write_pgs - pblk->min_write_pgs_data);
	int vsc = le32_to_cpu(*line->vsc) + packed_meta;

	lockdep_assert_held(&line->lock);

	if (line->w_err_gc->has_write_err) {
		if (line->gc_group != PBLK_LINEGC_WERR) {
			line->gc_group = PBLK_LINEGC_WERR;
			move_list = &l_mg->gc_werr_list;
			pblk_rl_werr_line_in(&pblk->rl);
		}
	} else if (!vsc) {
		if (line->gc_group != PBLK_LINEGC_FULL) {
			line->gc_group = PBLK_LINEGC_FULL;
			move_list = &l_mg->gc_full_list;
		}
	} else if (vsc < lm->high_thrs) {
		if (line->gc_group != PBLK_LINEGC_HIGH) {
			line->gc_group = PBLK_LINEGC_HIGH;
			move_list = &l_mg->gc_high_list;
		}
	} else if (vsc < lm->mid_thrs) {
		if (line->gc_group != PBLK_LINEGC_MID) {
			line->gc_group = PBLK_LINEGC_MID;
			move_list = &l_mg->gc_mid_list;
		}
	} else if (vsc < line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_LOW) {
			line->gc_group = PBLK_LINEGC_LOW;
			move_list = &l_mg->gc_low_list;
		}
	} else if (vsc == line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_EMPTY) {
			line->gc_group = PBLK_LINEGC_EMPTY;
			move_list = &l_mg->gc_empty_list;
		}
	} else {
		line->state = PBLK_LINESTATE_CORRUPT;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);

		line->gc_group = PBLK_LINEGC_NONE;
		move_list = &l_mg->corrupt_list;
		pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
						line->id, vsc,
						line->sec_in_line,
						lm->high_thrs, lm->mid_thrs);
	}

	return move_list;
}
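
/*
 * Summary of the bucketing above: a line with no valid sectors goes to the
 * full list (everything is reclaimable), and as the valid sector count (vsc)
 * grows the line moves through the high, mid and low GC lists; a line whose
 * sectors are all still valid is "empty" from a GC point of view.  Lines
 * with write errors always take precedence, and any other vsc value marks
 * the line as corrupt.
 */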

void pblk_discard(struct pblk *pblk, struct bio *bio)
{
	sector_t slba = pblk_get_lba(bio);
	sector_t nr_secs = pblk_get_secs(bio);

	pblk_invalidate_range(pblk, slba, nr_secs);
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	/* Empty page read is not necessarily an error (e.g., L2P recovery) */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		atomic_long_inc(&pblk->read_empty);
		return;
	}

	switch (rqd->error) {
	case NVM_RSP_WARN_HIGHECC:
		atomic_long_inc(&pblk->read_high_ecc);
		break;
	case NVM_RSP_ERR_FAILECC:
	case NVM_RSP_ERR_FAILCRC:
		atomic_long_inc(&pblk->read_failed);
		break;
	default:
		pblk_err(pblk, "unknown read error:%d\n", rqd->error);
	}
#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
	pblk->sec_per_write = sec_per_write;
}

int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

	atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if (pblk_check_io(pblk, rqd))
		return NVM_IO_ERR;
#endif

	return nvm_submit_io(dev, rqd);
}

void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	int i;

	for (i = 0; i < rqd->nr_ppas; i++) {
		struct ppa_addr *ppa = &ppa_list[i];
		struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa);
		u64 caddr = pblk_dev_ppa_to_chunk_addr(pblk, *ppa);

		if (caddr == 0)
			trace_pblk_chunk_state(pblk_disk_name(pblk),
							ppa, NVM_CHK_ST_OPEN);
		else if (caddr == (chunk->cnlb - 1))
			trace_pblk_chunk_state(pblk_disk_name(pblk),
							ppa, NVM_CHK_ST_CLOSED);
	}
}

int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	int ret;

	atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if (pblk_check_io(pblk, rqd))
		return NVM_IO_ERR;
#endif

	ret = nvm_submit_io_sync(dev, rqd);

	if (trace_pblk_chunk_state_enabled() && !ret &&
	    rqd->opcode == NVM_OP_PWRITE)
		pblk_check_chunk_state_update(pblk, rqd);

	return ret;
}

int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	int ret;

	pblk_down_chunk(pblk, ppa_list[0]);
	ret = pblk_submit_io_sync(pblk, rqd);
	pblk_up_chunk(pblk, ppa_list[0]);

	return ret;
}

static void pblk_bio_map_addr_endio(struct bio *bio)
{
	bio_put(bio);
}

struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      int alloc_type, gfp_t gfp_mask)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	void *kaddr = data;
	struct page *page;
	struct bio *bio;
	int i, ret;

	if (alloc_type == PBLK_KMALLOC_META)
		return bio_map_kern(dev->q, kaddr, len, gfp_mask);

	bio = bio_kmalloc(gfp_mask, nr_secs);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_secs; i++) {
		page = vmalloc_to_page(kaddr);
		if (!page) {
			pblk_err(pblk, "could not map vmalloc bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
		if (ret != PAGE_SIZE) {
			pblk_err(pblk, "could not add page to bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		kaddr += PAGE_SIZE;
	}

	bio->bi_end_io = pblk_bio_map_addr_endio;
out:
	return bio;
}

int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush, bool skip_meta)
{
	int max = pblk->sec_per_write;
	int min = pblk->min_write_pgs;
	int secs_to_sync = 0;

	if (skip_meta && pblk->min_write_pgs_data != pblk->min_write_pgs)
		min = max = pblk->min_write_pgs_data;

	if (secs_avail >= max)
		secs_to_sync = max;
	else if (secs_avail >= min)
		secs_to_sync = min * (secs_avail / min);
	else if (secs_to_flush)
		secs_to_sync = min;

	return secs_to_sync;
}
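
/*
 * Worked example for pblk_calc_secs() above (illustrative values only): with
 * sec_per_write = 64 and min_write_pgs = 8, secs_avail = 70 syncs 64 sectors,
 * secs_avail = 27 rounds down to 24, and secs_avail = 5 syncs nothing unless
 * secs_to_flush is set, in which case the minimum of 8 is returned and the
 * writer is expected to pad the difference.
 */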

void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	spin_lock(&line->lock);
	addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	line->cur_sec = addr - nr_secs;

	for (i = 0; i < nr_secs; i++, line->cur_sec--)
		WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
	spin_unlock(&line->lock);
}

u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	lockdep_assert_held(&line->lock);

	/* logic error: ppa out-of-bounds. Prevent generating bad address */
	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
		WARN(1, "pblk: page allocation out of bounds\n");
		nr_secs = pblk->lm.sec_per_line - line->cur_sec;
	}

	line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	for (i = 0; i < nr_secs; i++, line->cur_sec++)
		WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

	return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;

	/* Lock needed in case a write fails and a recovery needs to remap
	 * failed write buffer entries
	 */
	spin_lock(&line->lock);
	addr = __pblk_alloc_page(pblk, line, nr_secs);
	line->left_msecs -= nr_secs;
	WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
	spin_unlock(&line->lock);

	return addr;
}

u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
	u64 paddr;

	spin_lock(&line->lock);
	paddr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	spin_unlock(&line->lock);

	return paddr;
}

u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int bit;

	/* This usually only happens on bad lines */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (bit >= lm->blk_per_line)
		return -1;

	return bit * geo->ws_opt;
}

int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	struct bio *bio;
	struct ppa_addr *ppa_list;
	struct nvm_rq rqd;
	u64 paddr = pblk_line_smeta_start(pblk, line);
	int i, ret;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = pblk_alloc_rqd_meta(pblk, &rqd);
	if (ret)
		return ret;

	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto clear_rqd;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.bio = bio;
	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = lm->smeta_sec;
	rqd.is_seq = 1;
	ppa_list = nvm_rq_to_ppa_list(&rqd);

	for (i = 0; i < lm->smeta_sec; i++, paddr++)
		ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto clear_rqd;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
		pblk_log_read_err(pblk, &rqd);
		ret = -EIO;
	}

clear_rqd:
	pblk_free_rqd_meta(pblk, &rqd);
	return ret;
}

static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
				 u64 paddr)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	struct bio *bio;
	struct ppa_addr *ppa_list;
	struct nvm_rq rqd;
	__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
	int i, ret;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = pblk_alloc_rqd_meta(pblk, &rqd);
	if (ret)
		return ret;

	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto clear_rqd;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd.bio = bio;
	rqd.opcode = NVM_OP_PWRITE;
	rqd.nr_ppas = lm->smeta_sec;
	rqd.is_seq = 1;
	ppa_list = nvm_rq_to_ppa_list(&rqd);

	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk,
							   rqd.meta_list, i);

		ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
		meta->lba = lba_list[paddr] = addr_empty;
	}

	ret = pblk_submit_io_sync_sem(pblk, &rqd);
	if (ret) {
		pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto clear_rqd;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		pblk_log_write_err(pblk, &rqd);
		ret = -EIO;
	}

clear_rqd:
	pblk_free_rqd_meta(pblk, &rqd);
	return ret;
}

int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	void *ppa_list_buf, *meta_list;
	struct bio *bio;
	struct ppa_addr *ppa_list;
	struct nvm_rq rqd;
	u64 paddr = line->emeta_ssec;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int min = pblk->min_write_pgs;
	int left_ppas = lm->emeta_sec[0];
	int line_id = line->id;
	int rq_ppas, rq_len;
	int i, j;
	int ret;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&dma_meta_list);
	if (!meta_list)
		return -ENOMEM;

	ppa_list_buf = meta_list + pblk_dma_meta_size(pblk);
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);

next_rq:
	memset(&rqd, 0, sizeof(struct nvm_rq));

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
	rq_len = rq_ppas * geo->csecs;

	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_rqd_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.bio = bio;
	rqd.meta_list = meta_list;
	rqd.ppa_list = ppa_list_buf;
	rqd.dma_meta_list = dma_meta_list;
	rqd.dma_ppa_list = dma_ppa_list;
	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = rq_ppas;
	ppa_list = nvm_rq_to_ppa_list(&rqd);

	for (i = 0; i < rqd.nr_ppas; ) {
		struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
		int pos = pblk_ppa_to_pos(geo, ppa);

		if (pblk_io_aligned(pblk, rq_ppas))
			rqd.is_seq = 1;

		while (test_bit(pos, line->blk_bitmap)) {
			paddr += min;
			if (pblk_boundary_paddr_checks(pblk, paddr)) {
				bio_put(bio);
				ret = -EINTR;
				goto free_rqd_dma;
			}

			ppa = addr_to_gen_ppa(pblk, paddr, line_id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
			bio_put(bio);
			ret = -EINTR;
			goto free_rqd_dma;
		}

		for (j = 0; j < min; j++, i++, paddr++)
			ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
	}

	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_rqd_dma;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
		pblk_log_read_err(pblk, &rqd);
		ret = -EIO;
		goto free_rqd_dma;
	}

	emeta_buf += rq_len;
	left_ppas -= rq_ppas;
	if (left_ppas)
		goto next_rq;

free_rqd_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
			    struct ppa_addr ppa)
{
	rqd->opcode = NVM_OP_ERASE;
	rqd->ppa_addr = ppa;
	rqd->nr_ppas = 1;
	rqd->is_seq = 1;
	rqd->bio = NULL;
}

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq rqd = {NULL};
	int ret;

	trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa,
				PBLK_CHUNK_RESET_START);

	pblk_setup_e_rq(pblk, &rqd, ppa);

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io_sync(pblk, &rqd);
	__pblk_end_io_erase(pblk, &rqd);

	return ret;
}
*pblk
, struct pblk_line
*line
)
975 struct pblk_line_meta
*lm
= &pblk
->lm
;
979 /* Erase only good blocks, one at a time */
981 spin_lock(&line
->lock
);
982 bit
= find_next_zero_bit(line
->erase_bitmap
, lm
->blk_per_line
,
984 if (bit
>= lm
->blk_per_line
) {
985 spin_unlock(&line
->lock
);
989 ppa
= pblk
->luns
[bit
].bppa
; /* set ch and lun */
990 ppa
.a
.blk
= line
->id
;
992 atomic_dec(&line
->left_eblks
);
993 WARN_ON(test_and_set_bit(bit
, line
->erase_bitmap
));
994 spin_unlock(&line
->lock
);
996 ret
= pblk_blk_erase_sync(pblk
, ppa
);
998 pblk_err(pblk
, "failed to erase line %d\n", line
->id
);

static void pblk_line_setup_metadata(struct pblk_line *line,
				     struct pblk_line_mgmt *l_mg,
				     struct pblk_line_meta *lm)
{
	int meta_line;

	lockdep_assert_held(&l_mg->free_lock);

retry_meta:
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	if (meta_line == PBLK_DATA_LINES) {
		spin_unlock(&l_mg->free_lock);
		io_schedule();
		spin_lock(&l_mg->free_lock);
		goto retry_meta;
	}

	set_bit(meta_line, &l_mg->meta_bitmap);
	line->meta_line = meta_line;

	line->smeta = l_mg->sline_meta[meta_line];
	line->emeta = l_mg->eline_meta[meta_line];

	memset(line->smeta, 0, lm->smeta_len);
	memset(line->emeta->buf, 0, lm->emeta_len[0]);

	line->emeta->mem = 0;
	atomic_set(&line->emeta->sync, 0);
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_line *cur)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
	int nr_blk_line;

	/* After erasing the line, new bad blocks might appear and we risk
	 * having an invalid line
	 */
	nr_blk_line = lm->blk_per_line -
			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	if (nr_blk_line < lm->min_blk_line) {
		spin_lock(&l_mg->free_lock);
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		spin_unlock(&l_mg->free_lock);

		pblk_debug(pblk, "line %d is bad\n", line->id);

		return 0;
	}

	/* Run-time metadata */
	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

	/* Mark LUNs allocated in this line (all for now) */
	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
	guid_copy((guid_t *)&smeta_buf->header.uuid, &pblk->instance_uuid);
	smeta_buf->header.id = cpu_to_le32(line->id);
	smeta_buf->header.type = cpu_to_le16(line->type);
	smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
	smeta_buf->header.version_minor = SMETA_VERSION_MINOR;

	/* Start metadata */
	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);

	/* Fill metadata among lines */
	if (cur) {
		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
		smeta_buf->prev_id = cpu_to_le32(cur->id);
		cur->emeta->buf->next_id = cpu_to_le32(line->id);
	} else {
		smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
	}

	/* All smeta must be set at this point */
	smeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

	/* End metadata */
	memcpy(&emeta_buf->header, &smeta_buf->header,
						sizeof(struct line_header));

	emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
	emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
	emeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));

	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
	emeta_buf->crc = cpu_to_le32(0);
	emeta_buf->prev_id = smeta_buf->prev_id;

	return 1;
}

static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
	if (!line->map_bitmap)
		return -ENOMEM;

	memset(line->map_bitmap, 0, lm->sec_bitmap_len);

	/* will be initialized using bb info from map_bitmap */
	line->invalid_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
	if (!line->invalid_bitmap) {
		mempool_free(line->map_bitmap, l_mg->bitmap_pool);
		line->map_bitmap = NULL;
		return -ENOMEM;
	}

	memset(line->invalid_bitmap, 0, lm->sec_bitmap_len);

	return 0;
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
			     int init)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	u64 off;
	int bit = -1;
	int emeta_secs;

	line->sec_in_line = lm->sec_per_line;

	/* Capture bad block information on line mapping bitmaps */
	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
					bit + 1)) < lm->blk_per_line) {
		off = bit * geo->ws_opt;
		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
							lm->sec_per_line);
		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
							lm->sec_per_line);
		line->sec_in_line -= geo->clba;
	}

	/* Mark smeta metadata sectors as bad sectors */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	off = bit * geo->ws_opt;
	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
	line->sec_in_line -= lm->smeta_sec;
	line->cur_sec = off + lm->smeta_sec;

	if (init && pblk_line_smeta_write(pblk, line, off)) {
		pblk_debug(pblk, "line smeta I/O failed. Retry\n");
		return 0;
	}

	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
	 * blocks to make sure that there are enough sectors to store emeta
	 */
	emeta_secs = lm->emeta_sec[0];
	off = lm->sec_per_line;
	while (emeta_secs) {
		off -= geo->ws_opt;
		if (!test_bit(off, line->invalid_bitmap)) {
			bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
			emeta_secs -= geo->ws_opt;
		}
	}

	line->emeta_ssec = off;
	line->sec_in_line -= lm->emeta_sec[0];
	line->nr_valid_lbas = 0;
	line->left_msecs = line->sec_in_line;
	*line->vsc = cpu_to_le32(line->sec_in_line);

	if (lm->sec_per_line - line->sec_in_line !=
		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		pblk_err(pblk, "unexpected line %d is bad\n", line->id);

		return 0;
	}

	return 1;
}

static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int blk_to_erase = atomic_read(&line->blk_in_line);
	int i;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
		int state = line->chks[pos].state;

		/* Free chunks should not be erased */
		if (state & NVM_CHK_ST_FREE) {
			set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
							line->erase_bitmap);
			blk_to_erase--;
		}
	}

	return blk_to_erase;
}

static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int blk_in_line = atomic_read(&line->blk_in_line);
	int blk_to_erase;

	/* Bad blocks do not need to be erased */
	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

	spin_lock(&line->lock);

	/* If we have not written to this line, we need to mark up free chunks
	 * as already erased
	 */
	if (line->state == PBLK_LINESTATE_NEW) {
		blk_to_erase = pblk_prepare_new_line(pblk, line);
		line->state = PBLK_LINESTATE_FREE;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
	} else {
		blk_to_erase = blk_in_line;
	}

	if (blk_in_line < lm->min_blk_line) {
		spin_unlock(&line->lock);
		return -EAGAIN;
	}

	if (line->state != PBLK_LINESTATE_FREE) {
		WARN(1, "pblk: corrupted line %d, state %d\n",
							line->id, line->state);
		spin_unlock(&line->lock);
		return -EINTR;
	}

	line->state = PBLK_LINESTATE_OPEN;
	trace_pblk_line_state(pblk_disk_name(pblk), line->id,
				line->state);

	atomic_set(&line->left_eblks, blk_to_erase);
	atomic_set(&line->left_seblks, blk_to_erase);

	line->meta_distance = lm->meta_distance;
	spin_unlock(&line->lock);

	kref_init(&line->ref);
	atomic_set(&line->sec_to_update, 0);

	return 0;
}
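
/*
 * As used in the rest of this file, left_eblks tracks blocks for which an
 * erase still needs to be issued, while left_seblks tracks erases that have
 * not yet completed (it is dropped in __pblk_end_io_erase()).  Both counters
 * are seeded with the same number of blocks above.
 */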

/* Line allocations in the recovery path are always single threaded */
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	l_mg->data_line = line;
	list_del(&line->list);

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		list_add(&line->list, &l_mg->free_list);
		spin_unlock(&l_mg->free_lock);
		return ret;
	}
	spin_unlock(&l_mg->free_lock);

	ret = pblk_line_alloc_bitmaps(pblk, line);
	if (ret)
		goto fail;

	if (!pblk_line_init_bb(pblk, line, 0)) {
		ret = -EINTR;
		goto fail;
	}

	pblk_rl_free_lines_dec(&pblk->rl, line, true);
	return 0;

fail:
	spin_lock(&l_mg->free_lock);
	list_add(&line->list, &l_mg->free_list);
	spin_unlock(&l_mg->free_lock);

	return ret;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	mempool_free(line->map_bitmap, l_mg->bitmap_pool);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

static void pblk_line_reinit(struct pblk_line *line)
{
	*line->vsc = cpu_to_le32(EMPTY_ENTRY);

	line->map_bitmap = NULL;
	line->invalid_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

void pblk_line_free(struct pblk_line *line)
{
	struct pblk *pblk = line->pblk;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	mempool_free(line->map_bitmap, l_mg->bitmap_pool);
	mempool_free(line->invalid_bitmap, l_mg->bitmap_pool);

	pblk_line_reinit(line);
}

struct pblk_line *pblk_line_get(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	int ret, bit;

	lockdep_assert_held(&l_mg->free_lock);

retry:
	if (list_empty(&l_mg->free_list)) {
		pblk_err(pblk, "no free lines\n");
		return NULL;
	}

	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
	list_del(&line->list);
	l_mg->nr_free_lines--;

	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (unlikely(bit >= lm->blk_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);

		pblk_debug(pblk, "line %d is bad\n", line->id);
		goto retry;
	}

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		switch (ret) {
		case -EAGAIN:
			list_add(&line->list, &l_mg->bad_list);
			goto retry;
		case -EINTR:
			list_add(&line->list, &l_mg->corrupt_list);
			goto retry;
		default:
			pblk_err(pblk, "failed to prepare line %d\n", line->id);
			list_add(&line->list, &l_mg->free_list);
			l_mg->nr_free_lines++;
			return NULL;
		}
	}

	return line;
}

static struct pblk_line *pblk_line_retry(struct pblk *pblk,
					 struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *retry_line;

retry:
	spin_lock(&l_mg->free_lock);
	retry_line = pblk_line_get(pblk);
	if (!retry_line) {
		l_mg->data_line = NULL;
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	retry_line->map_bitmap = line->map_bitmap;
	retry_line->invalid_bitmap = line->invalid_bitmap;
	retry_line->smeta = line->smeta;
	retry_line->emeta = line->emeta;
	retry_line->meta_line = line->meta_line;

	pblk_line_reinit(line);

	l_mg->data_line = retry_line;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line, false);

	if (pblk_line_erase(pblk, retry_line))
		goto retry;

	return retry_line;
}

static void pblk_set_space_limit(struct pblk *pblk)
{
	struct pblk_rl *rl = &pblk->rl;

	atomic_set(&rl->rb_space, 0);
}

struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;

	spin_lock(&l_mg->free_lock);
	line = pblk_line_get(pblk);
	if (!line) {
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	line->seq_nr = l_mg->d_seq_nr++;
	line->type = PBLK_LINETYPE_DATA;
	l_mg->data_line = line;

	pblk_line_setup_metadata(line, l_mg, &pblk->lm);

	/* Allocate next line for preparation */
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_set_space_limit(pblk);

		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	}
	spin_unlock(&l_mg->free_lock);

	if (pblk_line_alloc_bitmaps(pblk, line))
		return NULL;

	if (pblk_line_erase(pblk, line)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;
	}

retry_setup:
	if (!pblk_line_init_metadata(pblk, line, NULL)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, line, 1)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	pblk_rl_free_lines_dec(&pblk->rl, line, true);

	return line;
}

void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
{
	struct pblk_line *line;

	line = pblk_ppa_to_line(pblk, ppa);
	kref_put(&line->ref, pblk_line_put_wq);
}

void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	int i;

	for (i = 0; i < rqd->nr_ppas; i++)
		pblk_ppa_to_line_put(pblk, ppa_list[i]);
}

static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
{
	lockdep_assert_held(&pblk->l_mg.free_lock);

	pblk_set_space_limit(pblk);
	pblk->state = PBLK_STATE_STOPPING;
	trace_pblk_state(pblk_disk_name(pblk), pblk->state);
}

static void pblk_line_close_meta_sync(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line, *tline;
	LIST_HEAD(list);

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return;
	}

	list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
	spin_unlock(&l_mg->close_lock);

	list_for_each_entry_safe(line, tline, &list, list) {
		struct pblk_emeta *emeta = line->emeta;

		while (emeta->mem < lm->emeta_len[0]) {
			int ret;

			ret = pblk_submit_meta_io(pblk, line);
			if (ret) {
				pblk_err(pblk, "sync meta line %d failed (%d)\n",
							line->id, ret);
				return;
			}
		}
	}

	pblk_wait_for_meta(pblk);
	flush_workqueue(pblk->close_wq);
}

void __pblk_pipeline_flush(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	if (pblk->state == PBLK_STATE_RECOVERING ||
					pblk->state == PBLK_STATE_STOPPED) {
		spin_unlock(&l_mg->free_lock);
		return;
	}
	pblk->state = PBLK_STATE_RECOVERING;
	trace_pblk_state(pblk_disk_name(pblk), pblk->state);
	spin_unlock(&l_mg->free_lock);

	pblk_flush_writer(pblk);
	pblk_wait_for_meta(pblk);

	ret = pblk_recov_pad(pblk);
	if (ret) {
		pblk_err(pblk, "could not close data on teardown(%d)\n", ret);
		return;
	}

	flush_workqueue(pblk->bb_wq);
	pblk_line_close_meta_sync(pblk);
}

void __pblk_pipeline_stop(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	spin_lock(&l_mg->free_lock);
	pblk->state = PBLK_STATE_STOPPED;
	trace_pblk_state(pblk_disk_name(pblk), pblk->state);
	l_mg->data_line = NULL;
	l_mg->data_next = NULL;
	spin_unlock(&l_mg->free_lock);
}

void pblk_pipeline_stop(struct pblk *pblk)
{
	__pblk_pipeline_flush(pblk);
	__pblk_pipeline_stop(pblk);
}

struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *cur, *new = NULL;
	unsigned int left_seblks;

	new = l_mg->data_next;
	if (!new)
		goto out;

	spin_lock(&l_mg->free_lock);
	cur = l_mg->data_line;
	l_mg->data_line = new;

	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
	spin_unlock(&l_mg->free_lock);

retry_erase:
	left_seblks = atomic_read(&new->left_seblks);
	if (left_seblks) {
		/* If line is not fully erased, erase it */
		if (atomic_read(&new->left_eblks)) {
			if (pblk_line_erase(pblk, new))
				goto out;
		} else {
			io_schedule();
		}
		goto retry_erase;
	}

	if (pblk_line_alloc_bitmaps(pblk, new))
		return NULL;

retry_setup:
	if (!pblk_line_init_metadata(pblk, new, cur)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, new, 1)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	pblk_rl_free_lines_dec(&pblk->rl, new, true);

	/* Allocate next line for preparation */
	spin_lock(&l_mg->free_lock);
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_stop_writes(pblk, new);
		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	}
	spin_unlock(&l_mg->free_lock);

out:
	return new;
}

static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;

	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	if (line->w_err_gc->has_gc_err) {
		spin_unlock(&line->lock);
		pblk_err(pblk, "line %d had errors during GC\n", line->id);
		pblk_put_line_back(pblk, line);
		line->w_err_gc->has_gc_err = 0;
		return;
	}

	line->state = PBLK_LINESTATE_FREE;
	trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
	line->gc_group = PBLK_LINEGC_NONE;
	pblk_line_free(line);

	if (line->w_err_gc->has_write_err) {
		pblk_rl_werr_line_out(&pblk->rl);
		line->w_err_gc->has_write_err = 0;
	}

	spin_unlock(&line->lock);
	atomic_dec(&gc->pipeline_gc);

	spin_lock(&l_mg->free_lock);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_inc(&pblk->rl, line);
}

static void pblk_line_put_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_put_ws = container_of(work,
						struct pblk_line_ws, ws);
	struct pblk *pblk = line_put_ws->pblk;
	struct pblk_line *line = line_put_ws->line;

	__pblk_line_put(pblk, line);
	mempool_free(line_put_ws, &pblk->gen_ws_pool);
}

void pblk_line_put(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;

	__pblk_line_put(pblk, line);
}

void pblk_line_put_wq(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;
	struct pblk_line_ws *line_put_ws;

	line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
	if (!line_put_ws)
		return;

	line_put_ws->pblk = pblk;
	line_put_ws->line = line;
	line_put_ws->priv = NULL;

	INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
	queue_work(pblk->r_end_wq, &line_put_ws->ws);
}

int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq *rqd;
	int err;

	rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);

	pblk_setup_e_rq(pblk, rqd, ppa);

	rqd->end_io = pblk_end_io_erase;
	rqd->private = pblk;

	trace_pblk_chunk_reset(pblk_disk_name(pblk),
				&ppa, PBLK_CHUNK_RESET_START);

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
					pblk_ppa_to_line_id(ppa),
					pblk_ppa_to_pos(geo, ppa));
	}

	return err;
}

struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
	return pblk->l_mg.data_line;
}

/* For now, always erase next line */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
{
	return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
	return (line->left_msecs == 0);
}

static void pblk_line_should_sync_meta(struct pblk *pblk)
{
	if (pblk_rl_is_limit(&pblk->rl))
		pblk_line_close_meta_sync(pblk);
}

void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list;
	int i;

#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt closed line %d\n", line->id);
#endif

	spin_lock(&l_mg->free_lock);
	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
	spin_unlock(&l_mg->free_lock);

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
	line->state = PBLK_LINESTATE_CLOSED;
	move_list = pblk_line_gc_list(pblk, line);
	list_add_tail(&line->list, move_list);

	mempool_free(line->map_bitmap, l_mg->bitmap_pool);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
		int state = line->chks[pos].state;

		if (!(state & NVM_CHK_ST_OFFLINE))
			state = NVM_CHK_ST_CLOSED;
	}

	spin_unlock(&line->lock);
	spin_unlock(&l_mg->gc_lock);

	trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
}

void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);

	/* No need for exact vsc value; avoid a big line lock and take approx. */
	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);

	wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
	wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
	wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));

	if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
		emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
		guid_copy((guid_t *)&emeta_buf->header.uuid,
							&pblk->instance_uuid);
		emeta_buf->header.id = cpu_to_le32(line->id);
		emeta_buf->header.type = cpu_to_le16(line->type);
		emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
		emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
		emeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
	}

	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));

	spin_lock(&l_mg->close_lock);
	spin_lock(&line->lock);

	/* Update the in-memory start address for emeta, in case it has
	 * shifted due to write errors
	 */
	if (line->emeta_ssec != line->cur_sec)
		line->emeta_ssec = line->cur_sec;

	list_add_tail(&line->list, &l_mg->emeta_list);
	spin_unlock(&line->lock);
	spin_unlock(&l_mg->close_lock);

	pblk_line_should_sync_meta(pblk);
}

static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	unsigned int lba_list_size = lm->emeta_len[2];
	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
	struct pblk_emeta *emeta = line->emeta;

	w_err_gc->lba_list = pblk_malloc(lba_list_size,
					 l_mg->emeta_alloc_type, GFP_KERNEL);
	memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
				lba_list_size);
}

void pblk_line_close_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;
	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;

	/* Write errors make the emeta start address stored in smeta invalid,
	 * so keep a copy of the lba list until we've gc'd the line
	 */
	if (w_err_gc->has_write_err)
		pblk_save_lba_list(pblk, line);

	pblk_line_close(pblk, line);
	mempool_free(line_ws, &pblk->gen_ws_pool);
}

void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		     void (*work)(struct work_struct *), gfp_t gfp_mask,
		     struct workqueue_struct *wq)
{
	struct pblk_line_ws *line_ws;

	line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);

	line_ws->pblk = pblk;
	line_ws->line = line;
	line_ws->priv = priv;

	INIT_WORK(&line_ws->ws, work);
	queue_work(wq, &line_ws->ws);
}

static void __pblk_down_chunk(struct pblk *pblk, int pos)
{
	struct pblk_lun *rlun = &pblk->luns[pos];
	int ret;

	/*
	 * Only send one inflight I/O per LUN. Since we map at a page
	 * granularity, all ppas in the I/O will map to the same LUN
	 */

	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
	if (ret == -ETIME || ret == -EINTR)
		pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
				-ret);
}

void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa);

	__pblk_down_chunk(pblk, pos);
}

void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
		  unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa);

	/* If the LUN has been locked for this same request, do not attempt to
	 * take it again
	 */
	if (test_and_set_bit(pos, lun_bitmap))
		return;

	__pblk_down_chunk(pblk, pos);
}

void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int pos = pblk_ppa_to_pos(geo, ppa);

	rlun = &pblk->luns[pos];
	up(&rlun->wr_sem);
}

void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int num_lun = geo->all_luns;
	int bit = -1;

	while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
		rlun = &pblk->luns[bit];
		up(&rlun->wr_sem);
	}
}

void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
	struct ppa_addr ppa_l2p;

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->capacity)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
		pblk_map_invalidate(pblk, ppa_l2p);

	pblk_trans_map_set(pblk, lba, ppa);
	spin_unlock(&pblk->trans_lock);
}

void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	pblk_update_map(pblk, lba, ppa);
}

int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
		       struct pblk_line *gc_line, u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int ret = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa_new));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
#endif

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->capacity)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return 0;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);

	if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
		spin_lock(&gc_line->lock);
		WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
						"pblk: corrupted GC update");
		spin_unlock(&gc_line->lock);

		ret = 0;
		goto out;
	}

	pblk_trans_map_set(pblk, lba, ppa_new);
out:
	spin_unlock(&pblk->trans_lock);
	return ret;
}

void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
			 struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
{
	struct ppa_addr ppa_l2p;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa_mapped));
#endif
	/* Invalidate and discard padded entries */
	if (lba == ADDR_EMPTY) {
		atomic64_inc(&pblk->pad_wa);
#ifdef CONFIG_NVM_PBLK_DEBUG
		atomic_long_inc(&pblk->padded_wb);
#endif
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		return;
	}

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->capacity)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	/* Do not update L2P if the cacheline has been updated. In this case,
	 * the mapped ppa must be invalidated
	 */
	if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		goto out;
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
#endif

	pblk_trans_map_set(pblk, lba, ppa_mapped);
out:
	spin_unlock(&pblk->trans_lock);
}

int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			sector_t blba, int nr_secs, bool *from_cache)
{
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr ppa;

		ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);

		/* If the L2P entry maps to a line, the reference is valid */
		if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
			struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);

			if (i > 0 && *from_cache)
				break;
			*from_cache = false;

			kref_get(&line->ref);
		} else {
			if (i > 0 && !*from_cache)
				break;
			*from_cache = true;
		}
	}
	spin_unlock(&pblk->trans_lock);
	return i;
}
*pblk
, struct ppa_addr
*ppas
,
2185 u64
*lba_list
, int nr_secs
)
2190 spin_lock(&pblk
->trans_lock
);
2191 for (i
= 0; i
< nr_secs
; i
++) {
2193 if (lba
!= ADDR_EMPTY
) {
2194 /* logic error: lba out-of-bounds. Ignore update */
2195 if (!(lba
< pblk
->capacity
)) {
2196 WARN(1, "pblk: corrupted L2P map request\n");
2199 ppas
[i
] = pblk_trans_map_get(pblk
, lba
);
2202 spin_unlock(&pblk
->trans_lock
);

void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd)
{
	void *buffer;

	if (pblk_is_oob_meta_supported(pblk)) {
		/* Just use OOB metadata buffer as always */
		buffer = rqd->meta_list;
	} else {
		/* We need to reuse last page of request (packed metadata)
		 * in similar way as traditional oob metadata
		 */
		buffer = page_to_virt(
			rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
	}

	return buffer;
}

void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
{
	void *meta_list = rqd->meta_list;
	void *page;
	int i = 0;

	if (pblk_is_oob_meta_supported(pblk))
		return;

	page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
	/* We need to fill oob meta buffer with data from packed metadata */
	for (; i < rqd->nr_ppas; i++)
		memcpy(pblk_get_meta(pblk, meta_list, i),
			page + (i * sizeof(struct pblk_sec_meta)),
			sizeof(struct pblk_sec_meta));
}