/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#include "pblk.h"
static void pblk_line_mark_bb(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct ppa_addr *ppa = line_ws->priv;
	int ret;

	ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
	if (ret) {
		struct pblk_line *line;
		int pos;

		line = &pblk->lines[pblk_ppa_to_line(*ppa)];
		pos = pblk_ppa_to_pos(&dev->geo, *ppa);

		pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
				line->id, pos);
	}

	kfree(ppa);
	mempool_free(line_ws, &pblk->gen_ws_pool);
}
static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
			 struct ppa_addr ppa_addr)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa;
	int pos = pblk_ppa_to_pos(geo, ppa_addr);

	pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
	atomic_long_inc(&pblk->erase_failed);

	atomic_dec(&line->blk_in_line);
	if (test_and_set_bit(pos, line->blk_bitmap))
		pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
							line->id, pos);

	/* Not necessary to mark bad blocks on 2.0 spec. */
	if (geo->version == NVM_OCSSD_SPEC_20)
		return;

	ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
	if (!ppa)
		return;

	*ppa = ppa_addr;
	pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
						GFP_ATOMIC, pblk->bb_wq);
}
static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_chk_meta *chunk;
	struct pblk_line *line;
	int pos;

	line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
	pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
	chunk = &line->chks[pos];

	atomic_dec(&line->left_seblks);

	if (rqd->error) {
		chunk->state = NVM_CHK_ST_OFFLINE;
		pblk_mark_bb(pblk, line, rqd->ppa_addr);
	} else {
		chunk->state = NVM_CHK_ST_FREE;
	}

	atomic_dec(&pblk->inflight_io);
}
/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;

	__pblk_end_io_erase(pblk, rqd);
	mempool_free(rqd, &pblk->e_rq_pool);
}
/*
 * Get information for all chunks from the device.
 *
 * The caller is responsible for freeing the returned structure
 */
struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_chk_meta *meta;
	struct ppa_addr ppa;
	unsigned long len;
	int ret;

	ppa.ppa = 0;

	len = geo->all_chunks * sizeof(*meta);
	meta = kzalloc(len, GFP_KERNEL);
	if (!meta)
		return ERR_PTR(-ENOMEM);

	ret = nvm_get_chunk_meta(dev, meta, ppa, geo->all_chunks);
	if (ret) {
		kfree(meta);
		return ERR_PTR(-EIO);
	}

	return meta;
}
struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
					      struct nvm_chk_meta *meta,
					      struct ppa_addr ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
	int lun_off = ppa.m.pu * geo->num_chk;
	int chk_off = ppa.m.chk;

	return meta + ch_off + lun_off + chk_off;
}
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
			   u64 paddr)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;

	/* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
	 * table is modified with reclaimed sectors, a check is done to ensure
	 * that newer updates are not overwritten.
	 */
	spin_lock(&line->lock);
	WARN_ON(line->state == PBLK_LINESTATE_FREE);

	if (test_and_set_bit(paddr, line->invalid_bitmap)) {
		WARN_ONCE(1, "pblk: double invalidate\n");
		spin_unlock(&line->lock);
		return;
	}
	le32_add_cpu(line->vsc, -1);

	if (line->state == PBLK_LINESTATE_CLOSED)
		move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);

	if (move_list) {
		spin_lock(&l_mg->gc_lock);
		spin_lock(&line->lock);
		/* Prevent moving a line that has just been chosen for GC */
		if (line->state == PBLK_LINESTATE_GC) {
			spin_unlock(&line->lock);
			spin_unlock(&l_mg->gc_lock);
			return;
		}
		spin_unlock(&line->lock);

		list_move_tail(&line->list, move_list);
		spin_unlock(&l_mg->gc_lock);
	}
}
void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
	struct pblk_line *line;
	u64 paddr;
	int line_id;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
	BUG_ON(pblk_ppa_empty(ppa));
#endif

	line_id = pblk_ppa_to_line(ppa);
	line = &pblk->lines[line_id];
	paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

	__pblk_map_invalidate(pblk, line, paddr);
}
static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
				  unsigned int nr_secs)
{
	sector_t lba;

	spin_lock(&pblk->trans_lock);
	for (lba = slba; lba < slba + nr_secs; lba++) {
		struct ppa_addr ppa;

		ppa = pblk_trans_map_get(pblk, lba);

		if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);

		pblk_ppa_set_empty(&ppa);
		pblk_trans_map_set(pblk, lba, ppa);
	}
	spin_unlock(&pblk->trans_lock);
}
/* Caller must guarantee that the request is a valid type */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
{
	mempool_t *pool;
	struct nvm_rq *rqd;
	int rq_size;

	switch (type) {
	case PBLK_WRITE:
	case PBLK_WRITE_INT:
		pool = &pblk->w_rq_pool;
		rq_size = pblk_w_rq_size;
		break;
	case PBLK_READ:
		pool = &pblk->r_rq_pool;
		rq_size = pblk_g_rq_size;
		break;
	default:
		pool = &pblk->e_rq_pool;
		rq_size = pblk_g_rq_size;
	}

	rqd = mempool_alloc(pool, GFP_KERNEL);
	memset(rqd, 0, rq_size);

	return rqd;
}
/* Typically used on completion path. Cannot guarantee request consistency */
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	mempool_t *pool;

	switch (type) {
	case PBLK_WRITE:
		kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
		/* fall through */
	case PBLK_WRITE_INT:
		pool = &pblk->w_rq_pool;
		break;
	case PBLK_READ:
		pool = &pblk->r_rq_pool;
		break;
	case PBLK_ERASE:
		pool = &pblk->e_rq_pool;
		break;
	default:
		pblk_err(pblk, "trying to free unknown rqd type\n");
		return;
	}

	if (rqd->meta_list)
		nvm_dev_dma_free(dev->parent, rqd->meta_list,
				rqd->dma_meta_list);
	mempool_free(rqd, pool);
}
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages)
{
	struct bio_vec *bv;
	struct page *page;
	int i, e, nbv = 0;

	for (i = 0; i < bio->bi_vcnt; i++) {
		bv = &bio->bi_io_vec[i];
		page = bv->bv_page;
		for (e = 0; e < bv->bv_len; e += PBLK_EXPOSED_PAGE_SIZE, nbv++)
			if (nbv >= off)
				mempool_free(page++, &pblk->page_bio_pool);
	}
}
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages)
{
	struct request_queue *q = pblk->dev->q;
	struct page *page;
	int i, ret;

	for (i = 0; i < nr_pages; i++) {
		page = mempool_alloc(&pblk->page_bio_pool, flags);

		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
			pblk_err(pblk, "could not add page to bio\n");
			mempool_free(page, &pblk->page_bio_pool);
			goto err;
		}
	}

	return 0;
err:
	pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
	return -1;
}
void pblk_write_kick(struct pblk *pblk)
{
	wake_up_process(pblk->writer_ts);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}
void pblk_write_timer_fn(struct timer_list *t)
{
	struct pblk *pblk = from_timer(pblk, t, wtimer);

	/* kick the write thread every tick to flush outstanding data */
	pblk_write_kick(pblk);
}
void pblk_write_should_kick(struct pblk *pblk)
{
	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

	if (secs_avail >= pblk->min_write_pgs)
		pblk_write_kick(pblk);
}
static void pblk_wait_for_meta(struct pblk *pblk)
{
	do {
		if (!atomic_read(&pblk->inflight_io))
			break;

		schedule();
	} while (1);
}
static void pblk_flush_writer(struct pblk *pblk)
{
	pblk_rb_flush(&pblk->rwb);
	do {
		if (!pblk_rb_sync_count(&pblk->rwb))
			break;

		pblk_write_kick(pblk);
		schedule();
	} while (1);
}
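
/* Pick the GC list for a closed line based on its valid sector count (vsc):
 * lines with write errors and fully invalid lines get dedicated lists, the
 * rest are binned by the high/mid thresholds in pblk_line_meta.
 */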
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;
	int vsc = le32_to_cpu(*line->vsc);

	lockdep_assert_held(&line->lock);

	if (line->w_err_gc->has_write_err) {
		if (line->gc_group != PBLK_LINEGC_WERR) {
			line->gc_group = PBLK_LINEGC_WERR;
			move_list = &l_mg->gc_werr_list;
			pblk_rl_werr_line_in(&pblk->rl);
		}
	} else if (!vsc) {
		if (line->gc_group != PBLK_LINEGC_FULL) {
			line->gc_group = PBLK_LINEGC_FULL;
			move_list = &l_mg->gc_full_list;
		}
	} else if (vsc < lm->high_thrs) {
		if (line->gc_group != PBLK_LINEGC_HIGH) {
			line->gc_group = PBLK_LINEGC_HIGH;
			move_list = &l_mg->gc_high_list;
		}
	} else if (vsc < lm->mid_thrs) {
		if (line->gc_group != PBLK_LINEGC_MID) {
			line->gc_group = PBLK_LINEGC_MID;
			move_list = &l_mg->gc_mid_list;
		}
	} else if (vsc < line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_LOW) {
			line->gc_group = PBLK_LINEGC_LOW;
			move_list = &l_mg->gc_low_list;
		}
	} else if (vsc == line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_EMPTY) {
			line->gc_group = PBLK_LINEGC_EMPTY;
			move_list = &l_mg->gc_empty_list;
		}
	} else {
		line->state = PBLK_LINESTATE_CORRUPT;
		line->gc_group = PBLK_LINEGC_NONE;
		move_list = &l_mg->corrupt_list;
		pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
						line->id, vsc,
						line->sec_in_line,
						lm->high_thrs, lm->mid_thrs);
	}

	return move_list;
}
void pblk_discard(struct pblk *pblk, struct bio *bio)
{
	sector_t slba = pblk_get_lba(bio);
	sector_t nr_secs = pblk_get_secs(bio);

	pblk_invalidate_range(pblk, slba, nr_secs);
}
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	/* Empty page read is not necessarily an error (e.g., L2P recovery) */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		atomic_long_inc(&pblk->read_empty);
		return;
	}

	switch (rqd->error) {
	case NVM_RSP_WARN_HIGHECC:
		atomic_long_inc(&pblk->read_high_ecc);
		break;
	case NVM_RSP_ERR_FAILECC:
	case NVM_RSP_ERR_FAILCRC:
		atomic_long_inc(&pblk->read_failed);
		break;
	default:
		pblk_err(pblk, "unknown read error:%d\n", rqd->error);
	}
#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}
void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
	pblk->sec_per_write = sec_per_write;
}
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

	atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if (pblk_check_io(pblk, rqd))
		return NVM_IO_ERR;
#endif

	return nvm_submit_io(dev, rqd);
}
int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

	atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if (pblk_check_io(pblk, rqd))
		return NVM_IO_ERR;
#endif

	return nvm_submit_io_sync(dev, rqd);
}
static void pblk_bio_map_addr_endio(struct bio *bio)
{
	bio_put(bio);
}
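
/* Map a metadata buffer into a bio. kmalloc'ed buffers can be mapped
 * directly with bio_map_kern(); vmalloc'ed buffers are mapped page by page.
 */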
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      int alloc_type, gfp_t gfp_mask)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	void *kaddr = data;
	struct page *page;
	struct bio *bio;
	int i, ret;

	if (alloc_type == PBLK_KMALLOC_META)
		return bio_map_kern(dev->q, kaddr, len, gfp_mask);

	bio = bio_kmalloc(gfp_mask, nr_secs);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_secs; i++) {
		page = vmalloc_to_page(kaddr);
		if (!page) {
			pblk_err(pblk, "could not map vmalloc bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
		if (ret != PAGE_SIZE) {
			pblk_err(pblk, "could not add page to bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		kaddr += PAGE_SIZE;
	}

	bio->bi_end_io = pblk_bio_map_addr_endio;
out:
	return bio;
}
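
/* Round the sectors to submit down to a multiple of the minimum write size;
 * anything smaller is only sent when a flush forces it out.
 */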
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush)
{
	int max = pblk->sec_per_write;
	int min = pblk->min_write_pgs;
	int secs_to_sync = 0;

	if (secs_avail >= max)
		secs_to_sync = max;
	else if (secs_avail >= min)
		secs_to_sync = min * (secs_avail / min);
	else if (secs_to_flush)
		secs_to_sync = min;

	return secs_to_sync;
}
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	spin_lock(&line->lock);
	addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	line->cur_sec = addr - nr_secs;

	for (i = 0; i < nr_secs; i++, line->cur_sec--)
		WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
	spin_unlock(&line->lock);
}
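
/* Allocate nr_secs consecutive sectors in the line's map bitmap starting at
 * cur_sec. Caller must hold line->lock (see pblk_alloc_page()).
 */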
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	lockdep_assert_held(&line->lock);

	/* logic error: ppa out-of-bounds. Prevent generating bad address */
	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
		WARN(1, "pblk: page allocation out of bounds\n");
		nr_secs = pblk->lm.sec_per_line - line->cur_sec;
	}

	line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	for (i = 0; i < nr_secs; i++, line->cur_sec++)
		WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

	return addr;
}
u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;

	/* Lock needed in case a write fails and a recovery needs to remap
	 * failed write buffer entries
	 */
	spin_lock(&line->lock);
	addr = __pblk_alloc_page(pblk, line, nr_secs);
	line->left_msecs -= nr_secs;
	WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
	spin_unlock(&line->lock);

	return addr;
}
u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
	u64 paddr;

	spin_lock(&line->lock);
	paddr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	spin_unlock(&line->lock);

	return paddr;
}
/*
 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
 * taking the per LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
				     void *emeta_buf, u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	void *ppa_list, *meta_list;
	struct bio *bio;
	struct nvm_rq rqd;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int min = pblk->min_write_pgs;
	int left_ppas = lm->emeta_sec[0];
	int id = line->id;
	int rq_ppas, rq_len;
	int cmd_op, bio_op;
	int i, j;
	int ret;

	if (dir == PBLK_WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
	} else if (dir == PBLK_READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
	} else
		return -EINVAL;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&dma_meta_list);
	if (!meta_list)
		return -ENOMEM;

	ppa_list = meta_list + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

next_rq:
	memset(&rqd, 0, sizeof(struct nvm_rq));

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	rq_len = rq_ppas * geo->csecs;

	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_rqd_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.meta_list = meta_list;
	rqd.ppa_list = ppa_list;
	rqd.dma_meta_list = dma_meta_list;
	rqd.dma_ppa_list = dma_ppa_list;
	rqd.opcode = cmd_op;
	rqd.nr_ppas = rq_ppas;

	if (dir == PBLK_WRITE) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
		for (i = 0; i < rqd.nr_ppas; ) {
			spin_lock(&line->lock);
			paddr = __pblk_alloc_page(pblk, line, min);
			spin_unlock(&line->lock);
			for (j = 0; j < min; j++, i++, paddr++) {
				meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, id);
			}
		}
	} else {
		for (i = 0; i < rqd.nr_ppas; ) {
			struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
			int pos = pblk_ppa_to_pos(geo, ppa);
			int read_type = PBLK_READ_RANDOM;

			if (pblk_io_aligned(pblk, rq_ppas))
				read_type = PBLK_READ_SEQUENTIAL;
			rqd.flags = pblk_set_read_mode(pblk, read_type);

			while (test_bit(pos, line->blk_bitmap)) {
				paddr += min;
				if (pblk_boundary_paddr_checks(pblk, paddr)) {
					pblk_err(pblk, "corrupt emeta line:%d\n",
								line->id);
					bio_put(bio);
					ret = -EINTR;
					goto free_rqd_dma;
				}

				ppa = addr_to_gen_ppa(pblk, paddr, id);
				pos = pblk_ppa_to_pos(geo, ppa);
			}

			if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
				pblk_err(pblk, "corrupt emeta line:%d\n",
								line->id);
				bio_put(bio);
				ret = -EINTR;
				goto free_rqd_dma;
			}

			for (j = 0; j < min; j++, i++, paddr++)
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, line->id);
		}
	}

	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_rqd_dma;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		if (dir == PBLK_WRITE)
			pblk_log_write_err(pblk, &rqd);
		else
			pblk_log_read_err(pblk, &rqd);
	}

	emeta_buf += rq_len;
	left_ppas -= rq_ppas;
	if (left_ppas)
		goto next_rq;
free_rqd_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;
}
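
/* smeta is stored at the start of the first good block in the line, so its
 * start address is the write-unit offset of that block.
 */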
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int bit;

	/* This usually only happens on bad lines */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (bit >= lm->blk_per_line)
		return -1;

	return bit * geo->ws_opt;
}
static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
				     u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	struct bio *bio;
	struct nvm_rq rqd;
	__le64 *lba_list = NULL;
	int i, ret;
	int cmd_op, bio_op;
	int flags;

	if (dir == PBLK_WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
		flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
		lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	} else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
		flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	} else
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return -ENOMEM;

	rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
	rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_ppa_list;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.opcode = cmd_op;
	rqd.flags = flags;
	rqd.nr_ppas = lm->smeta_sec;

	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

		if (dir == PBLK_WRITE) {
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			meta_list[i].lba = lba_list[paddr] = addr_empty;
		}
	}

	/*
	 * This I/O is sent by the write thread when a line is replaced. Since
	 * the write thread is the only one sending write and erase commands,
	 * there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_ppa_list;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		if (dir == PBLK_WRITE) {
			pblk_log_write_err(pblk, &rqd);
			ret = 1;
		} else if (dir == PBLK_READ)
			pblk_log_read_err(pblk, &rqd);
	}

free_ppa_list:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);

	return ret;
}
int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
	u64 bpaddr = pblk_line_smeta_start(pblk, line);

	return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
}
int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf)
{
	return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
						line->emeta_ssec, PBLK_READ);
}
static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
			    struct ppa_addr ppa)
{
	rqd->opcode = NVM_OP_ERASE;
	rqd->ppa_addr = ppa;
	rqd->nr_ppas = 1;
	rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
	rqd->bio = NULL;
}
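
/* Synchronous erase of a single chunk; the completion path is run inline
 * through __pblk_end_io_erase().
 */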
static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq rqd = {NULL};
	int ret;

	pblk_setup_e_rq(pblk, &rqd, ppa);

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io_sync(pblk, &rqd);

	__pblk_end_io_erase(pblk, &rqd);

	return ret;
}
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct ppa_addr ppa;
	int ret, bit = -1;

	/* Erase only good blocks, one at a time */
	do {
		spin_lock(&line->lock);
		bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
								bit + 1);
		if (bit >= lm->blk_per_line) {
			spin_unlock(&line->lock);
			break;
		}

		ppa = pblk->luns[bit].bppa; /* set ch and lun */
		ppa.a.blk = line->id;

		atomic_dec(&line->left_eblks);
		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
		spin_unlock(&line->lock);

		ret = pblk_blk_erase_sync(pblk, ppa);
		if (ret) {
			pblk_err(pblk, "failed to erase line %d\n", line->id);
			return ret;
		}
	} while (1);

	return 0;
}
static void pblk_line_setup_metadata(struct pblk_line *line,
				     struct pblk_line_mgmt *l_mg,
				     struct pblk_line_meta *lm)
{
	int meta_line;

	lockdep_assert_held(&l_mg->free_lock);

retry_meta:
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	if (meta_line == PBLK_DATA_LINES) {
		spin_unlock(&l_mg->free_lock);
		io_schedule();
		spin_lock(&l_mg->free_lock);
		goto retry_meta;
	}

	set_bit(meta_line, &l_mg->meta_bitmap);
	line->meta_line = meta_line;

	line->smeta = l_mg->sline_meta[meta_line];
	line->emeta = l_mg->eline_meta[meta_line];

	memset(line->smeta, 0, lm->smeta_len);
	memset(line->emeta->buf, 0, lm->emeta_len[0]);

	line->emeta->mem = 0;
	atomic_set(&line->emeta->sync, 0);
}
/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_line *cur)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
	int nr_blk_line;

	/* After erasing the line, new bad blocks might appear and we risk
	 * having an invalid line
	 */
	nr_blk_line = lm->blk_per_line -
			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	if (nr_blk_line < lm->min_blk_line) {
		spin_lock(&l_mg->free_lock);
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		spin_unlock(&l_mg->free_lock);

		pblk_debug(pblk, "line %d is bad\n", line->id);

		return 0;
	}

	/* Run-time metadata */
	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

	/* Mark LUNs allocated in this line (all for now) */
	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
	memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
	smeta_buf->header.id = cpu_to_le32(line->id);
	smeta_buf->header.type = cpu_to_le16(line->type);
	smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
	smeta_buf->header.version_minor = SMETA_VERSION_MINOR;

	/* Start metadata */
	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);

	/* Fill metadata among lines */
	if (cur) {
		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
		smeta_buf->prev_id = cpu_to_le32(cur->id);
		cur->emeta->buf->next_id = cpu_to_le32(line->id);
	} else {
		smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
	}

	/* All smeta must be set at this point */
	smeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

	/* End metadata */
	memcpy(&emeta_buf->header, &smeta_buf->header,
						sizeof(struct line_header));

	emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
	emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
	emeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));

	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
	emeta_buf->crc = cpu_to_le32(0);
	emeta_buf->prev_id = smeta_buf->prev_id;

	return 1;
}
static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;

	line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!line->map_bitmap)
		return -ENOMEM;

	/* will be initialized using bb info from map_bitmap */
	line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!line->invalid_bitmap) {
		kfree(line->map_bitmap);
		line->map_bitmap = NULL;
		return -ENOMEM;
	}

	return 0;
}
/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
			     int init)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	u64 off;
	int bit = -1;
	int emeta_secs;

	line->sec_in_line = lm->sec_per_line;

	/* Capture bad block information on line mapping bitmaps */
	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
					bit + 1)) < lm->blk_per_line) {
		off = bit * geo->ws_opt;
		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
							lm->sec_per_line);
		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
							lm->sec_per_line);
		line->sec_in_line -= geo->clba;
	}

	/* Mark smeta metadata sectors as bad sectors */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	off = bit * geo->ws_opt;
	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
	line->sec_in_line -= lm->smeta_sec;
	line->smeta_ssec = off;
	line->cur_sec = off + lm->smeta_sec;

	if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
		pblk_debug(pblk, "line smeta I/O failed. Retry\n");
		return 0;
	}

	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
	 * blocks to make sure that there are enough sectors to store emeta
	 */
	emeta_secs = lm->emeta_sec[0];
	off = lm->sec_per_line;
	while (emeta_secs) {
		off -= geo->ws_opt;
		if (!test_bit(off, line->invalid_bitmap)) {
			bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
			emeta_secs -= geo->ws_opt;
		}
	}

	line->emeta_ssec = off;
	line->sec_in_line -= lm->emeta_sec[0];
	line->nr_valid_lbas = 0;
	line->left_msecs = line->sec_in_line;
	*line->vsc = cpu_to_le32(line->sec_in_line);

	if (lm->sec_per_line - line->sec_in_line !=
		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		pblk_err(pblk, "unexpected line %d is bad\n", line->id);

		return 0;
	}

	return 1;
}
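
/* On a line that has never been written, chunks already reported free by the
 * device do not need an erase; account for them in the erase bitmap and
 * return the number of blocks that still require erasing.
 */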
static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int blk_to_erase = atomic_read(&line->blk_in_line);
	int i;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
		int state = line->chks[pos].state;

		/* Free chunks should not be erased */
		if (state & NVM_CHK_ST_FREE) {
			set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
							line->erase_bitmap);
			blk_to_erase--;
		}
	}

	return blk_to_erase;
}
static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int blk_in_line = atomic_read(&line->blk_in_line);
	int blk_to_erase;

	/* Bad blocks do not need to be erased */
	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

	spin_lock(&line->lock);

	/* If we have not written to this line, we need to mark up free chunks
	 * as already erased
	 */
	if (line->state == PBLK_LINESTATE_NEW) {
		blk_to_erase = pblk_prepare_new_line(pblk, line);
		line->state = PBLK_LINESTATE_FREE;
	} else {
		blk_to_erase = blk_in_line;
	}

	if (blk_in_line < lm->min_blk_line) {
		spin_unlock(&line->lock);
		return -EAGAIN;
	}

	if (line->state != PBLK_LINESTATE_FREE) {
		WARN(1, "pblk: corrupted line %d, state %d\n",
							line->id, line->state);
		spin_unlock(&line->lock);
		return -EINTR;
	}

	line->state = PBLK_LINESTATE_OPEN;

	atomic_set(&line->left_eblks, blk_to_erase);
	atomic_set(&line->left_seblks, blk_to_erase);

	line->meta_distance = lm->meta_distance;
	spin_unlock(&line->lock);

	kref_init(&line->ref);

	return 0;
}
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	l_mg->data_line = line;
	list_del(&line->list);

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		list_add(&line->list, &l_mg->free_list);
		spin_unlock(&l_mg->free_lock);
		return ret;
	}
	spin_unlock(&l_mg->free_lock);

	ret = pblk_line_alloc_bitmaps(pblk, line);
	if (ret)
		return ret;

	if (!pblk_line_init_bb(pblk, line, 0)) {
		ret = -EINTR;
		goto fail;
	}

	pblk_rl_free_lines_dec(&pblk->rl, line, true);

	return 0;

fail:
	spin_lock(&l_mg->free_lock);
	list_add(&line->list, &l_mg->free_list);
	spin_unlock(&l_mg->free_lock);

	return ret;
}
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
	kfree(line->map_bitmap);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}
static void pblk_line_reinit(struct pblk_line *line)
{
	*line->vsc = cpu_to_le32(EMPTY_ENTRY);

	line->map_bitmap = NULL;
	line->invalid_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}
void pblk_line_free(struct pblk_line *line)
{
	kfree(line->map_bitmap);
	kfree(line->invalid_bitmap);

	pblk_line_reinit(line);
}
struct pblk_line *pblk_line_get(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	int ret, bit;

	lockdep_assert_held(&l_mg->free_lock);

retry:
	if (list_empty(&l_mg->free_list)) {
		pblk_err(pblk, "no free lines\n");
		return NULL;
	}

	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
	list_del(&line->list);
	l_mg->nr_free_lines--;

	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (unlikely(bit >= lm->blk_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);

		pblk_debug(pblk, "line %d is bad\n", line->id);
		goto retry;
	}

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		switch (ret) {
		case -EAGAIN:
			list_add(&line->list, &l_mg->bad_list);
			goto retry;
		case -EINTR:
			list_add(&line->list, &l_mg->corrupt_list);
			goto retry;
		default:
			pblk_err(pblk, "failed to prepare line %d\n", line->id);
			list_add(&line->list, &l_mg->free_list);
			l_mg->nr_free_lines++;
			return NULL;
		}
	}

	return line;
}
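
/* Replace a line that failed setup with a fresh one from the free list,
 * handing over its bitmaps and metadata buffers to the new line.
 */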
static struct pblk_line *pblk_line_retry(struct pblk *pblk,
					 struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *retry_line;

retry:
	spin_lock(&l_mg->free_lock);
	retry_line = pblk_line_get(pblk);
	if (!retry_line) {
		l_mg->data_line = NULL;
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	retry_line->map_bitmap = line->map_bitmap;
	retry_line->invalid_bitmap = line->invalid_bitmap;
	retry_line->smeta = line->smeta;
	retry_line->emeta = line->emeta;
	retry_line->meta_line = line->meta_line;

	pblk_line_reinit(line);

	l_mg->data_line = retry_line;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line, false);

	if (pblk_line_erase(pblk, retry_line))
		goto retry;

	return retry_line;
}
static void pblk_set_space_limit(struct pblk *pblk)
{
	struct pblk_rl *rl = &pblk->rl;

	atomic_set(&rl->rb_space, 0);
}
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;

	spin_lock(&l_mg->free_lock);
	line = pblk_line_get(pblk);
	if (!line) {
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	line->seq_nr = l_mg->d_seq_nr++;
	line->type = PBLK_LINETYPE_DATA;
	l_mg->data_line = line;

	pblk_line_setup_metadata(line, l_mg, &pblk->lm);

	/* Allocate next line for preparation */
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_set_space_limit(pblk);

		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	}
	spin_unlock(&l_mg->free_lock);

	if (pblk_line_alloc_bitmaps(pblk, line))
		return NULL;

	if (pblk_line_erase(pblk, line)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;
	}

retry_setup:
	if (!pblk_line_init_metadata(pblk, line, NULL)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, line, 1)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	pblk_rl_free_lines_dec(&pblk->rl, line, true);

	return line;
}
static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
{
	lockdep_assert_held(&pblk->l_mg.free_lock);

	pblk_set_space_limit(pblk);
	pblk->state = PBLK_STATE_STOPPING;
}
static void pblk_line_close_meta_sync(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line, *tline;
	LIST_HEAD(list);

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return;
	}

	list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
	spin_unlock(&l_mg->close_lock);

	list_for_each_entry_safe(line, tline, &list, list) {
		struct pblk_emeta *emeta = line->emeta;

		while (emeta->mem < lm->emeta_len[0]) {
			int ret;

			ret = pblk_submit_meta_io(pblk, line);
			if (ret) {
				pblk_err(pblk, "sync meta line %d failed (%d)\n",
							line->id, ret);
				return;
			}
		}
	}

	pblk_wait_for_meta(pblk);
	flush_workqueue(pblk->close_wq);
}
void __pblk_pipeline_flush(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	if (pblk->state == PBLK_STATE_RECOVERING ||
					pblk->state == PBLK_STATE_STOPPED) {
		spin_unlock(&l_mg->free_lock);
		return;
	}
	pblk->state = PBLK_STATE_RECOVERING;
	spin_unlock(&l_mg->free_lock);

	pblk_flush_writer(pblk);
	pblk_wait_for_meta(pblk);

	ret = pblk_recov_pad(pblk);
	if (ret) {
		pblk_err(pblk, "could not close data on teardown(%d)\n", ret);
		return;
	}

	flush_workqueue(pblk->bb_wq);
	pblk_line_close_meta_sync(pblk);
}
void __pblk_pipeline_stop(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	spin_lock(&l_mg->free_lock);
	pblk->state = PBLK_STATE_STOPPED;
	l_mg->data_line = NULL;
	l_mg->data_next = NULL;
	spin_unlock(&l_mg->free_lock);
}
void pblk_pipeline_stop(struct pblk *pblk)
{
	__pblk_pipeline_flush(pblk);
	__pblk_pipeline_stop(pblk);
}
struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *cur, *new = NULL;
	unsigned int left_seblks;

	new = l_mg->data_next;
	if (!new)
		goto out;

	spin_lock(&l_mg->free_lock);
	cur = l_mg->data_line;
	l_mg->data_line = new;

	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
	spin_unlock(&l_mg->free_lock);

retry_erase:
	left_seblks = atomic_read(&new->left_seblks);
	if (left_seblks) {
		/* If line is not fully erased, erase it */
		if (atomic_read(&new->left_eblks)) {
			if (pblk_line_erase(pblk, new))
				goto out;
		} else {
			io_schedule();
		}
		goto retry_erase;
	}

	if (pblk_line_alloc_bitmaps(pblk, new))
		return NULL;

retry_setup:
	if (!pblk_line_init_metadata(pblk, new, cur)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, new, 1)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	pblk_rl_free_lines_dec(&pblk->rl, new, true);

	/* Allocate next line for preparation */
	spin_lock(&l_mg->free_lock);
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_stop_writes(pblk, new);
		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	}
	spin_unlock(&l_mg->free_lock);

out:
	return new;
}
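
/* Return a fully garbage-collected line to the free list and credit it back
 * to the rate limiter.
 */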
static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;

	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	line->state = PBLK_LINESTATE_FREE;
	line->gc_group = PBLK_LINEGC_NONE;
	pblk_line_free(line);

	if (line->w_err_gc->has_write_err) {
		pblk_rl_werr_line_out(&pblk->rl);
		line->w_err_gc->has_write_err = 0;
	}

	spin_unlock(&line->lock);
	atomic_dec(&gc->pipeline_gc);

	spin_lock(&l_mg->free_lock);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_inc(&pblk->rl, line);
}
static void pblk_line_put_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_put_ws = container_of(work,
						struct pblk_line_ws, ws);
	struct pblk *pblk = line_put_ws->pblk;
	struct pblk_line *line = line_put_ws->line;

	__pblk_line_put(pblk, line);
	mempool_free(line_put_ws, &pblk->gen_ws_pool);
}
void pblk_line_put(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;

	__pblk_line_put(pblk, line);
}
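
/* kref release that defers the line put to the read-end workqueue instead of
 * doing it in the caller's context.
 */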
void pblk_line_put_wq(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;
	struct pblk_line_ws *line_put_ws;

	line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
	if (!line_put_ws)
		return;

	line_put_ws->pblk = pblk;
	line_put_ws->line = line;
	line_put_ws->priv = NULL;

	INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
	queue_work(pblk->r_end_wq, &line_put_ws->ws);
}
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq *rqd;
	int err;

	rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);

	pblk_setup_e_rq(pblk, rqd, ppa);

	rqd->end_io = pblk_end_io_erase;
	rqd->private = pblk;

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
					pblk_ppa_to_line(ppa),
					pblk_ppa_to_pos(geo, ppa));
	}

	return err;
}
*pblk_line_get_data(struct pblk
*pblk
)
1696 return pblk
->l_mg
.data_line
;
/* For now, always erase next line */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
{
	return pblk->l_mg.data_next;
}
int pblk_line_is_full(struct pblk_line *line)
{
	return (line->left_msecs == 0);
}
static void pblk_line_should_sync_meta(struct pblk *pblk)
{
	if (pblk_rl_is_limit(&pblk->rl))
		pblk_line_close_meta_sync(pblk);
}
void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list;
	int i;

#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt closed line %d\n", line->id);
#endif

	spin_lock(&l_mg->free_lock);
	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
	spin_unlock(&l_mg->free_lock);

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
	line->state = PBLK_LINESTATE_CLOSED;
	move_list = pblk_line_gc_list(pblk, line);

	list_add_tail(&line->list, move_list);

	kfree(line->map_bitmap);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
		int state = line->chks[pos].state;

		if (!(state & NVM_CHK_ST_OFFLINE))
			state = NVM_CHK_ST_CLOSED;
	}

	spin_unlock(&line->lock);
	spin_unlock(&l_mg->gc_lock);
}
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);

	/* No need for exact vsc value; avoid a big line lock and take an
	 * approximation.
	 */
	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);

	wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
	wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
	wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));

	if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
		emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
		memcpy(emeta_buf->header.uuid, pblk->instance_uuid, 16);
		emeta_buf->header.id = cpu_to_le32(line->id);
		emeta_buf->header.type = cpu_to_le16(line->type);
		emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
		emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
		emeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
	}

	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));

	spin_lock(&l_mg->close_lock);
	spin_lock(&line->lock);

	/* Update the in-memory start address for emeta, in case it has
	 * shifted due to write errors
	 */
	if (line->emeta_ssec != line->cur_sec)
		line->emeta_ssec = line->cur_sec;

	list_add_tail(&line->list, &l_mg->emeta_list);
	spin_unlock(&line->lock);
	spin_unlock(&l_mg->close_lock);

	pblk_line_should_sync_meta(pblk);
}
static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	unsigned int lba_list_size = lm->emeta_len[2];
	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
	struct pblk_emeta *emeta = line->emeta;

	w_err_gc->lba_list = pblk_malloc(lba_list_size,
					 l_mg->emeta_alloc_type, GFP_KERNEL);
	memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
				lba_list_size);
}
void pblk_line_close_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;
	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;

	/* Write errors make the emeta start address stored in smeta invalid,
	 * so keep a copy of the lba list until we've gc'd the line
	 */
	if (w_err_gc->has_write_err)
		pblk_save_lba_list(pblk, line);

	pblk_line_close(pblk, line);
	mempool_free(line_ws, &pblk->gen_ws_pool);
}
void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		      void (*work)(struct work_struct *), gfp_t gfp_mask,
		      struct workqueue_struct *wq)
{
	struct pblk_line_ws *line_ws;

	line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);

	line_ws->pblk = pblk;
	line_ws->line = line;
	line_ws->priv = priv;

	INIT_WORK(&line_ws->ws, work);
	queue_work(wq, &line_ws->ws);
}
static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
			     int nr_ppas, int pos)
{
	struct pblk_lun *rlun = &pblk->luns[pos];
	int ret;

	/*
	 * Only send one inflight I/O per LUN. Since we map at a page
	 * granularity, all ppas in the I/O will map to the same LUN
	 */
#ifdef CONFIG_NVM_PBLK_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
				ppa_list[0].a.ch != ppa_list[i].a.ch);
#endif

	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
	if (ret == -ETIME || ret == -EINTR)
		pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
				-ret);
}
void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}
void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		  unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

	/* If the LUN has been locked for this same request, do not attempt to
	 * lock it again
	 */
	if (test_and_set_bit(pos, lun_bitmap))
		return;

	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}
void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

#ifdef CONFIG_NVM_PBLK_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
				ppa_list[0].a.ch != ppa_list[i].a.ch);
#endif

	rlun = &pblk->luns[pos];
	up(&rlun->wr_sem);
}
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int num_lun = geo->all_luns;
	int bit = -1;

	while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
		rlun = &pblk->luns[bit];
		up(&rlun->wr_sem);
	}
}
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
	struct ppa_addr ppa_l2p;

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
		pblk_map_invalidate(pblk, ppa_l2p);

	pblk_trans_map_set(pblk, lba, ppa);
	spin_unlock(&pblk->trans_lock);
}
void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	pblk_update_map(pblk, lba, ppa);
}
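
/* Update the L2P entry for a sector rewritten by GC. The update is only
 * applied if the entry still points to the sector GC read from; otherwise a
 * newer user write has won the race and the GC copy is dropped.
 */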
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
		       struct pblk_line *gc_line, u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int ret = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa_new));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
#endif

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return 0;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);

	if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
		spin_lock(&gc_line->lock);
		WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
						"pblk: corrupted GC update");
		spin_unlock(&gc_line->lock);

		ret = 0;
		goto out;
	}

	pblk_trans_map_set(pblk, lba, ppa_new);
out:
	spin_unlock(&pblk->trans_lock);
	return ret;
}
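
/* Update the L2P entry when a write buffer entry has been persisted to the
 * device. Padded entries are invalidated, and the update is skipped if the
 * cacheline was overwritten while the write was in flight.
 */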
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
			 struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
{
	struct ppa_addr ppa_l2p;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa_mapped));
#endif
	/* Invalidate and discard padded entries */
	if (lba == ADDR_EMPTY) {
		atomic64_inc(&pblk->pad_wa);
#ifdef CONFIG_NVM_PBLK_DEBUG
		atomic_long_inc(&pblk->padded_wb);
#endif
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		return;
	}

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	/* Do not update L2P if the cacheline has been updated. In this case,
	 * the mapped ppa must be invalidated
	 */
	if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		goto out;
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
#endif

	pblk_trans_map_set(pblk, lba, ppa_mapped);
out:
	spin_unlock(&pblk->trans_lock);
}
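
/* Sequential L2P lookup; takes a reference on every line that backs a mapped
 * sector so it cannot be freed while the read is in flight.
 */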
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs)
{
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr ppa;

		ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);

		/* If the L2P entry maps to a line, the reference is valid */
		if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
			int line_id = pblk_ppa_to_line(ppa);
			struct pblk_line *line = &pblk->lines[line_id];

			kref_get(&line->ref);
		}
	}
	spin_unlock(&pblk->trans_lock);
}
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs)
{
	u64 lba;
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		lba = lba_list[i];
		if (lba != ADDR_EMPTY) {
			/* logic error: lba out-of-bounds. Ignore update */
			if (!(lba < pblk->rl.nr_secs)) {
				WARN(1, "pblk: corrupted L2P map request\n");
				continue;
			}
			ppas[i] = pblk_trans_map_get(pblk, lba);
		}
	}
	spin_unlock(&pblk->trans_lock);
}