/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#include "pblk.h"
static void pblk_line_mark_bb(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct ppa_addr *ppa = line_ws->priv;
	int ret;

	ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
	if (ret) {
		struct pblk_line *line;
		int pos;

		line = &pblk->lines[pblk_ppa_to_line(*ppa)];
		pos = pblk_ppa_to_pos(&dev->geo, *ppa);

		pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
				line->id, pos);
	}

	kfree(ppa);
	mempool_free(line_ws, pblk->gen_ws_pool);
}
static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
			 struct ppa_addr *ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, *ppa);

	pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
	atomic_long_inc(&pblk->erase_failed);

	atomic_dec(&line->blk_in_line);
	if (test_and_set_bit(pos, line->blk_bitmap))
		pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
							line->id, pos);

	pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
						GFP_ATOMIC, pblk->bb_wq);
}
static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_line *line;

	line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
	atomic_dec(&line->left_seblks);

	if (rqd->error) {
		struct ppa_addr *ppa;

		ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
		if (!ppa)
			return;

		*ppa = rqd->ppa_addr;
		pblk_mark_bb(pblk, line, ppa);
	}

	atomic_dec(&pblk->inflight_io);
}
/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;

	__pblk_end_io_erase(pblk, rqd);
	mempool_free(rqd, pblk->e_rq_pool);
}
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
			   u64 paddr)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;

	/* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
	 * table is modified with reclaimed sectors, a check is done to ensure
	 * that newer updates are not overwritten.
	 */
	spin_lock(&line->lock);
	WARN_ON(line->state == PBLK_LINESTATE_FREE);

	if (test_and_set_bit(paddr, line->invalid_bitmap)) {
		WARN_ONCE(1, "pblk: double invalidate\n");
		spin_unlock(&line->lock);
		return;
	}
	le32_add_cpu(line->vsc, -1);

	if (line->state == PBLK_LINESTATE_CLOSED)
		move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);

	if (move_list) {
		spin_lock(&l_mg->gc_lock);
		spin_lock(&line->lock);
		/* Prevent moving a line that has just been chosen for GC */
		if (line->state == PBLK_LINESTATE_GC) {
			spin_unlock(&line->lock);
			spin_unlock(&l_mg->gc_lock);
			return;
		}
		spin_unlock(&line->lock);

		list_move_tail(&line->list, move_list);
		spin_unlock(&l_mg->gc_lock);
	}
}
void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
	struct pblk_line *line;
	u64 paddr;
	int line_id;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
	BUG_ON(pblk_ppa_empty(ppa));
#endif

	line_id = pblk_ppa_to_line(ppa);
	line = &pblk->lines[line_id];
	paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

	__pblk_map_invalidate(pblk, line, paddr);
}
static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
				  unsigned int nr_secs)
{
	sector_t lba;

	spin_lock(&pblk->trans_lock);
	for (lba = slba; lba < slba + nr_secs; lba++) {
		struct ppa_addr ppa;

		ppa = pblk_trans_map_get(pblk, lba);

		if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);

		pblk_ppa_set_empty(&ppa);
		pblk_trans_map_set(pblk, lba, ppa);
	}
	spin_unlock(&pblk->trans_lock);
}
/* Caller must guarantee that the request is a valid type */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
{
	mempool_t *pool;
	struct nvm_rq *rqd;
	int rq_size;

	switch (type) {
	case PBLK_WRITE:
	case PBLK_WRITE_INT:
		pool = pblk->w_rq_pool;
		rq_size = pblk_w_rq_size;
		break;
	case PBLK_READ:
		pool = pblk->r_rq_pool;
		rq_size = pblk_g_rq_size;
		break;
	default:
		pool = pblk->e_rq_pool;
		rq_size = pblk_g_rq_size;
	}

	rqd = mempool_alloc(pool, GFP_KERNEL);
	memset(rqd, 0, rq_size);

	return rqd;
}
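/*
 * Illustrative note (not in the original source): the type passed here must
 * match the type later given to pblk_free_rqd(), since both use it to pick
 * the mempool, e.g.:
 *
 *	struct nvm_rq *rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
 *	...
 *	pblk_free_rqd(pblk, rqd, PBLK_ERASE);
 */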
/* Typically used on completion path. Cannot guarantee request consistency */
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	mempool_t *pool;

	switch (type) {
	case PBLK_WRITE:
		kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
	case PBLK_WRITE_INT:
		pool = pblk->w_rq_pool;
		break;
	case PBLK_READ:
		pool = pblk->r_rq_pool;
		break;
	case PBLK_ERASE:
		pool = pblk->e_rq_pool;
		break;
	default:
		pr_err("pblk: trying to free unknown rqd type\n");
		return;
	}

	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
	mempool_free(rqd, pool);
}
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages)
{
	struct bio_vec bv;
	int i;

	WARN_ON(off + nr_pages != bio->bi_vcnt);

	for (i = off; i < nr_pages + off; i++) {
		bv = bio->bi_io_vec[i];
		mempool_free(bv.bv_page, pblk->page_bio_pool);
	}
}
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages)
{
	struct request_queue *q = pblk->dev->q;
	struct page *page;
	int i, ret;

	for (i = 0; i < nr_pages; i++) {
		page = mempool_alloc(pblk->page_bio_pool, flags);

		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			mempool_free(page, pblk->page_bio_pool);
			goto err;
		}
	}

	return 0;
err:
	pblk_bio_free_pages(pblk, bio, 0, i - 1);
	return -1;
}
static void pblk_write_kick(struct pblk *pblk)
{
	wake_up_process(pblk->writer_ts);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(struct timer_list *t)
{
	struct pblk *pblk = from_timer(pblk, t, wtimer);

	/* kick the write thread every tick to flush outstanding data */
	pblk_write_kick(pblk);
}
void pblk_write_should_kick(struct pblk *pblk)
{
	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

	if (secs_avail >= pblk->min_write_pgs)
		pblk_write_kick(pblk);
}
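/*
 * Illustrative example (not in the original source): with min_write_pgs = 8,
 * a buffer holding 21 sectors wakes the writer immediately, while 5 buffered
 * sectors are left to the 1s timer in pblk_write_timer_fn() to flush.
 */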
void pblk_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

static void pblk_wait_for_meta(struct pblk *pblk)
{
	do {
		if (!atomic_read(&pblk->inflight_io))
			break;

		schedule();
	} while (1);
}

static void pblk_flush_writer(struct pblk *pblk)
{
	pblk_rb_flush(&pblk->rwb);
	do {
		if (!pblk_rb_sync_count(&pblk->rwb))
			break;

		pblk_write_kick(pblk);
		schedule();
	} while (1);
}
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;
	int vsc = le32_to_cpu(*line->vsc);

	lockdep_assert_held(&line->lock);

	if (!vsc) {
		if (line->gc_group != PBLK_LINEGC_FULL) {
			line->gc_group = PBLK_LINEGC_FULL;
			move_list = &l_mg->gc_full_list;
		}
	} else if (vsc < lm->high_thrs) {
		if (line->gc_group != PBLK_LINEGC_HIGH) {
			line->gc_group = PBLK_LINEGC_HIGH;
			move_list = &l_mg->gc_high_list;
		}
	} else if (vsc < lm->mid_thrs) {
		if (line->gc_group != PBLK_LINEGC_MID) {
			line->gc_group = PBLK_LINEGC_MID;
			move_list = &l_mg->gc_mid_list;
		}
	} else if (vsc < line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_LOW) {
			line->gc_group = PBLK_LINEGC_LOW;
			move_list = &l_mg->gc_low_list;
		}
	} else if (vsc == line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_EMPTY) {
			line->gc_group = PBLK_LINEGC_EMPTY;
			move_list = &l_mg->gc_empty_list;
		}
	} else {
		line->state = PBLK_LINESTATE_CORRUPT;
		line->gc_group = PBLK_LINEGC_NONE;
		move_list = &l_mg->corrupt_list;
		pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
						line->id, vsc,
						line->sec_in_line,
						lm->high_thrs, lm->mid_thrs);
	}

	return move_list;
}
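/*
 * Worked example (illustrative, assuming high_thrs = 512, mid_thrs = 1024 and
 * sec_in_line = 4096): a line with vsc = 0 valid sectors goes to the full
 * list (nothing left to move), vsc = 100 to the high list (cheapest to GC),
 * vsc = 700 to the mid list, vsc = 2000 to the low list and vsc = 4096 to
 * the empty list; any larger value is impossible and marks the line corrupt.
 */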
void pblk_discard(struct pblk *pblk, struct bio *bio)
{
	sector_t slba = pblk_get_lba(bio);
	sector_t nr_secs = pblk_get_secs(bio);

	pblk_invalidate_range(pblk, slba, nr_secs);
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	/* Empty page read is not necessarily an error (e.g., L2P recovery) */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		atomic_long_inc(&pblk->read_empty);
		return;
	}

	switch (rqd->error) {
	case NVM_RSP_WARN_HIGHECC:
		atomic_long_inc(&pblk->read_high_ecc);
		break;
	case NVM_RSP_ERR_FAILECC:
	case NVM_RSP_ERR_FAILCRC:
		atomic_long_inc(&pblk->read_failed);
		break;
	default:
		pr_err("pblk: unknown read error:%d\n", rqd->error);
	}
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
	pblk->sec_per_write = sec_per_write;
}
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

#ifdef CONFIG_NVM_DEBUG
	int ret;

	ret = pblk_check_io(pblk, rqd);
	if (ret)
		return ret;
#endif

	atomic_inc(&pblk->inflight_io);

	return nvm_submit_io(dev, rqd);
}

int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

#ifdef CONFIG_NVM_DEBUG
	int ret;

	ret = pblk_check_io(pblk, rqd);
	if (ret)
		return ret;
#endif

	atomic_inc(&pblk->inflight_io);

	return nvm_submit_io_sync(dev, rqd);
}
static void pblk_bio_map_addr_endio(struct bio *bio)
{
	bio_put(bio);
}

struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      int alloc_type, gfp_t gfp_mask)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	void *kaddr = data;
	struct page *page;
	struct bio *bio;
	int i, ret;

	if (alloc_type == PBLK_KMALLOC_META)
		return bio_map_kern(dev->q, kaddr, len, gfp_mask);

	bio = bio_kmalloc(gfp_mask, nr_secs);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_secs; i++) {
		page = vmalloc_to_page(kaddr);
		if (!page) {
			pr_err("pblk: could not map vmalloc bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
		if (ret != PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		kaddr += PAGE_SIZE;
	}

	bio->bi_end_io = pblk_bio_map_addr_endio;
out:
	return bio;
}
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush)
{
	int max = pblk->sec_per_write;
	int min = pblk->min_write_pgs;
	int secs_to_sync = 0;

	if (secs_avail >= max)
		secs_to_sync = max;
	else if (secs_avail >= min)
		secs_to_sync = min * (secs_avail / min);
	else if (secs_to_flush)
		secs_to_sync = min;

	return secs_to_sync;
}
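/*
 * Worked example (illustrative, not in the original source): with
 * sec_per_write = 64 and min_write_pgs = 8, secs_avail = 100 syncs 64
 * sectors, secs_avail = 21 syncs 16 (two full minimal writes) and
 * secs_avail = 5 syncs nothing unless secs_to_flush is set, in which case
 * the minimal write size of 8 is used and the write path pads the request.
 */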
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	spin_lock(&line->lock);
	addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	line->cur_sec = addr - nr_secs;

	for (i = 0; i < nr_secs; i++, line->cur_sec--)
		WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
	spin_unlock(&line->lock);
}

u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	lockdep_assert_held(&line->lock);

	/* logic error: ppa out-of-bounds. Prevent generating bad address */
	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
		WARN(1, "pblk: page allocation out of bounds\n");
		nr_secs = pblk->lm.sec_per_line - line->cur_sec;
	}

	line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	for (i = 0; i < nr_secs; i++, line->cur_sec++)
		WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

	return addr;
}
u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;

	/* Lock needed in case a write fails and a recovery needs to remap
	 * failed write buffer entries
	 */
	spin_lock(&line->lock);
	addr = __pblk_alloc_page(pblk, line, nr_secs);
	line->left_msecs -= nr_secs;
	WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
	spin_unlock(&line->lock);

	return addr;
}
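/*
 * Illustrative example: with cur_sec = 100 and a clean map_bitmap,
 * pblk_alloc_page(pblk, line, 4) returns addr = 100, sets bits 100-103 in
 * map_bitmap, leaves cur_sec at 104 and decreases left_msecs by 4.
 */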
u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
	u64 paddr;

	spin_lock(&line->lock);
	paddr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	spin_unlock(&line->lock);

	return paddr;
}
/*
 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
 * taking the per LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
				     void *emeta_buf, u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	void *ppa_list, *meta_list;
	struct bio *bio;
	struct nvm_rq rqd;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int min = pblk->min_write_pgs;
	int left_ppas = lm->emeta_sec[0];
	int id = line->id;
	int rq_ppas, rq_len;
	int cmd_op, bio_op;
	int i, j;
	int ret;

	if (dir == PBLK_WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
	} else if (dir == PBLK_READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
	} else
		return -EINVAL;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&dma_meta_list);
	if (!meta_list)
		return -ENOMEM;

	ppa_list = meta_list + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

next_rq:
	memset(&rqd, 0, sizeof(struct nvm_rq));

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	rq_len = rq_ppas * geo->sec_size;

	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_rqd_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.meta_list = meta_list;
	rqd.ppa_list = ppa_list;
	rqd.dma_meta_list = dma_meta_list;
	rqd.dma_ppa_list = dma_ppa_list;
	rqd.opcode = cmd_op;
	rqd.nr_ppas = rq_ppas;

	if (dir == PBLK_WRITE) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
		for (i = 0; i < rqd.nr_ppas; ) {
			spin_lock(&line->lock);
			paddr = __pblk_alloc_page(pblk, line, min);
			spin_unlock(&line->lock);
			for (j = 0; j < min; j++, i++, paddr++) {
				meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, id);
			}
		}
	} else {
		for (i = 0; i < rqd.nr_ppas; ) {
			struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
			int pos = pblk_ppa_to_pos(geo, ppa);
			int read_type = PBLK_READ_RANDOM;

			if (pblk_io_aligned(pblk, rq_ppas))
				read_type = PBLK_READ_SEQUENTIAL;
			rqd.flags = pblk_set_read_mode(pblk, read_type);

			while (test_bit(pos, line->blk_bitmap)) {
				paddr += min;
				if (pblk_boundary_paddr_checks(pblk, paddr)) {
					pr_err("pblk: corrupt emeta line:%d\n",
								line->id);
					bio_put(bio);
					ret = -EINTR;
					goto free_rqd_dma;
				}

				ppa = addr_to_gen_ppa(pblk, paddr, id);
				pos = pblk_ppa_to_pos(geo, ppa);
			}

			if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
				pr_err("pblk: corrupt emeta line:%d\n",
								line->id);
				bio_put(bio);
				ret = -EINTR;
				goto free_rqd_dma;
			}

			for (j = 0; j < min; j++, i++, paddr++)
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, line->id);
		}
	}

	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_rqd_dma;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		if (dir == PBLK_WRITE)
			pblk_log_write_err(pblk, &rqd);
		else
			pblk_log_read_err(pblk, &rqd);
	}

	emeta_buf += rq_len;
	left_ppas -= rq_ppas;
	if (left_ppas)
		goto next_rq;
free_rqd_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;
}
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int bit;

	/* This usually only happens on bad lines */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (bit >= lm->blk_per_line)
		return -1;

	return bit * geo->sec_per_pl;
}
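/*
 * Example (illustrative): if the first two blocks of a line are bad and
 * sec_per_pl = 16, the first zero bit is 2 and smeta starts at sector 32
 * of the line.
 */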
static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
				     u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	struct bio *bio;
	struct nvm_rq rqd;
	__le64 *lba_list = NULL;
	int i, ret;
	int cmd_op, bio_op;
	int flags;

	if (dir == PBLK_WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
		flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
		lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	} else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
		flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	} else
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return -ENOMEM;

	rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
	rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_ppa_list;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.opcode = cmd_op;
	rqd.flags = flags;
	rqd.nr_ppas = lm->smeta_sec;

	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

		if (dir == PBLK_WRITE) {
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			meta_list[i].lba = lba_list[paddr] = addr_empty;
		}
	}

	/*
	 * This I/O is sent by the write thread when a line is replaced. Since
	 * the write thread is the only one sending write and erase commands,
	 * there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		pr_err("pblk: smeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_ppa_list;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		if (dir == PBLK_WRITE)
			pblk_log_write_err(pblk, &rqd);
		else if (dir == PBLK_READ)
			pblk_log_read_err(pblk, &rqd);
	}

free_ppa_list:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);

	return ret;
}
int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
	u64 bpaddr = pblk_line_smeta_start(pblk, line);

	return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf)
{
	return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
						line->emeta_ssec, PBLK_READ);
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
			    struct ppa_addr ppa)
{
	rqd->opcode = NVM_OP_ERASE;
	rqd->ppa_addr = ppa;
	rqd->nr_ppas = 1;
	rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
	rqd->bio = NULL;
}
static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq rqd;
	int ret = 0;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	pblk_setup_e_rq(pblk, &rqd, ppa);

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not sync erase line:%d,blk:%d\n",
					pblk_ppa_to_line(ppa),
					pblk_ppa_to_pos(geo, ppa));

		rqd.error = ret;
		goto out;
	}

out:
	rqd.private = pblk;
	__pblk_end_io_erase(pblk, &rqd);

	return ret;
}
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct ppa_addr ppa;
	int ret, bit = -1;

	/* Erase only good blocks, one at a time */
	do {
		spin_lock(&line->lock);
		bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
								bit + 1);
		if (bit >= lm->blk_per_line) {
			spin_unlock(&line->lock);
			break;
		}

		ppa = pblk->luns[bit].bppa; /* set ch and lun */
		ppa.g.blk = line->id;

		atomic_dec(&line->left_eblks);
		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
		spin_unlock(&line->lock);

		ret = pblk_blk_erase_sync(pblk, ppa);
		if (ret) {
			pr_err("pblk: failed to erase line %d\n", line->id);
			return ret;
		}
	} while (1);

	return 0;
}
static void pblk_line_setup_metadata(struct pblk_line *line,
				     struct pblk_line_mgmt *l_mg,
				     struct pblk_line_meta *lm)
{
	int meta_line;

	lockdep_assert_held(&l_mg->free_lock);

retry_meta:
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	if (meta_line == PBLK_DATA_LINES) {
		spin_unlock(&l_mg->free_lock);
		io_schedule();
		spin_lock(&l_mg->free_lock);
		goto retry_meta;
	}

	set_bit(meta_line, &l_mg->meta_bitmap);
	line->meta_line = meta_line;

	line->smeta = l_mg->sline_meta[meta_line];
	line->emeta = l_mg->eline_meta[meta_line];

	memset(line->smeta, 0, lm->smeta_len);
	memset(line->emeta->buf, 0, lm->emeta_len[0]);

	line->emeta->mem = 0;
	atomic_set(&line->emeta->sync, 0);
}
/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_line *cur)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
	int nr_blk_line;

	/* After erasing the line, new bad blocks might appear and we risk
	 * having an invalid line
	 */
	nr_blk_line = lm->blk_per_line -
			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	if (nr_blk_line < lm->min_blk_line) {
		spin_lock(&l_mg->free_lock);
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		spin_unlock(&l_mg->free_lock);

		pr_debug("pblk: line %d is bad\n", line->id);

		return 0;
	}

	/* Run-time metadata */
	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

	/* Mark LUNs allocated in this line (all for now) */
	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
	memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
	smeta_buf->header.id = cpu_to_le32(line->id);
	smeta_buf->header.type = cpu_to_le16(line->type);
	smeta_buf->header.version = SMETA_VERSION;

	/* Start metadata */
	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);

	/* Fill metadata among lines */
	if (cur) {
		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
		smeta_buf->prev_id = cpu_to_le32(cur->id);
		cur->emeta->buf->next_id = cpu_to_le32(line->id);
	} else {
		smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
	}

	/* All smeta must be set at this point */
	smeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

	/* End metadata */
	memcpy(&emeta_buf->header, &smeta_buf->header,
						sizeof(struct line_header));
	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
	emeta_buf->crc = cpu_to_le32(0);
	emeta_buf->prev_id = smeta_buf->prev_id;

	return 1;
}
/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
			     int init)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int nr_bb = 0;
	u64 off;
	int bit = -1;

	line->sec_in_line = lm->sec_per_line;

	/* Capture bad block information on line mapping bitmaps */
	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
					bit + 1)) < lm->blk_per_line) {
		off = bit * geo->sec_per_pl;
		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
							lm->sec_per_line);
		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
							lm->sec_per_line);
		line->sec_in_line -= geo->sec_per_chk;
		if (bit >= lm->emeta_bb)
			nr_bb++;
	}

	/* Mark smeta metadata sectors as bad sectors */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	off = bit * geo->sec_per_pl;
	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
	line->sec_in_line -= lm->smeta_sec;
	line->smeta_ssec = off;
	line->cur_sec = off + lm->smeta_sec;

	if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
		pr_debug("pblk: line smeta I/O failed. Retry\n");
		return 1;
	}

	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
	 * blocks to make sure that there are enough sectors to store emeta
	 */
	off = lm->sec_per_line - lm->emeta_sec[0];
	bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]);
	while (nr_bb) {
		off -= geo->sec_per_pl;
		if (!test_bit(off, line->invalid_bitmap)) {
			bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
			nr_bb--;
		}
	}

	line->sec_in_line -= lm->emeta_sec[0];
	line->emeta_ssec = off;
	line->nr_valid_lbas = 0;
	line->left_msecs = line->sec_in_line;
	*line->vsc = cpu_to_le32(line->sec_in_line);

	if (lm->sec_per_line - line->sec_in_line !=
		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		pr_err("pblk: unexpected line %d is bad\n", line->id);

		return 0;
	}

	return 1;
}
static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int blk_in_line = atomic_read(&line->blk_in_line);

	line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_ATOMIC);
	if (!line->map_bitmap)
		return -ENOMEM;

	/* will be initialized using bb info from map_bitmap */
	line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_ATOMIC);
	if (!line->invalid_bitmap) {
		kfree(line->map_bitmap);
		return -ENOMEM;
	}

	spin_lock(&line->lock);
	if (line->state != PBLK_LINESTATE_FREE) {
		kfree(line->map_bitmap);
		kfree(line->invalid_bitmap);
		spin_unlock(&line->lock);
		WARN(1, "pblk: corrupted line %d, state %d\n",
							line->id, line->state);
		return -EAGAIN;
	}

	line->state = PBLK_LINESTATE_OPEN;

	atomic_set(&line->left_eblks, blk_in_line);
	atomic_set(&line->left_seblks, blk_in_line);

	line->meta_distance = lm->meta_distance;
	spin_unlock(&line->lock);

	/* Bad blocks do not need to be erased */
	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

	kref_init(&line->ref);

	return 0;
}
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	l_mg->data_line = line;
	list_del(&line->list);

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		list_add(&line->list, &l_mg->free_list);
		spin_unlock(&l_mg->free_lock);
		return ret;
	}
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line, true);

	if (!pblk_line_init_bb(pblk, line, 0)) {
		list_add(&line->list, &l_mg->free_list);
		return -EINTR;
	}

	return 0;
}
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
	kfree(line->map_bitmap);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}
struct pblk_line *pblk_line_get(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	int ret, bit;

	lockdep_assert_held(&l_mg->free_lock);

retry:
	if (list_empty(&l_mg->free_list)) {
		pr_err("pblk: no free lines\n");
		return NULL;
	}

	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
	list_del(&line->list);
	l_mg->nr_free_lines--;

	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (unlikely(bit >= lm->blk_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);

		pr_debug("pblk: line %d is bad\n", line->id);
		goto retry;
	}

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		if (ret == -EAGAIN) {
			list_add(&line->list, &l_mg->corrupt_list);
			goto retry;
		} else {
			pr_err("pblk: failed to prepare line %d\n", line->id);
			list_add(&line->list, &l_mg->free_list);
			l_mg->nr_free_lines++;
			return NULL;
		}
	}

	return line;
}
static struct pblk_line *pblk_line_retry(struct pblk *pblk,
					 struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *retry_line;

retry:
	spin_lock(&l_mg->free_lock);
	retry_line = pblk_line_get(pblk);
	if (!retry_line) {
		l_mg->data_line = NULL;
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	retry_line->smeta = line->smeta;
	retry_line->emeta = line->emeta;
	retry_line->meta_line = line->meta_line;

	pblk_line_free(pblk, line);
	l_mg->data_line = retry_line;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line, false);

	if (pblk_line_erase(pblk, retry_line))
		goto retry;

	return retry_line;
}
static void pblk_set_space_limit(struct pblk *pblk)
{
	struct pblk_rl *rl = &pblk->rl;

	atomic_set(&rl->rb_space, 0);
}
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;

	spin_lock(&l_mg->free_lock);
	line = pblk_line_get(pblk);
	if (!line) {
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	line->seq_nr = l_mg->d_seq_nr++;
	line->type = PBLK_LINETYPE_DATA;
	l_mg->data_line = line;

	pblk_line_setup_metadata(line, l_mg, &pblk->lm);

	/* Allocate next line for preparation */
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_set_space_limit(pblk);

		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	}
	spin_unlock(&l_mg->free_lock);

	if (pblk_line_erase(pblk, line)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;
	}

retry_setup:
	if (!pblk_line_init_metadata(pblk, line, NULL)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, line, 1)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	pblk_rl_free_lines_dec(&pblk->rl, line, true);

	return line;
}
static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
{
	lockdep_assert_held(&pblk->l_mg.free_lock);

	pblk_set_space_limit(pblk);
	pblk->state = PBLK_STATE_STOPPING;
}
static void pblk_line_close_meta_sync(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line, *tline;
	LIST_HEAD(list);

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return;
	}

	list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
	spin_unlock(&l_mg->close_lock);

	list_for_each_entry_safe(line, tline, &list, list) {
		struct pblk_emeta *emeta = line->emeta;

		while (emeta->mem < lm->emeta_len[0]) {
			int ret;

			ret = pblk_submit_meta_io(pblk, line);
			if (ret) {
				pr_err("pblk: sync meta line %d failed (%d)\n",
							line->id, ret);
				return;
			}
		}
	}

	pblk_wait_for_meta(pblk);
	flush_workqueue(pblk->close_wq);
}
void pblk_pipeline_stop(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	if (pblk->state == PBLK_STATE_RECOVERING ||
					pblk->state == PBLK_STATE_STOPPED) {
		spin_unlock(&l_mg->free_lock);
		return;
	}
	pblk->state = PBLK_STATE_RECOVERING;
	spin_unlock(&l_mg->free_lock);

	pblk_flush_writer(pblk);
	pblk_wait_for_meta(pblk);

	ret = pblk_recov_pad(pblk);
	if (ret) {
		pr_err("pblk: could not close data on teardown(%d)\n", ret);
		return;
	}

	flush_workqueue(pblk->bb_wq);
	pblk_line_close_meta_sync(pblk);

	spin_lock(&l_mg->free_lock);
	pblk->state = PBLK_STATE_STOPPED;
	l_mg->data_line = NULL;
	l_mg->data_next = NULL;
	spin_unlock(&l_mg->free_lock);
}
struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *cur, *new = NULL;
	unsigned int left_seblks;

	cur = l_mg->data_line;
	new = l_mg->data_next;
	if (!new)
		goto out;
	l_mg->data_line = new;

	spin_lock(&l_mg->free_lock);
	if (pblk->state != PBLK_STATE_RUNNING) {
		l_mg->data_line = NULL;
		l_mg->data_next = NULL;
		spin_unlock(&l_mg->free_lock);
		goto out;
	}

	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
	spin_unlock(&l_mg->free_lock);

retry_erase:
	left_seblks = atomic_read(&new->left_seblks);
	if (left_seblks) {
		/* If line is not fully erased, erase it */
		if (atomic_read(&new->left_eblks)) {
			if (pblk_line_erase(pblk, new))
				goto out;
		} else {
			io_schedule();
		}
		goto retry_erase;
	}

retry_setup:
	if (!pblk_line_init_metadata(pblk, new, cur)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, new, 1)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	pblk_rl_free_lines_dec(&pblk->rl, new, true);

	/* Allocate next line for preparation */
	spin_lock(&l_mg->free_lock);
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_stop_writes(pblk, new);
		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	}
	spin_unlock(&l_mg->free_lock);

out:
	return new;
}
void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
{
	kfree(line->map_bitmap);
	kfree(line->invalid_bitmap);

	*line->vsc = cpu_to_le32(EMPTY_ENTRY);

	line->map_bitmap = NULL;
	line->invalid_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;

	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	line->state = PBLK_LINESTATE_FREE;
	line->gc_group = PBLK_LINEGC_NONE;
	pblk_line_free(pblk, line);
	spin_unlock(&line->lock);

	atomic_dec(&gc->pipeline_gc);

	spin_lock(&l_mg->free_lock);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_inc(&pblk->rl, line);
}
static void pblk_line_put_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_put_ws = container_of(work,
						struct pblk_line_ws, ws);
	struct pblk *pblk = line_put_ws->pblk;
	struct pblk_line *line = line_put_ws->line;

	__pblk_line_put(pblk, line);
	mempool_free(line_put_ws, pblk->gen_ws_pool);
}

void pblk_line_put(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;

	__pblk_line_put(pblk, line);
}

void pblk_line_put_wq(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;
	struct pblk_line_ws *line_put_ws;

	line_put_ws = mempool_alloc(pblk->gen_ws_pool, GFP_ATOMIC);
	if (!line_put_ws)
		return;

	line_put_ws->pblk = pblk;
	line_put_ws->line = line;
	line_put_ws->priv = NULL;

	INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
	queue_work(pblk->r_end_wq, &line_put_ws->ws);
}
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq *rqd;
	int err;

	rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);

	pblk_setup_e_rq(pblk, rqd, ppa);

	rqd->end_io = pblk_end_io_erase;
	rqd->private = pblk;

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not async erase line:%d,blk:%d\n",
					pblk_ppa_to_line(ppa),
					pblk_ppa_to_pos(geo, ppa));
	}

	return err;
}
struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
	return pblk->l_mg.data_line;
}

/* For now, always erase next line */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
{
	return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
	return (line->left_msecs == 0);
}

static void pblk_line_should_sync_meta(struct pblk *pblk)
{
	if (pblk_rl_is_limit(&pblk->rl))
		pblk_line_close_meta_sync(pblk);
}
void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list;

#ifdef CONFIG_NVM_DEBUG
	struct pblk_line_meta *lm = &pblk->lm;

	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt closed line %d\n", line->id);
#endif

	spin_lock(&l_mg->free_lock);
	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
	spin_unlock(&l_mg->free_lock);

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
	line->state = PBLK_LINESTATE_CLOSED;
	move_list = pblk_line_gc_list(pblk, line);

	list_add_tail(&line->list, move_list);

	kfree(line->map_bitmap);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;

	spin_unlock(&line->lock);
	spin_unlock(&l_mg->gc_lock);
}
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;

	/* No need for exact vsc value; avoid a big line lock and take approx. */
	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);

	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));

	spin_lock(&l_mg->close_lock);
	spin_lock(&line->lock);
	list_add_tail(&line->list, &l_mg->emeta_list);
	spin_unlock(&line->lock);
	spin_unlock(&l_mg->close_lock);

	pblk_line_should_sync_meta(pblk);
}
void pblk_line_close_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;

	pblk_line_close(pblk, line);
	mempool_free(line_ws, pblk->gen_ws_pool);
}
void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		     void (*work)(struct work_struct *), gfp_t gfp_mask,
		     struct workqueue_struct *wq)
{
	struct pblk_line_ws *line_ws;

	line_ws = mempool_alloc(pblk->gen_ws_pool, gfp_mask);

	line_ws->pblk = pblk;
	line_ws->line = line;
	line_ws->priv = priv;

	INIT_WORK(&line_ws->ws, work);
	queue_work(wq, &line_ws->ws);
}
static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
			     int nr_ppas, int pos)
{
	struct pblk_lun *rlun = &pblk->luns[pos];
	int ret;

	/*
	 * Only send one inflight I/O per LUN. Since we map at a page
	 * granularity, all ppas in the I/O will map to the same LUN
	 */
#ifdef CONFIG_NVM_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
				ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif

	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
	if (ret == -ETIME || ret == -EINTR)
		pr_err("pblk: taking lun semaphore timed out: err %d\n", -ret);
}
void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}
void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		  unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

	/* If the LUN has been locked for this same request, do not attempt to
	 * lock it again
	 */
	if (test_and_set_bit(pos, lun_bitmap))
		return;

	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
}
void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);

#ifdef CONFIG_NVM_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
				ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif

	rlun = &pblk->luns[pos];
	up(&rlun->wr_sem);
}
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int nr_luns = geo->all_luns;
	int bit = -1;

	while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
		rlun = &pblk->luns[bit];
		up(&rlun->wr_sem);
	}
}
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
	struct ppa_addr ppa_l2p;

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
		pblk_map_invalidate(pblk, ppa_l2p);

	pblk_trans_map_set(pblk, lba, ppa);
	spin_unlock(&pblk->trans_lock);
}

void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	pblk_update_map(pblk, lba, ppa);
}
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
		       struct pblk_line *gc_line, u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int ret = 1;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa_new));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
#endif

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return 0;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);

	if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
		spin_lock(&gc_line->lock);
		WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
						"pblk: corrupted GC update");
		spin_unlock(&gc_line->lock);

		ret = 0;
		goto out;
	}

	pblk_trans_map_set(pblk, lba, ppa_new);
out:
	spin_unlock(&pblk->trans_lock);
	return ret;
}
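/*
 * Illustrative scenario for the check above (not in the original source):
 * GC read sector paddr_gc, but before the data was written back a user
 * write updated the same lba. The L2P entry then no longer matches ppa_gc,
 * the GC copy is stale, and the update is dropped (return value 0).
 */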
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
			 struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
{
	struct ppa_addr ppa_l2p;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa_mapped));
#endif
	/* Invalidate and discard padded entries */
	if (lba == ADDR_EMPTY) {
#ifdef CONFIG_NVM_DEBUG
		atomic_long_inc(&pblk->padded_wb);
#endif
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		return;
	}

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	/* Do not update L2P if the cacheline has been updated. In this case,
	 * the mapped ppa must be invalidated
	 */
	if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		goto out;
	}

#ifdef CONFIG_NVM_DEBUG
	WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
#endif

	pblk_trans_map_set(pblk, lba, ppa_mapped);
out:
	spin_unlock(&pblk->trans_lock);
}
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs)
{
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr ppa;

		ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);

		/* If the L2P entry maps to a line, the reference is valid */
		if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
			int line_id = pblk_ppa_to_line(ppa);
			struct pblk_line *line = &pblk->lines[line_id];

			kref_get(&line->ref);
		}
	}
	spin_unlock(&pblk->trans_lock);
}

void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs)
{
	u64 lba;
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		lba = lba_list[i];
		if (lba != ADDR_EMPTY) {
			/* logic error: lba out-of-bounds. Ignore update */
			if (!(lba < pblk->rl.nr_secs)) {
				WARN(1, "pblk: corrupted L2P map request\n");
				continue;
			}
			ppas[i] = pblk_trans_map_get(pblk, lba);
		}
	}
	spin_unlock(&pblk->trans_lock);
}