// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#define CREATE_TRACE_POINTS

#include "pblk.h"
#include "pblk-trace.h"

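/* Work item handler: report a grown bad block to the device by updating its
 * chunk metadata. The ppa to mark is passed through line_ws->priv and is
 * freed here together with the work struct.
 */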
static void pblk_line_mark_bb(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct ppa_addr *ppa = line_ws->priv;
	int ret;

	ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
	if (ret) {
		struct pblk_line *line;
		int pos;

		line = pblk_ppa_to_line(pblk, *ppa);
		pos = pblk_ppa_to_pos(&dev->geo, *ppa);

		pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
				line->id, pos);
	}

	kfree(ppa);
	mempool_free(line_ws, &pblk->gen_ws_pool);
}

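/* Handle an erase failure: account the bad block on the line's bitmaps and,
 * on the 1.2 spec, schedule a worker to mark the block as grown bad on the
 * device. Called from the erase completion path.
 */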
static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
			 struct ppa_addr ppa_addr)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa;
	int pos = pblk_ppa_to_pos(geo, ppa_addr);

	pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
	atomic_long_inc(&pblk->erase_failed);

	atomic_dec(&line->blk_in_line);
	if (test_and_set_bit(pos, line->blk_bitmap))
		pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
							line->id, pos);

	/* Not necessary to mark bad blocks on 2.0 spec. */
	if (geo->version == NVM_OCSSD_SPEC_20)
		return;

	ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
	if (!ppa)
		return;

	*ppa = ppa_addr;
	pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
						GFP_ATOMIC, pblk->bb_wq);
}

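/* Common erase completion: update the chunk state for the erased ppa and
 * account the result on the owning line.
 */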
static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_chk_meta *chunk;
	struct pblk_line *line;
	int pos;

	line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
	pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
	chunk = &line->chks[pos];

	atomic_dec(&line->left_seblks);

	if (rqd->error) {
		trace_pblk_chunk_reset(pblk_disk_name(pblk),
				&rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED);

		chunk->state = NVM_CHK_ST_OFFLINE;
		pblk_mark_bb(pblk, line, rqd->ppa_addr);
	} else {
		trace_pblk_chunk_reset(pblk_disk_name(pblk),
				&rqd->ppa_addr, PBLK_CHUNK_RESET_DONE);

		chunk->state = NVM_CHK_ST_FREE;
	}

	trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr,
				chunk->state);

	atomic_dec(&pblk->inflight_io);
}

/* Erase completion assumes that only one block is erased at the time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;

	__pblk_end_io_erase(pblk, rqd);
	mempool_free(rqd, &pblk->e_rq_pool);
}

/*
 * Get information for all chunks from the device.
 *
 * The caller is responsible for freeing (vmalloc) the returned structure
 */
struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_chk_meta *meta;
	struct ppa_addr ppa;
	unsigned long len;
	int ret;

	ppa.ppa = 0;

	len = geo->all_chunks * sizeof(*meta);
	meta = vzalloc(len);
	if (!meta)
		return ERR_PTR(-ENOMEM);

	ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
	if (ret) {
		vfree(meta);
		return ERR_PTR(-EIO);
	}

	return meta;
}

struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
					struct nvm_chk_meta *meta,
					struct ppa_addr ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
	int lun_off = ppa.m.pu * geo->num_chk;
	int chk_off = ppa.m.chk;

	return meta + ch_off + lun_off + chk_off;
}

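/* Invalidate one sector (paddr) on a line and, if the line is closed, move it
 * to the GC list that matches its new valid sector count.
 */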
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
			   u64 paddr)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;

	/* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
	 * table is modified with reclaimed sectors, a check is done to ensure
	 * that newer updates are not overwritten.
	 */
	spin_lock(&line->lock);
	WARN_ON(line->state == PBLK_LINESTATE_FREE);

	if (test_and_set_bit(paddr, line->invalid_bitmap)) {
		WARN_ONCE(1, "pblk: double invalidate\n");
		spin_unlock(&line->lock);
		return;
	}
	le32_add_cpu(line->vsc, -1);

	if (line->state == PBLK_LINESTATE_CLOSED)
		move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);

	if (move_list) {
		spin_lock(&l_mg->gc_lock);
		spin_lock(&line->lock);
		/* Prevent moving a line that has just been chosen for GC */
		if (line->state == PBLK_LINESTATE_GC) {
			spin_unlock(&line->lock);
			spin_unlock(&l_mg->gc_lock);
			return;
		}
		spin_unlock(&line->lock);

		list_move_tail(&line->list, move_list);
		spin_unlock(&l_mg->gc_lock);
	}
}

void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
	struct pblk_line *line;
	u64 paddr;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
	BUG_ON(pblk_ppa_empty(ppa));
#endif

	line = pblk_ppa_to_line(pblk, ppa);
	paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

	__pblk_map_invalidate(pblk, line, paddr);
}

static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
				  unsigned int nr_secs)
{
	sector_t lba;

	spin_lock(&pblk->trans_lock);
	for (lba = slba; lba < slba + nr_secs; lba++) {
		struct ppa_addr ppa;

		ppa = pblk_trans_map_get(pblk, lba);

		if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);

		pblk_ppa_set_empty(&ppa);
		pblk_trans_map_set(pblk, lba, ppa);
	}
	spin_unlock(&pblk->trans_lock);
}

int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list)
		return -ENOMEM;

	if (rqd->nr_ppas == 1)
		return 0;

	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);

	return 0;
}

void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

	if (rqd->meta_list)
		nvm_dev_dma_free(dev->parent, rqd->meta_list,
				rqd->dma_meta_list);
}

/* Caller must guarantee that the request is a valid type */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
{
	mempool_t *pool;
	struct nvm_rq *rqd;
	int rq_size;

	switch (type) {
	case PBLK_WRITE:
	case PBLK_WRITE_INT:
		pool = &pblk->w_rq_pool;
		rq_size = pblk_w_rq_size;
		break;
	case PBLK_READ:
		pool = &pblk->r_rq_pool;
		rq_size = pblk_g_rq_size;
		break;
	default:
		pool = &pblk->e_rq_pool;
		rq_size = pblk_g_rq_size;
	}

	rqd = mempool_alloc(pool, GFP_KERNEL);
	memset(rqd, 0, rq_size);

	return rqd;
}

/* Typically used on completion path. Cannot guarantee request consistency */
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
{
	mempool_t *pool;

	switch (type) {
	case PBLK_WRITE:
		kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
		fallthrough;
	case PBLK_WRITE_INT:
		pool = &pblk->w_rq_pool;
		break;
	case PBLK_READ:
		pool = &pblk->r_rq_pool;
		break;
	case PBLK_ERASE:
		pool = &pblk->e_rq_pool;
		break;
	default:
		pblk_err(pblk, "trying to free unknown rqd type\n");
		return;
	}

	pblk_free_rqd_meta(pblk, rqd);
	mempool_free(rqd, pool);
}

void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages)
{
	struct bio_vec *bv;
	struct page *page;
	int i, e, nbv = 0;

	for (i = 0; i < bio->bi_vcnt; i++) {
		bv = &bio->bi_io_vec[i];
		page = bv->bv_page;
		for (e = 0; e < bv->bv_len; e += PBLK_EXPOSED_PAGE_SIZE, nbv++)
			if (nbv >= off)
				mempool_free(page++, &pblk->page_bio_pool);
	}
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages)
{
	struct request_queue *q = pblk->dev->q;
	struct page *page;
	int i, ret;

	for (i = 0; i < nr_pages; i++) {
		page = mempool_alloc(&pblk->page_bio_pool, flags);

		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
			pblk_err(pblk, "could not add page to bio\n");
			mempool_free(page, &pblk->page_bio_pool);
			goto err;
		}
	}

	return 0;
err:
	pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
	return -1;
}

void pblk_write_kick(struct pblk *pblk)
{
	wake_up_process(pblk->writer_ts);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(struct timer_list *t)
{
	struct pblk *pblk = from_timer(pblk, t, wtimer);

	/* kick the write thread every tick to flush outstanding data */
	pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

	if (secs_avail >= pblk->min_write_pgs_data)
		pblk_write_kick(pblk);
}

static void pblk_wait_for_meta(struct pblk *pblk)
{
	do {
		if (!atomic_read(&pblk->inflight_io))
			break;

		schedule();
	} while (1);
}

static void pblk_flush_writer(struct pblk *pblk)
{
	pblk_rb_flush(&pblk->rwb);
	do {
		if (!pblk_rb_sync_count(&pblk->rwb))
			break;

		pblk_write_kick(pblk);
		schedule();
	} while (1);
}

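/* Pick the GC list a line belongs to based on its valid sector count (vsc).
 * Returns the list to move the line to, or NULL if the line stays where it
 * is. Caller must hold line->lock.
 */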
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;
	int packed_meta = (le32_to_cpu(*line->vsc) / pblk->min_write_pgs_data)
			* (pblk->min_write_pgs - pblk->min_write_pgs_data);
	int vsc = le32_to_cpu(*line->vsc) + packed_meta;

	lockdep_assert_held(&line->lock);

	if (line->w_err_gc->has_write_err) {
		if (line->gc_group != PBLK_LINEGC_WERR) {
			line->gc_group = PBLK_LINEGC_WERR;
			move_list = &l_mg->gc_werr_list;
			pblk_rl_werr_line_in(&pblk->rl);
		}
	} else if (!vsc) {
		if (line->gc_group != PBLK_LINEGC_FULL) {
			line->gc_group = PBLK_LINEGC_FULL;
			move_list = &l_mg->gc_full_list;
		}
	} else if (vsc < lm->high_thrs) {
		if (line->gc_group != PBLK_LINEGC_HIGH) {
			line->gc_group = PBLK_LINEGC_HIGH;
			move_list = &l_mg->gc_high_list;
		}
	} else if (vsc < lm->mid_thrs) {
		if (line->gc_group != PBLK_LINEGC_MID) {
			line->gc_group = PBLK_LINEGC_MID;
			move_list = &l_mg->gc_mid_list;
		}
	} else if (vsc < line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_LOW) {
			line->gc_group = PBLK_LINEGC_LOW;
			move_list = &l_mg->gc_low_list;
		}
	} else if (vsc == line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_EMPTY) {
			line->gc_group = PBLK_LINEGC_EMPTY;
			move_list = &l_mg->gc_empty_list;
		}
	} else {
		line->state = PBLK_LINESTATE_CORRUPT;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);

		line->gc_group = PBLK_LINEGC_NONE;
		move_list = &l_mg->corrupt_list;
		pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
						line->id, vsc,
						line->sec_in_line,
						lm->high_thrs, lm->mid_thrs);
	}

	return move_list;
}

void pblk_discard(struct pblk *pblk, struct bio *bio)
{
	sector_t slba = pblk_get_lba(bio);
	sector_t nr_secs = pblk_get_secs(bio);

	pblk_invalidate_range(pblk, slba, nr_secs);
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	/* Empty page read is not necessarily an error (e.g., L2P recovery) */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		atomic_long_inc(&pblk->read_empty);
		return;
	}

	switch (rqd->error) {
	case NVM_RSP_WARN_HIGHECC:
		atomic_long_inc(&pblk->read_high_ecc);
		break;
	case NVM_RSP_ERR_FAILECC:
	case NVM_RSP_ERR_FAILCRC:
		atomic_long_inc(&pblk->read_failed);
		break;
	default:
		pblk_err(pblk, "unknown read error:%d\n", rqd->error);
	}
#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
	pblk->sec_per_write = sec_per_write;
}

int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
{
	struct nvm_tgt_dev *dev = pblk->dev;

	atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if (pblk_check_io(pblk, rqd))
		return NVM_IO_ERR;
#endif

	return nvm_submit_io(dev, rqd, buf);
}

void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	int i;

	for (i = 0; i < rqd->nr_ppas; i++) {
		struct ppa_addr *ppa = &ppa_list[i];
		struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa);
		u64 caddr = pblk_dev_ppa_to_chunk_addr(pblk, *ppa);

		if (caddr == 0)
			trace_pblk_chunk_state(pblk_disk_name(pblk),
							ppa, NVM_CHK_ST_OPEN);
		else if (caddr == (chunk->cnlb - 1))
			trace_pblk_chunk_state(pblk_disk_name(pblk),
							ppa, NVM_CHK_ST_CLOSED);
	}
}

int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	int ret;

	atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if (pblk_check_io(pblk, rqd))
		return NVM_IO_ERR;
#endif

	ret = nvm_submit_io_sync(dev, rqd, buf);

	if (trace_pblk_chunk_state_enabled() && !ret &&
	    rqd->opcode == NVM_OP_PWRITE)
		pblk_check_chunk_state_update(pblk, rqd);

	return ret;
}

static int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd,
				   void *buf)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	int ret;

	pblk_down_chunk(pblk, ppa_list[0]);
	ret = pblk_submit_io_sync(pblk, rqd, buf);
	pblk_up_chunk(pblk, ppa_list[0]);

	return ret;
}

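/* Calculate how many sectors to submit in the next write, bounded by the
 * configured write size (sec_per_write) and rounded down to a multiple of
 * the minimum write unit. A pending flush forces at least one minimum-sized
 * write.
 */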
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush, bool skip_meta)
{
	int max = pblk->sec_per_write;
	int min = pblk->min_write_pgs;
	int secs_to_sync = 0;

	if (skip_meta && pblk->min_write_pgs_data != pblk->min_write_pgs)
		min = max = pblk->min_write_pgs_data;

	if (secs_avail >= max)
		secs_to_sync = max;
	else if (secs_avail >= min)
		secs_to_sync = min * (secs_avail / min);
	else if (secs_to_flush)
		secs_to_sync = min;

	return secs_to_sync;
}

void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	spin_lock(&line->lock);
	addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	line->cur_sec = addr - nr_secs;

	for (i = 0; i < nr_secs; i++, line->cur_sec--)
		WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
	spin_unlock(&line->lock);
}

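/* Allocate nr_secs consecutive sectors on the line's map bitmap and return
 * the first allocated address. Caller must hold line->lock.
 */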
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	lockdep_assert_held(&line->lock);

	/* logic error: ppa out-of-bounds. Prevent generating bad address */
	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
		WARN(1, "pblk: page allocation out of bounds\n");
		nr_secs = pblk->lm.sec_per_line - line->cur_sec;
	}

	line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	for (i = 0; i < nr_secs; i++, line->cur_sec++)
		WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

	return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;

	/* Lock needed in case a write fails and a recovery needs to remap
	 * failed write buffer entries
	 */
	spin_lock(&line->lock);
	addr = __pblk_alloc_page(pblk, line, nr_secs);
	line->left_msecs -= nr_secs;
	WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
	spin_unlock(&line->lock);

	return addr;
}

u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
	u64 paddr;

	spin_lock(&line->lock);
	paddr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	spin_unlock(&line->lock);

	return paddr;
}

u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int bit;

	/* This usually only happens on bad lines */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (bit >= lm->blk_per_line)
		return -1;

	return bit * geo->ws_opt;
}

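/* Read the start metadata (smeta) of a line synchronously into line->smeta. */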
int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct ppa_addr *ppa_list;
	struct nvm_rq rqd;
	u64 paddr = pblk_line_smeta_start(pblk, line);
	int i, ret;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = pblk_alloc_rqd_meta(pblk, &rqd);
	if (ret)
		return ret;

	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = lm->smeta_sec;
	rqd.is_seq = 1;
	ppa_list = nvm_rq_to_ppa_list(&rqd);

	for (i = 0; i < lm->smeta_sec; i++, paddr++)
		ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

	ret = pblk_submit_io_sync(pblk, &rqd, line->smeta);
	if (ret) {
		pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
		goto clear_rqd;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
		pblk_log_read_err(pblk, &rqd);
		ret = -EIO;
	}

clear_rqd:
	pblk_free_rqd_meta(pblk, &rqd);
	return ret;
}

static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
				 u64 paddr)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct ppa_addr *ppa_list;
	struct nvm_rq rqd;
	__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
	int i, ret;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = pblk_alloc_rqd_meta(pblk, &rqd);
	if (ret)
		return ret;

	rqd.opcode = NVM_OP_PWRITE;
	rqd.nr_ppas = lm->smeta_sec;
	rqd.is_seq = 1;
	ppa_list = nvm_rq_to_ppa_list(&rqd);

	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
		struct pblk_sec_meta *meta = pblk_get_meta(pblk,
							   rqd.meta_list, i);

		ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
		meta->lba = lba_list[paddr] = addr_empty;
	}

	ret = pblk_submit_io_sync_sem(pblk, &rqd, line->smeta);
	if (ret) {
		pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
		goto clear_rqd;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		pblk_log_write_err(pblk, &rqd);
		ret = -EIO;
	}

clear_rqd:
	pblk_free_rqd_meta(pblk, &rqd);
	return ret;
}

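/* Read the end metadata (emeta) of a line synchronously into emeta_buf,
 * splitting the transfer into device-sized requests and skipping bad blocks.
 */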
int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	void *ppa_list_buf, *meta_list;
	struct ppa_addr *ppa_list;
	struct nvm_rq rqd;
	u64 paddr = line->emeta_ssec;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int min = pblk->min_write_pgs;
	int left_ppas = lm->emeta_sec[0];
	int line_id = line->id;
	int rq_ppas, rq_len;
	int i, j;
	int ret;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&dma_meta_list);
	if (!meta_list)
		return -ENOMEM;

	ppa_list_buf = meta_list + pblk_dma_meta_size(pblk);
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);

next_rq:
	memset(&rqd, 0, sizeof(struct nvm_rq));

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
	rq_len = rq_ppas * geo->csecs;

	rqd.meta_list = meta_list;
	rqd.ppa_list = ppa_list_buf;
	rqd.dma_meta_list = dma_meta_list;
	rqd.dma_ppa_list = dma_ppa_list;
	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = rq_ppas;
	ppa_list = nvm_rq_to_ppa_list(&rqd);

	for (i = 0; i < rqd.nr_ppas; ) {
		struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
		int pos = pblk_ppa_to_pos(geo, ppa);

		if (pblk_io_aligned(pblk, rq_ppas))
			rqd.is_seq = 1;

		while (test_bit(pos, line->blk_bitmap)) {
			paddr += min;
			if (pblk_boundary_paddr_checks(pblk, paddr)) {
				ret = -EINTR;
				goto free_rqd_dma;
			}

			ppa = addr_to_gen_ppa(pblk, paddr, line_id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
			ret = -EINTR;
			goto free_rqd_dma;
		}

		for (j = 0; j < min; j++, i++, paddr++)
			ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
	}

	ret = pblk_submit_io_sync(pblk, &rqd, emeta_buf);
	if (ret) {
		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
		goto free_rqd_dma;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
		pblk_log_read_err(pblk, &rqd);
		ret = -EIO;
		goto free_rqd_dma;
	}

	emeta_buf += rq_len;
	left_ppas -= rq_ppas;
	if (left_ppas)
		goto next_rq;

free_rqd_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
			    struct ppa_addr ppa)
{
	rqd->opcode = NVM_OP_ERASE;
	rqd->ppa_addr = ppa;
	rqd->nr_ppas = 1;
	rqd->is_seq = 1;
	rqd->bio = NULL;
}

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq rqd = {NULL};
	int ret;

	trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa,
				PBLK_CHUNK_RESET_START);

	pblk_setup_e_rq(pblk, &rqd, ppa);

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io_sync(pblk, &rqd, NULL);
	rqd.private = pblk;
	__pblk_end_io_erase(pblk, &rqd);

	return ret;
}

int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct ppa_addr ppa;
	int ret, bit = -1;

	/* Erase only good blocks, one at a time */
	do {
		spin_lock(&line->lock);
		bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
								bit + 1);
		if (bit >= lm->blk_per_line) {
			spin_unlock(&line->lock);
			break;
		}

		ppa = pblk->luns[bit].bppa; /* set ch and lun */
		ppa.a.blk = line->id;

		atomic_dec(&line->left_eblks);
		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
		spin_unlock(&line->lock);

		ret = pblk_blk_erase_sync(pblk, ppa);
		if (ret) {
			pblk_err(pblk, "failed to erase line %d\n", line->id);
			return ret;
		}
	} while (1);

	return 0;
}

static void pblk_line_setup_metadata(struct pblk_line *line,
				     struct pblk_line_mgmt *l_mg,
				     struct pblk_line_meta *lm)
{
	int meta_line;

	lockdep_assert_held(&l_mg->free_lock);

retry_meta:
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	if (meta_line == PBLK_DATA_LINES) {
		spin_unlock(&l_mg->free_lock);
		io_schedule();
		spin_lock(&l_mg->free_lock);
		goto retry_meta;
	}

	set_bit(meta_line, &l_mg->meta_bitmap);
	line->meta_line = meta_line;

	line->smeta = l_mg->sline_meta[meta_line];
	line->emeta = l_mg->eline_meta[meta_line];

	memset(line->smeta, 0, lm->smeta_len);
	memset(line->emeta->buf, 0, lm->emeta_len[0]);

	line->emeta->mem = 0;
	atomic_set(&line->emeta->sync, 0);
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_line *cur)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
	int nr_blk_line;

	/* After erasing the line, new bad blocks might appear and we risk
	 * having an invalid line
	 */
	nr_blk_line = lm->blk_per_line -
			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	if (nr_blk_line < lm->min_blk_line) {
		spin_lock(&l_mg->free_lock);
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		spin_unlock(&l_mg->free_lock);

		pblk_debug(pblk, "line %d is bad\n", line->id);

		return 0;
	}

	/* Run-time metadata */
	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

	/* Mark LUNs allocated in this line (all for now) */
	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
	guid_copy((guid_t *)&smeta_buf->header.uuid, &pblk->instance_uuid);
	smeta_buf->header.id = cpu_to_le32(line->id);
	smeta_buf->header.type = cpu_to_le16(line->type);
	smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
	smeta_buf->header.version_minor = SMETA_VERSION_MINOR;

	/* Start metadata */
	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);

	/* Fill metadata among lines */
	if (cur) {
		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
		smeta_buf->prev_id = cpu_to_le32(cur->id);
		cur->emeta->buf->next_id = cpu_to_le32(line->id);
	} else {
		smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
	}

	/* All smeta must be set at this point */
	smeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

	/* End metadata */
	memcpy(&emeta_buf->header, &smeta_buf->header,
						sizeof(struct line_header));

	emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
	emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
	emeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));

	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
	emeta_buf->crc = cpu_to_le32(0);
	emeta_buf->prev_id = smeta_buf->prev_id;

	return 1;
}

static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
	if (!line->map_bitmap)
		return -ENOMEM;

	memset(line->map_bitmap, 0, lm->sec_bitmap_len);

	/* will be initialized using bb info from map_bitmap */
	line->invalid_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
	if (!line->invalid_bitmap) {
		mempool_free(line->map_bitmap, l_mg->bitmap_pool);
		line->map_bitmap = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
			     int init)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	u64 off;
	int bit = -1;
	int emeta_secs;

	line->sec_in_line = lm->sec_per_line;

	/* Capture bad block information on line mapping bitmaps */
	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
					bit + 1)) < lm->blk_per_line) {
		off = bit * geo->ws_opt;
		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
							lm->sec_per_line);
		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
							lm->sec_per_line);
		line->sec_in_line -= geo->clba;
	}

	/* Mark smeta metadata sectors as bad sectors */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	off = bit * geo->ws_opt;
	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
	line->sec_in_line -= lm->smeta_sec;
	line->cur_sec = off + lm->smeta_sec;

	if (init && pblk_line_smeta_write(pblk, line, off)) {
		pblk_debug(pblk, "line smeta I/O failed. Retry\n");
		return 0;
	}

	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
	 * blocks to make sure that there are enough sectors to store emeta
	 */
	emeta_secs = lm->emeta_sec[0];
	off = lm->sec_per_line;
	while (emeta_secs) {
		off -= geo->ws_opt;
		if (!test_bit(off, line->invalid_bitmap)) {
			bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
			emeta_secs -= geo->ws_opt;
		}
	}

	line->emeta_ssec = off;
	line->sec_in_line -= lm->emeta_sec[0];
	line->nr_valid_lbas = 0;
	line->left_msecs = line->sec_in_line;
	*line->vsc = cpu_to_le32(line->sec_in_line);

	if (lm->sec_per_line - line->sec_in_line !=
		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		pblk_err(pblk, "unexpected line %d is bad\n", line->id);

		return 0;
	}

	return 1;
}

static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int blk_to_erase = atomic_read(&line->blk_in_line);
	int i;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
		int state = line->chks[pos].state;

		/* Free chunks should not be erased */
		if (state & NVM_CHK_ST_FREE) {
			set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
							line->erase_bitmap);
			blk_to_erase--;
		}
	}

	return blk_to_erase;
}

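/* Transition a line to the open state and set up its erase accounting.
 * Returns -EAGAIN if the line has too few good blocks and -EINTR if its
 * state is inconsistent.
 */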
static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int blk_in_line = atomic_read(&line->blk_in_line);
	int blk_to_erase;

	/* Bad blocks do not need to be erased */
	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

	spin_lock(&line->lock);

	/* If we have not written to this line, we need to mark up free chunks
	 * as already erased
	 */
	if (line->state == PBLK_LINESTATE_NEW) {
		blk_to_erase = pblk_prepare_new_line(pblk, line);
		line->state = PBLK_LINESTATE_FREE;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
	} else {
		blk_to_erase = blk_in_line;
	}

	if (blk_in_line < lm->min_blk_line) {
		spin_unlock(&line->lock);
		return -EAGAIN;
	}

	if (line->state != PBLK_LINESTATE_FREE) {
		WARN(1, "pblk: corrupted line %d, state %d\n",
							line->id, line->state);
		spin_unlock(&line->lock);
		return -EINTR;
	}

	line->state = PBLK_LINESTATE_OPEN;
	trace_pblk_line_state(pblk_disk_name(pblk), line->id,
				line->state);

	atomic_set(&line->left_eblks, blk_to_erase);
	atomic_set(&line->left_seblks, blk_to_erase);

	line->meta_distance = lm->meta_distance;
	spin_unlock(&line->lock);

	kref_init(&line->ref);
	atomic_set(&line->sec_to_update, 0);

	return 0;
}

/* Line allocations in the recovery path are always single threaded */
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	l_mg->data_line = line;
	list_del(&line->list);

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		list_add(&line->list, &l_mg->free_list);
		spin_unlock(&l_mg->free_lock);
		return ret;
	}
	spin_unlock(&l_mg->free_lock);

	ret = pblk_line_alloc_bitmaps(pblk, line);
	if (ret)
		goto fail;

	if (!pblk_line_init_bb(pblk, line, 0)) {
		ret = -EINTR;
		goto fail;
	}

	pblk_rl_free_lines_dec(&pblk->rl, line, true);
	return 0;

fail:
	spin_lock(&l_mg->free_lock);
	list_add(&line->list, &l_mg->free_list);
	spin_unlock(&l_mg->free_lock);

	return ret;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	mempool_free(line->map_bitmap, l_mg->bitmap_pool);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

static void pblk_line_reinit(struct pblk_line *line)
{
	*line->vsc = cpu_to_le32(EMPTY_ENTRY);

	line->map_bitmap = NULL;
	line->invalid_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

void pblk_line_free(struct pblk_line *line)
{
	struct pblk *pblk = line->pblk;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	mempool_free(line->map_bitmap, l_mg->bitmap_pool);
	mempool_free(line->invalid_bitmap, l_mg->bitmap_pool);

	pblk_line_reinit(line);
}

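/* Take the first usable line from the free list and prepare it for writing.
 * Bad or corrupt lines found along the way are parked on the corresponding
 * lists. Caller must hold l_mg->free_lock.
 */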
struct pblk_line *pblk_line_get(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	int ret, bit;

	lockdep_assert_held(&l_mg->free_lock);

retry:
	if (list_empty(&l_mg->free_list)) {
		pblk_err(pblk, "no free lines\n");
		return NULL;
	}

	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
	list_del(&line->list);
	l_mg->nr_free_lines--;

	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (unlikely(bit >= lm->blk_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);

		pblk_debug(pblk, "line %d is bad\n", line->id);
		goto retry;
	}

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		switch (ret) {
		case -EAGAIN:
			list_add(&line->list, &l_mg->bad_list);
			goto retry;
		case -EINTR:
			list_add(&line->list, &l_mg->corrupt_list);
			goto retry;
		default:
			pblk_err(pblk, "failed to prepare line %d\n", line->id);
			list_add(&line->list, &l_mg->free_list);
			l_mg->nr_free_lines++;
			return NULL;
		}
	}

	return line;
}

static struct pblk_line *pblk_line_retry(struct pblk *pblk,
					 struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *retry_line;

retry:
	spin_lock(&l_mg->free_lock);
	retry_line = pblk_line_get(pblk);
	if (!retry_line) {
		l_mg->data_line = NULL;
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	retry_line->map_bitmap = line->map_bitmap;
	retry_line->invalid_bitmap = line->invalid_bitmap;
	retry_line->smeta = line->smeta;
	retry_line->emeta = line->emeta;
	retry_line->meta_line = line->meta_line;

	pblk_line_reinit(line);

	l_mg->data_line = retry_line;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line, false);

	if (pblk_line_erase(pblk, retry_line))
		goto retry;

	return retry_line;
}

static void pblk_set_space_limit(struct pblk *pblk)
{
	struct pblk_rl *rl = &pblk->rl;

	atomic_set(&rl->rb_space, 0);
}

struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;

	spin_lock(&l_mg->free_lock);
	line = pblk_line_get(pblk);
	if (!line) {
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	line->seq_nr = l_mg->d_seq_nr++;
	line->type = PBLK_LINETYPE_DATA;
	l_mg->data_line = line;

	pblk_line_setup_metadata(line, l_mg, &pblk->lm);

	/* Allocate next line for preparation */
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_set_space_limit(pblk);

		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	}
	spin_unlock(&l_mg->free_lock);

	if (pblk_line_alloc_bitmaps(pblk, line))
		return NULL;

	if (pblk_line_erase(pblk, line)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;
	}

retry_setup:
	if (!pblk_line_init_metadata(pblk, line, NULL)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, line, 1)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	pblk_rl_free_lines_dec(&pblk->rl, line, true);

	return line;
}

void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
{
	struct pblk_line *line;

	line = pblk_ppa_to_line(pblk, ppa);
	kref_put(&line->ref, pblk_line_put_wq);
}

void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	int i;

	for (i = 0; i < rqd->nr_ppas; i++)
		pblk_ppa_to_line_put(pblk, ppa_list[i]);
}

static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
{
	lockdep_assert_held(&pblk->l_mg.free_lock);

	pblk_set_space_limit(pblk);
	pblk->state = PBLK_STATE_STOPPING;
	trace_pblk_state(pblk_disk_name(pblk), pblk->state);
}

static void pblk_line_close_meta_sync(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line, *tline;
	LIST_HEAD(list);

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return;
	}

	list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
	spin_unlock(&l_mg->close_lock);

	list_for_each_entry_safe(line, tline, &list, list) {
		struct pblk_emeta *emeta = line->emeta;

		while (emeta->mem < lm->emeta_len[0]) {
			int ret;

			ret = pblk_submit_meta_io(pblk, line);
			if (ret) {
				pblk_err(pblk, "sync meta line %d failed (%d)\n",
					line->id, ret);
				return;
			}
		}
	}

	pblk_wait_for_meta(pblk);
	flush_workqueue(pblk->close_wq);
}

void __pblk_pipeline_flush(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	if (pblk->state == PBLK_STATE_RECOVERING ||
					pblk->state == PBLK_STATE_STOPPED) {
		spin_unlock(&l_mg->free_lock);
		return;
	}
	pblk->state = PBLK_STATE_RECOVERING;
	trace_pblk_state(pblk_disk_name(pblk), pblk->state);
	spin_unlock(&l_mg->free_lock);

	pblk_flush_writer(pblk);
	pblk_wait_for_meta(pblk);

	ret = pblk_recov_pad(pblk);
	if (ret) {
		pblk_err(pblk, "could not close data on teardown(%d)\n", ret);
		return;
	}

	flush_workqueue(pblk->bb_wq);
	pblk_line_close_meta_sync(pblk);
}

void __pblk_pipeline_stop(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	spin_lock(&l_mg->free_lock);
	pblk->state = PBLK_STATE_STOPPED;
	trace_pblk_state(pblk_disk_name(pblk), pblk->state);
	l_mg->data_line = NULL;
	l_mg->data_next = NULL;
	spin_unlock(&l_mg->free_lock);
}

void pblk_pipeline_stop(struct pblk *pblk)
{
	__pblk_pipeline_flush(pblk);
	__pblk_pipeline_stop(pblk);
}

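/* Switch the write path over to the prepared next data line and allocate a
 * new next line for preparation. Returns the new current data line, or NULL
 * if no line could be set up.
 */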
struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *cur, *new = NULL;
	unsigned int left_seblks;

	new = l_mg->data_next;
	if (!new)
		goto out;

	spin_lock(&l_mg->free_lock);
	cur = l_mg->data_line;
	l_mg->data_line = new;

	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
	spin_unlock(&l_mg->free_lock);

retry_erase:
	left_seblks = atomic_read(&new->left_seblks);
	if (left_seblks) {
		/* If line is not fully erased, erase it */
		if (atomic_read(&new->left_eblks)) {
			if (pblk_line_erase(pblk, new))
				goto out;
		} else {
			io_schedule();
		}
		goto retry_erase;
	}

	if (pblk_line_alloc_bitmaps(pblk, new))
		return NULL;

retry_setup:
	if (!pblk_line_init_metadata(pblk, new, cur)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, new, 1)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	pblk_rl_free_lines_dec(&pblk->rl, new, true);

	/* Allocate next line for preparation */
	spin_lock(&l_mg->free_lock);
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_stop_writes(pblk, new);
		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	}
	spin_unlock(&l_mg->free_lock);

out:
	return new;
}

static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;

	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	if (line->w_err_gc->has_gc_err) {
		spin_unlock(&line->lock);
		pblk_err(pblk, "line %d had errors during GC\n", line->id);
		pblk_put_line_back(pblk, line);
		line->w_err_gc->has_gc_err = 0;
		return;
	}

	line->state = PBLK_LINESTATE_FREE;
	trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
	line->gc_group = PBLK_LINEGC_NONE;
	pblk_line_free(line);

	if (line->w_err_gc->has_write_err) {
		pblk_rl_werr_line_out(&pblk->rl);
		line->w_err_gc->has_write_err = 0;
	}

	spin_unlock(&line->lock);
	atomic_dec(&gc->pipeline_gc);

	spin_lock(&l_mg->free_lock);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_inc(&pblk->rl, line);
}

static void pblk_line_put_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_put_ws = container_of(work,
						struct pblk_line_ws, ws);
	struct pblk *pblk = line_put_ws->pblk;
	struct pblk_line *line = line_put_ws->line;

	__pblk_line_put(pblk, line);
	mempool_free(line_put_ws, &pblk->gen_ws_pool);
}

void pblk_line_put(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;

	__pblk_line_put(pblk, line);
}

void pblk_line_put_wq(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;
	struct pblk_line_ws *line_put_ws;

	line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
	if (!line_put_ws)
		return;

	line_put_ws->pblk = pblk;
	line_put_ws->line = line;
	line_put_ws->priv = NULL;

	INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
	queue_work(pblk->r_end_wq, &line_put_ws->ws);
}

int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq *rqd;
	int err;

	rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);

	pblk_setup_e_rq(pblk, rqd, ppa);

	rqd->end_io = pblk_end_io_erase;
	rqd->private = pblk;

	trace_pblk_chunk_reset(pblk_disk_name(pblk),
				&ppa, PBLK_CHUNK_RESET_START);

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	err = pblk_submit_io(pblk, rqd, NULL);
	if (err) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
					pblk_ppa_to_line_id(ppa),
					pblk_ppa_to_pos(geo, ppa));
	}

	return err;
}

struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
	return pblk->l_mg.data_line;
}

/* For now, always erase next line */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
{
	return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
	return (line->left_msecs == 0);
}

static void pblk_line_should_sync_meta(struct pblk *pblk)
{
	if (pblk_rl_is_limit(&pblk->rl))
		pblk_line_close_meta_sync(pblk);
}

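/* Close a fully written line: release its run-time metadata resources and
 * place it on the GC list that matches its valid sector count.
 */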
void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list;
	int i;

#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt closed line %d\n", line->id);
#endif

	spin_lock(&l_mg->free_lock);
	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
	spin_unlock(&l_mg->free_lock);

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
	line->state = PBLK_LINESTATE_CLOSED;
	move_list = pblk_line_gc_list(pblk, line);
	list_add_tail(&line->list, move_list);

	mempool_free(line->map_bitmap, l_mg->bitmap_pool);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct pblk_lun *rlun = &pblk->luns[i];
		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
		int state = line->chks[pos].state;

		if (!(state & NVM_CHK_ST_OFFLINE))
			state = NVM_CHK_ST_CLOSED;
	}

	spin_unlock(&line->lock);
	spin_unlock(&l_mg->gc_lock);

	trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
}

void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);

	/* No need for exact vsc value; avoid a big line lock and take an
	 * approximation.
	 */
	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);

	wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
	wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
	wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));

	if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
		emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
		guid_copy((guid_t *)&emeta_buf->header.uuid,
							&pblk->instance_uuid);
		emeta_buf->header.id = cpu_to_le32(line->id);
		emeta_buf->header.type = cpu_to_le16(line->type);
		emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
		emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
		emeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
	}

	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));

	spin_lock(&l_mg->close_lock);
	spin_lock(&line->lock);

	/* Update the in-memory start address for emeta, in case it has
	 * shifted due to write errors
	 */
	if (line->emeta_ssec != line->cur_sec)
		line->emeta_ssec = line->cur_sec;

	list_add_tail(&line->list, &l_mg->emeta_list);
	spin_unlock(&line->lock);
	spin_unlock(&l_mg->close_lock);

	pblk_line_should_sync_meta(pblk);
}

static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned int lba_list_size = lm->emeta_len[2];
	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
	struct pblk_emeta *emeta = line->emeta;

	w_err_gc->lba_list = kvmalloc(lba_list_size, GFP_KERNEL);
	memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
				lba_list_size);
}

void pblk_line_close_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;
	struct pblk_w_err_gc *w_err_gc = line->w_err_gc;

	/* Write errors make the emeta start address stored in smeta invalid,
	 * so keep a copy of the lba list until we've gc'd the line
	 */
	if (w_err_gc->has_write_err)
		pblk_save_lba_list(pblk, line);

	pblk_line_close(pblk, line);
	mempool_free(line_ws, &pblk->gen_ws_pool);
}

void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		      void (*work)(struct work_struct *), gfp_t gfp_mask,
		      struct workqueue_struct *wq)
{
	struct pblk_line_ws *line_ws;

	line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);
	if (!line_ws) {
		pblk_err(pblk, "pblk: could not allocate memory\n");
		return;
	}

	line_ws->pblk = pblk;
	line_ws->line = line;
	line_ws->priv = priv;

	INIT_WORK(&line_ws->ws, work);
	queue_work(wq, &line_ws->ws);
}

static void __pblk_down_chunk(struct pblk *pblk, int pos)
{
	struct pblk_lun *rlun = &pblk->luns[pos];
	int ret;

	/*
	 * Only send one inflight I/O per LUN. Since we map at a page
	 * granularity, all ppas in the I/O will map to the same LUN
	 */

	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
	if (ret == -ETIME || ret == -EINTR)
		pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
				-ret);
}

void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa);

	__pblk_down_chunk(pblk, pos);
}

void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
		  unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_ppa_to_pos(geo, ppa);

	/* If the LUN has been locked for this same request, do not attempt to
	 * lock it again
	 */
	if (test_and_set_bit(pos, lun_bitmap))
		return;

	__pblk_down_chunk(pblk, pos);
}

void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int pos = pblk_ppa_to_pos(geo, ppa);

	rlun = &pblk->luns[pos];
	up(&rlun->wr_sem);
}

void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int num_lun = geo->all_luns;
	int bit = -1;

	while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
		rlun = &pblk->luns[bit];
		up(&rlun->wr_sem);
	}
}

void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
	struct ppa_addr ppa_l2p;

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->capacity)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
		pblk_map_invalidate(pblk, ppa_l2p);

	pblk_trans_map_set(pblk, lba, ppa);
	spin_unlock(&pblk->trans_lock);
}

void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	pblk_update_map(pblk, lba, ppa);
}

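/* Update the L2P entry for an lba being relocated by GC. Returns 1 if the
 * entry was updated and 0 if the mapping no longer points to the sector
 * being garbage collected (it was updated in the meantime).
 */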
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
		       struct pblk_line *gc_line, u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int ret = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa_new));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
#endif

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->capacity)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return 0;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);

	if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
		spin_lock(&gc_line->lock);
		WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
						"pblk: corrupted GC update");
		spin_unlock(&gc_line->lock);

		ret = 0;
		goto out;
	}

	pblk_trans_map_set(pblk, lba, ppa_new);
out:
	spin_unlock(&pblk->trans_lock);
	return ret;
}

void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
			 struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
{
	struct ppa_addr ppa_l2p;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa_mapped));
#endif
	/* Invalidate and discard padded entries */
	if (lba == ADDR_EMPTY) {
		atomic64_inc(&pblk->pad_wa);
#ifdef CONFIG_NVM_PBLK_DEBUG
		atomic_long_inc(&pblk->padded_wb);
#endif
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		return;
	}

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->capacity)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	/* Do not update L2P if the cacheline has been updated. In this case,
	 * the mapped ppa must be invalidated
	 */
	if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		goto out;
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
#endif

	pblk_trans_map_set(pblk, lba, ppa_mapped);
out:
	spin_unlock(&pblk->trans_lock);
}

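/* Look up a sequential lba range in the L2P table. Stops at the first
 * transition between cached and device-resident entries and returns the
 * number of entries filled in; device-resident entries take a line reference.
 */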
int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs, bool *from_cache)
{
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr ppa;

		ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);

		/* If the L2P entry maps to a line, the reference is valid */
		if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
			struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);

			if (i > 0 && *from_cache)
				break;
			*from_cache = false;

			kref_get(&line->ref);
		} else {
			if (i > 0 && !*from_cache)
				break;
			*from_cache = true;
		}
	}
	spin_unlock(&pblk->trans_lock);
	return i;
}

void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs)
{
	u64 lba;
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		lba = lba_list[i];
		if (lba != ADDR_EMPTY) {
			/* logic error: lba out-of-bounds. Ignore update */
			if (!(lba < pblk->capacity)) {
				WARN(1, "pblk: corrupted L2P map request\n");
				continue;
			}
			ppas[i] = pblk_trans_map_get(pblk, lba);
		}
	}
	spin_unlock(&pblk->trans_lock);
}

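/* Return the buffer that holds per-sector metadata for a write: the OOB
 * metadata list when the device supports OOB metadata, otherwise the last
 * (packed metadata) page of the request's bio.
 */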
void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd)
{
	void *buffer;

	if (pblk_is_oob_meta_supported(pblk)) {
		/* Just use OOB metadata buffer as always */
		buffer = rqd->meta_list;
	} else {
		/* We need to reuse last page of request (packed metadata)
		 * in similar way as traditional oob metadata
		 */
		buffer = page_to_virt(
			rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
	}

	return buffer;
}

void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
{
	void *meta_list = rqd->meta_list;
	void *page;
	int i = 0;

	if (pblk_is_oob_meta_supported(pblk))
		return;

	page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
	/* We need to fill oob meta buffer with data from packed metadata */
	for (; i < rqd->nr_ppas; i++)
		memcpy(pblk_get_meta(pblk, meta_list, i),
		       page + (i * sizeof(struct pblk_sec_meta)),
		       sizeof(struct pblk_sec_meta));
}