/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"
/*
 * There is no guarantee that the value read from cache has not been updated
 * and resides at another location in the cache. We guarantee though that if
 * the value is read from the cache, it belongs to the mapped lba. In order to
 * guarantee that writes and reads are ordered, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
				sector_t lba, struct ppa_addr ppa,
				int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(pblk_ppa_empty(ppa));
	BUG_ON(!pblk_addr_in_cache(ppa));
#endif

	return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
						bio_iter, advanced_bio);
}
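/*
 * Build a multi-sector read request: look up the L2P mapping for each
 * logical sector starting at blba. Sectors that are empty or resolved from
 * the write buffer are copied directly into the bio and marked in
 * read_bitmap; the remaining sectors are collected in rqd->ppa_list to be
 * read from the device.
 */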
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
				 sector_t blba, unsigned long *read_bitmap)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct bio *bio = rqd->bio;
	struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
	int nr_secs = rqd->nr_ppas;
	bool advanced_bio = false;
	int i, j = 0;

	pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		struct ppa_addr p = ppas[i];
		sector_t lba = blba + i;

retry:
		if (pblk_ppa_empty(p)) {
			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);

			if (unlikely(!advanced_bio)) {
				bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
				advanced_bio = true;
			}

			goto next;
		}

		/* Try to read from write buffer. The address is later checked
		 * on the write buffer to prevent retrieving overwritten data.
		 */
		if (pblk_addr_in_cache(p)) {
			if (!pblk_read_from_cache(pblk, bio, lba, p, i,
								advanced_bio)) {
				pblk_lookup_l2p_seq(pblk, &p, lba, 1);
				goto retry;
			}
			WARN_ON(test_and_set_bit(i, read_bitmap));
			meta_list[i].lba = cpu_to_le64(lba);
			advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
			atomic_long_inc(&pblk->cache_reads);
#endif
		} else {
			/* Read from media non-cached sectors */
			rqd->ppa_list[j++] = p;
		}

next:
		if (advanced_bio)
			bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	if (pblk_io_aligned(pblk, nr_secs))
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
	else
		rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}
static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	int err;

	err = pblk_submit_io(pblk, rqd);
	if (err)
		return NVM_IO_ERR;

	return NVM_IO_OK;
}
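/*
 * Sanity check on read completion: the lba stored in the out-of-band
 * metadata of each sector must match the lba that was requested. Empty
 * (unmapped) sectors are skipped.
 */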
static void pblk_read_check(struct pblk *pblk, struct nvm_rq *rqd,
			   sector_t blba)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	int nr_lbas = rqd->nr_ppas;
	int i;

	for (i = 0; i < nr_lbas; i++) {
		u64 lba = le64_to_cpu(meta_list[i].lba);

		if (lba == ADDR_EMPTY)
			continue;

		WARN(lba != blba + i, "pblk: corrupted read LBA\n");
	}
}
static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list;
	int i;

	ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;

	for (i = 0; i < rqd->nr_ppas; i++) {
		struct ppa_addr ppa = ppa_list[i];
		struct pblk_line *line;

		line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
		kref_put(&line->ref, pblk_line_put_wq);
	}
}
static void pblk_end_user_read(struct bio *bio)
{
#ifdef CONFIG_NVM_DEBUG
	WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
	bio_endio(bio);
}
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
			       bool put_line)
{
	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (rqd->error)
		pblk_log_read_err(pblk, rqd);
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
#endif

	pblk_read_check(pblk, rqd, r_ctx->lba);

	bio_put(bio);
	if (r_ctx->private)
		pblk_end_user_read((struct bio *)r_ctx->private);

	if (put_line)
		pblk_read_put_rqd_kref(pblk, rqd);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

	pblk_free_rqd(pblk, rqd, PBLK_READ);
	atomic_dec(&pblk->inflight_io);
}
static void pblk_end_io_read(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;

	__pblk_end_io_read(pblk, rqd, true);
}
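/*
 * Partial read path: some sectors were already served from the write
 * buffer (set in read_bitmap), but the remaining "holes" must come from
 * the device. A temporary bio with one page per hole is read synchronously,
 * and its data is then copied into the corresponding pages of the original
 * bio before the request is completed.
 */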
static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
				      unsigned int bio_init_idx,
				      unsigned long *read_bitmap)
{
	struct bio *new_bio, *bio = rqd->bio;
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct bio_vec src_bv, dst_bv;
	void *ppa_ptr = NULL;
	void *src_p, *dst_p;
	dma_addr_t dma_ppa_list = 0;
	__le64 *lba_list_mem, *lba_list_media;
	int nr_secs = rqd->nr_ppas;
	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
	int i, ret, hole;

	/* Re-use allocated memory for intermediate lbas */
	lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
	lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);

	new_bio = bio_alloc(GFP_KERNEL, nr_holes);

	if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
		goto err;

	if (nr_holes != new_bio->bi_vcnt) {
		pr_err("pblk: malformed bio\n");
		goto err;
	}

	for (i = 0; i < nr_secs; i++)
		lba_list_mem[i] = meta_list[i].lba;

	new_bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(new_bio, REQ_OP_READ, 0);

	rqd->bio = new_bio;
	rqd->nr_ppas = nr_holes;
	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

	if (unlikely(nr_holes == 1)) {
		ppa_ptr = rqd->ppa_list;
		dma_ppa_list = rqd->dma_ppa_list;
		rqd->ppa_addr = rqd->ppa_list[0];
	}

	ret = pblk_submit_io_sync(pblk, rqd);
	if (ret) {
		bio_put(rqd->bio);
		pr_err("pblk: sync read IO submission failed\n");
		goto err;
	}

	if (rqd->error) {
		atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
	}

	if (unlikely(nr_holes == 1)) {
		struct ppa_addr ppa;

		ppa = rqd->ppa_addr;
		rqd->ppa_list = ppa_ptr;
		rqd->dma_ppa_list = dma_ppa_list;
		rqd->ppa_list[0] = ppa;
	}

	for (i = 0; i < nr_secs; i++) {
		lba_list_media[i] = meta_list[i].lba;
		meta_list[i].lba = lba_list_mem[i];
	}

	/* Fill the holes in the original bio */
	i = 0;
	hole = find_first_zero_bit(read_bitmap, nr_secs);
	do {
		int line_id = pblk_dev_ppa_to_line(rqd->ppa_list[i]);
		struct pblk_line *line = &pblk->lines[line_id];

		kref_put(&line->ref, pblk_line_put);

		meta_list[hole].lba = lba_list_media[i];

		src_bv = new_bio->bi_io_vec[i++];
		dst_bv = bio->bi_io_vec[bio_init_idx + hole];

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
			src_p + src_bv.bv_offset,
			PBLK_EXPOSED_PAGE_SIZE);

		kunmap_atomic(src_p);
		kunmap_atomic(dst_p);

		mempool_free(src_bv.bv_page, pblk->page_bio_pool);

		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
	} while (hole < nr_secs);

	bio_put(new_bio);

	/* Complete the original bio and associated request */
	bio_endio(bio);
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;

	__pblk_end_io_read(pblk, rqd, false);
	return NVM_IO_OK;

err:
	/* Free allocated pages in new bio */
	pblk_bio_free_pages(pblk, bio, 0, new_bio->bi_vcnt);
	__pblk_end_io_read(pblk, rqd, false);
	return NVM_IO_ERR;
}
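/* Single-sector version of pblk_read_ppalist_rq() */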
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
			 sector_t lba, unsigned long *read_bitmap)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct bio *bio = rqd->bio;
	struct ppa_addr ppa;

	pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
	if (pblk_ppa_empty(ppa)) {
		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
		return;
	}

	/* Try to read from write buffer. The address is later checked on the
	 * write buffer to prevent retrieving overwritten data.
	 */
	if (pblk_addr_in_cache(ppa)) {
		if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
			pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
			goto retry;
		}

		WARN_ON(test_and_set_bit(0, read_bitmap));
		meta_list[0].lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_DEBUG
		atomic_long_inc(&pblk->cache_reads);
#endif
	} else {
		rqd->ppa_addr = ppa;
	}

	rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}
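/*
 * Main entry point for user reads. Depending on how many sectors could be
 * resolved from the write buffer, the request is either completed
 * immediately (all sectors cached), submitted to the device as-is (no
 * sectors cached), or handed to the partial read path (a mix of both).
 */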
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	sector_t blba = pblk_get_lba(bio);
	unsigned int nr_secs = pblk_get_secs(bio);
	struct pblk_g_ctx *r_ctx;
	struct nvm_rq *rqd;
	unsigned int bio_init_idx;
	unsigned long read_bitmap; /* Max 64 ppas per request */
	int ret = NVM_IO_ERR;

	/* logic error: lba out-of-bounds. Ignore read request */
	if (blba >= pblk->rl.nr_secs || nr_secs > PBLK_MAX_REQ_ADDRS) {
		WARN(1, "pblk: read lba out of bounds (lba:%llu, nr:%d)\n",
					(unsigned long long)blba, nr_secs);
		return NVM_IO_ERR;
	}

	bitmap_zero(&read_bitmap, nr_secs);

	rqd = pblk_alloc_rqd(pblk, PBLK_READ);

	rqd->opcode = NVM_OP_PREAD;
	rqd->bio = bio;
	rqd->nr_ppas = nr_secs;
	rqd->private = pblk;
	rqd->end_io = pblk_end_io_read;

	r_ctx = nvm_rq_to_pdu(rqd);
	r_ctx->lba = blba;

	/* Save the index for this bio's start. This is needed in case
	 * we need to fill a partial read.
	 */
	bio_init_idx = pblk_get_bi_idx(bio);

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list) {
		pr_err("pblk: not able to allocate ppa list\n");
		goto fail_rqd_free;
	}

	if (nr_secs > 1) {
		rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
		rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

		pblk_read_ppalist_rq(pblk, rqd, blba, &read_bitmap);
	} else {
		pblk_read_rq(pblk, rqd, blba, &read_bitmap);
	}

	bio_get(bio);
	if (bitmap_full(&read_bitmap, nr_secs)) {
		bio_endio(bio);
		atomic_inc(&pblk->inflight_io);
		__pblk_end_io_read(pblk, rqd, false);
		return NVM_IO_OK;
	}

	/* All sectors are to be read from the device */
	if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
		struct bio *int_bio = NULL;

		/* Clone read bio to deal with read errors internally */
		int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
		if (!int_bio) {
			pr_err("pblk: could not clone read bio\n");
			goto fail_end_io;
		}

		rqd->bio = int_bio;
		r_ctx->private = bio;

		ret = pblk_submit_read_io(pblk, rqd);
		if (ret) {
			pr_err("pblk: read IO submission failed\n");
			if (int_bio)
				bio_put(int_bio);
			goto fail_end_io;
		}

		return NVM_IO_OK;
	}

	/* The read bio request could be partially filled by the write buffer,
	 * but there are some holes that need to be read from the drive.
	 */
	ret = pblk_fill_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
	if (ret) {
		pr_err("pblk: failed to perform partial read\n");
		return ret;
	}

	return NVM_IO_OK;

fail_rqd_free:
	pblk_free_rqd(pblk, rqd, PBLK_READ);
	return ret;
fail_end_io:
	__pblk_end_io_read(pblk, rqd, false);
	return ret;
}
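/*
 * GC lookup helpers: for each victim sector, compare the current L2P
 * mapping against the physical address recorded when the line was chosen
 * for GC. If they no longer match, the sector has been updated (or
 * invalidated) in the meantime and is dropped from the GC read.
 */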
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
			      struct pblk_line *line, u64 *lba_list,
			      u64 *paddr_list_gc, unsigned int nr_secs)
{
	struct ppa_addr ppa_list_l2p[PBLK_MAX_REQ_ADDRS];
	struct ppa_addr ppa_gc;
	int valid_secs = 0;
	int i;

	pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

	for (i = 0; i < nr_secs; i++) {
		if (lba_list[i] == ADDR_EMPTY)
			continue;

		ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
		if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
			paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
			continue;
		}

		rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

	return valid_secs;
}
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
		      struct pblk_line *line, sector_t lba,
		      u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int valid_secs = 0;

	if (lba == ADDR_EMPTY)
		goto out;

	/* logic error: lba out-of-bounds */
	if (lba >= pblk->rl.nr_secs) {
		WARN(1, "pblk: read lba out of bounds\n");
		goto out;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
	if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
		goto out;

	rqd->ppa_addr = ppa_l2p;
	valid_secs = 1;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_inc(&pblk->inflight_reads);
#endif

out:
	return valid_secs;
}
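/*
 * Read the still-valid sectors of a line being garbage collected into the
 * GC buffer. The read is issued synchronously so that GC can re-write the
 * data through the normal write path.
 */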
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct bio *bio;
	struct nvm_rq rqd;
	int data_len;
	int ret = NVM_IO_OK;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return -ENOMEM;

	if (gc_rq->nr_secs > 1) {
		rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
		rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

		gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list,
							gc_rq->paddr_list,
							gc_rq->nr_secs);
		if (gc_rq->secs_to_gc == 1)
			rqd.ppa_addr = rqd.ppa_list[0];
	} else {
		gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
							gc_rq->lba_list[0],
							gc_rq->paddr_list[0]);
	}

	if (!(gc_rq->secs_to_gc))
		goto out;

	data_len = (gc_rq->secs_to_gc) * geo->sec_size;
	bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
						PBLK_VMALLOC_META, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
		goto err_free_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd.opcode = NVM_OP_PREAD;
	rqd.nr_ppas = gc_rq->secs_to_gc;
	rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
	rqd.bio = bio;

	if (pblk_submit_io_sync(pblk, &rqd)) {
		ret = -EIO;
		pr_err("pblk: GC read request failed\n");
		goto err_free_bio;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_DEBUG
		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
	atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
	atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;

err_free_bio:
	bio_put(bio);
err_free_dma:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
	return ret;
}