/*
 * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 * Implementation of a generic nvm manager for Open-Channel SSDs.
 */

#include "gennvm.h"
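/*
 * Each LUN tracks its blocks on three lists: free_list (available for
 * allocation), used_list (handed out to a target) and bb_list (bad
 * blocks). All list movements and the nr_*_blocks counters are
 * serialized by the per-LUN vlun.lock.
 */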
static void gennvm_blocks_free(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	int i;

	gennvm_for_each_lun(gn, lun, i) {
		if (!lun->vlun.blocks)
			break;
		vfree(lun->vlun.blocks);
	}
}
static void gennvm_luns_free(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;

	kfree(gn->luns);
}
static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
	struct gen_lun *lun;
	int i;

	gn->luns = kcalloc(dev->nr_luns, sizeof(struct gen_lun), GFP_KERNEL);
	if (!gn->luns)
		return -ENOMEM;

	gennvm_for_each_lun(gn, lun, i) {
		spin_lock_init(&lun->vlun.lock);
		INIT_LIST_HEAD(&lun->free_list);
		INIT_LIST_HEAD(&lun->used_list);
		INIT_LIST_HEAD(&lun->bb_list);

		lun->reserved_blocks = 2; /* for GC only */
		lun->vlun.id = i;
		lun->vlun.lun_id = i % dev->luns_per_chnl;
		lun->vlun.chnl_id = i / dev->luns_per_chnl;
		lun->vlun.nr_free_blocks = dev->blks_per_lun;
		lun->vlun.nr_inuse_blocks = 0;
		lun->vlun.nr_bad_blocks = 0;
	}

	return 0;
}
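/*
 * LUNs are laid out flat in gn->luns: a LUN's index is
 * chnl_id * luns_per_chnl + lun_id. The bad-block callback below
 * resolves the owning LUN from a device ppa the same way.
 */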
static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
								void *private)
{
	struct gen_nvm *gn = private;
	struct nvm_dev *dev = gn->dev;
	struct gen_lun *lun;
	struct nvm_block *blk;
	int i;

	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];

	for (i = 0; i < nr_blocks; i++) {
		if (blks[i] == 0)
			continue;

		blk = &lun->vlun.blocks[i];
		if (!blk) {
			pr_err("gennvm: BB data is out of bounds.\n");
			return -EINVAL;
		}

		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		lun->vlun.nr_free_blocks--;
	}

	return 0;
}
static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct nvm_dev *dev = private;
	struct gen_nvm *gn = dev->mp;
	sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
	u64 elba = slba + nlb;
	struct gen_lun *lun;
	struct nvm_block *blk;
	u64 i;
	int lun_id;

	if (unlikely(elba > dev->total_pages)) {
		pr_err("gennvm: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);

		if (unlikely(pba >= max_pages && pba != U64_MAX)) {
			pr_err("gennvm: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected. It often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		/* resolve block from physical address */
		lun_id = div_u64(pba, dev->sec_per_lun);
		lun = &gn->luns[lun_id];

		/* Calculate block offset into lun */
		pba = pba - (dev->sec_per_lun * lun_id);
		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];

		if (!blk->type) {
			/* at this point, we don't know anything about the
			 * block. It's up to the FTL on top to re-establish the
			 * block state. The block is assumed to be open.
			 */
			list_move_tail(&blk->list, &lun->used_list);
			blk->type = 1;
			lun->vlun.nr_free_blocks--;
			lun->vlun.nr_inuse_blocks++;
		}
	}

	return 0;
}
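/*
 * gennvm_blocks_init() seeds the state that the callbacks above consume:
 * every block starts on its LUN's free list, the bad-block table then
 * moves known-bad blocks to bb_list, and the L2P table moves already
 * mapped blocks to used_list.
 */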
static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
	struct gen_lun *lun;
	struct nvm_block *block;
	sector_t lun_iter, blk_iter, cur_block_id = 0;
	int ret;

	gennvm_for_each_lun(gn, lun, lun_iter) {
		lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
							dev->blks_per_lun);
		if (!lun->vlun.blocks)
			return -ENOMEM;

		for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
			block = &lun->vlun.blocks[blk_iter];

			INIT_LIST_HEAD(&block->list);

			block->lun = &lun->vlun;
			block->id = cur_block_id++;

			/* First block is reserved for device */
			if (unlikely(lun_iter == 0 && blk_iter == 0)) {
				lun->vlun.nr_free_blocks--;
				continue;
			}

			list_add_tail(&block->list, &lun->free_list);
		}

		if (dev->ops->get_bb_tbl) {
			struct ppa_addr ppa;

			ppa.ppa = 0;
			ppa.g.ch = lun->vlun.chnl_id;
			ppa.g.lun = lun->vlun.id;
			ppa = generic_to_dev_addr(dev, ppa);

			ret = dev->ops->get_bb_tbl(dev, ppa,
						dev->blks_per_lun,
						gennvm_block_bb, gn);
			if (ret)
				pr_err("gennvm: could not read BB table\n");
		}
	}

	if (dev->ops->get_l2p_tbl) {
		ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
							gennvm_block_map, dev);
		if (ret) {
			pr_err("gennvm: could not read L2P table.\n");
			pr_warn("gennvm: falling back to default block initialization\n");
		}
	}

	return 0;
}
static void gennvm_free(struct nvm_dev *dev)
{
	gennvm_blocks_free(dev);
	gennvm_luns_free(dev);
	kfree(dev->mp);
	dev->mp = NULL;
}
static int gennvm_register(struct nvm_dev *dev)
{
	struct gen_nvm *gn;
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
	if (!gn) {
		module_put(THIS_MODULE);
		return -ENOMEM;
	}

	gn->dev = dev;
	gn->nr_luns = dev->nr_luns;
	dev->mp = gn;

	ret = gennvm_luns_init(dev, gn);
	if (ret) {
		pr_err("gennvm: could not initialize luns\n");
		goto err;
	}

	ret = gennvm_blocks_init(dev, gn);
	if (ret) {
		pr_err("gennvm: could not initialize blocks\n");
		goto err;
	}

	return 1;
err:
	gennvm_free(dev);
	module_put(THIS_MODULE);
	return ret;
}
static void gennvm_unregister(struct nvm_dev *dev)
{
	gennvm_free(dev);
	module_put(THIS_MODULE);
}
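/*
 * Block allocation: gennvm_get_blk() hands out the oldest free block and
 * keeps reserved_blocks in reserve for garbage collection; only requests
 * flagged NVM_IOTYPE_GC may dip into that reserve. gennvm_put_blk()
 * returns a block to the free list, or to the bad-block list if the
 * target marked it bad.
 */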
static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
				struct nvm_lun *vlun, unsigned long flags)
{
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
	struct nvm_block *blk = NULL;
	int is_gc = flags & NVM_IOTYPE_GC;

	spin_lock(&vlun->lock);

	if (list_empty(&lun->free_list)) {
		pr_err_ratelimited("gennvm: lun %u has no free blocks available",
							lun->vlun.id);
		goto out;
	}

	if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks)
		goto out;

	blk = list_first_entry(&lun->free_list, struct nvm_block, list);
	list_move_tail(&blk->list, &lun->used_list);
	blk->type = 1;

	lun->vlun.nr_free_blocks--;
	lun->vlun.nr_inuse_blocks++;

out:
	spin_unlock(&vlun->lock);
	return blk;
}
static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	struct nvm_lun *vlun = blk->lun;
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);

	spin_lock(&vlun->lock);

	switch (blk->type) {
	case 1:
		list_move_tail(&blk->list, &lun->free_list);
		lun->vlun.nr_free_blocks++;
		lun->vlun.nr_inuse_blocks--;
		blk->type = 0;
		break;
	case 2:
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		lun->vlun.nr_inuse_blocks--;
		break;
	default:
		WARN_ON_ONCE(1);
		pr_err("gennvm: erroneous block type (%lu -> %u)\n",
							blk->id, blk->type);
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		lun->vlun.nr_inuse_blocks--;
	}

	spin_unlock(&vlun->lock);
}
static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_pages > 1) {
		for (i = 0; i < rqd->nr_pages; i++)
			rqd->ppa_list[i] = dev_to_generic_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
	}
}
static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_pages > 1) {
		for (i = 0; i < rqd->nr_pages; i++)
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
	}
}
static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return 0;

	/* Convert address space */
	gennvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	return dev->ops->submit_io(dev, rqd);
}
static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
								int type)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	struct nvm_block *blk;

	if (unlikely(ppa->g.ch > dev->nr_chnls ||
					ppa->g.lun > dev->luns_per_chnl ||
					ppa->g.blk > dev->blks_per_lun)) {
		WARN_ON_ONCE(1);
		pr_err("gennvm: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u)\n",
				ppa->g.ch, dev->nr_chnls,
				ppa->g.lun, dev->luns_per_chnl,
				ppa->g.blk, dev->blks_per_lun);
		return;
	}

	lun = &gn->luns[(dev->luns_per_chnl * ppa->g.ch) + ppa->g.lun];
	blk = &lun->vlun.blocks[ppa->g.blk];

	/* will be moved to bb list on put_blk from target */
	blk->type = type;
}
/* mark block bad. It is expected that the target recovers from the error. */
static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (!dev->ops->set_bb_tbl)
		return;

	if (dev->ops->set_bb_tbl(dev, rqd, 1))
		return;

	gennvm_addr_to_generic_mode(dev, rqd);

	/* look up blocks and mark them as bad */
	if (rqd->nr_pages > 1)
		for (i = 0; i < rqd->nr_pages; i++)
			gennvm_blk_set_type(dev, &rqd->ppa_list[i], 2);
	else
		gennvm_blk_set_type(dev, &rqd->ppa_addr, 2);
}
static int gennvm_end_io(struct nvm_rq *rqd, int error)
{
	struct nvm_tgt_instance *ins = rqd->ins;
	int ret = 0;

	switch (error) {
	case NVM_RSP_SUCCESS:
	case NVM_RSP_ERR_EMPTYPAGE:
		break;
	case NVM_RSP_ERR_FAILWRITE:
		gennvm_mark_blk_bad(rqd->dev, rqd);
		/* fall through */
	default:
		ret++;
	}

	ret += ins->tt->end_io(rqd, error);

	return ret;
}
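/*
 * For multi-plane flash an erase must address every plane of the block,
 * so gennvm_erase_blk() builds one ppa per plane (differing only in the
 * plane field) and issues them as a single request.
 */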
static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
							unsigned long flags)
{
	int plane_cnt = 0, pl_idx, ret;
	struct ppa_addr addr;
	struct nvm_rq rqd;

	if (!dev->ops->erase_block)
		return 0;

	addr = block_to_ppa(dev, blk);

	if (dev->plane_mode == NVM_PLANE_SINGLE) {
		rqd.nr_pages = 1;
		rqd.ppa_addr = addr;
	} else {
		plane_cnt = (1 << dev->plane_mode);
		rqd.nr_pages = plane_cnt;

		rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
							&rqd.dma_ppa_list);
		if (!rqd.ppa_list) {
			pr_err("gennvm: failed to allocate dma memory\n");
			return -ENOMEM;
		}

		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			addr.g.pl = pl_idx;
			rqd.ppa_list[pl_idx] = addr;
		}
	}

	gennvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->erase_block(dev, &rqd);

	if (plane_cnt)
		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);

	return ret;
}
static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
{
	struct gen_nvm *gn = dev->mp;

	return &gn->luns[lunid].vlun;
}
static void gennvm_lun_info_print(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	unsigned int i;

	gennvm_for_each_lun(gn, lun, i) {
		spin_lock(&lun->vlun.lock);

		pr_info("%s: lun%8u\t%u\t%u\t%u\n",
				dev->name, i,
				lun->vlun.nr_free_blocks,
				lun->vlun.nr_inuse_blocks,
				lun->vlun.nr_bad_blocks);

		spin_unlock(&lun->vlun.lock);
	}
}
static struct nvmm_type gennvm = {
	.name		= "gennvm",
	.version	= {0, 1, 0},

	.register_mgr	= gennvm_register,
	.unregister_mgr	= gennvm_unregister,

	.get_blk	= gennvm_get_blk,
	.put_blk	= gennvm_put_blk,

	.submit_io	= gennvm_submit_io,
	.end_io		= gennvm_end_io,
	.erase_blk	= gennvm_erase_blk,

	.get_lun	= gennvm_get_lun,
	.lun_info_print	= gennvm_lun_info_print,
};
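/*
 * Example (a sketch, not part of this file): a target reaches these
 * operations through the nvm_dev it is attached to, assuming the
 * lightnvm core has wired this manager up as dev->mt and `tgt_lun` is a
 * hypothetical nvm_lun owned by the target:
 *
 *	struct nvm_block *blk;
 *
 *	blk = dev->mt->get_blk(dev, tgt_lun, 0);
 *	if (!blk)
 *		;	// out of blocks: run GC, retry with NVM_IOTYPE_GC
 *	...
 *	dev->mt->put_blk(dev, blk);
 */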
static int __init gennvm_module_init(void)
{
	return nvm_register_mgr(&gennvm);
}
static void gennvm_module_exit(void)
{
	nvm_unregister_mgr(&gennvm);
}
module_init(gennvm_module_init);
module_exit(gennvm_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic media manager for Open-Channel SSDs");