/*
 * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 * Implementation of a generic nvm manager for Open-Channel SSDs.
 */
23 static void gennvm_blocks_free(struct nvm_dev
*dev
)
25 struct gen_nvm
*gn
= dev
->mp
;
29 gennvm_for_each_lun(gn
, lun
, i
) {
30 if (!lun
->vlun
.blocks
)
32 vfree(lun
->vlun
.blocks
);
36 static void gennvm_luns_free(struct nvm_dev
*dev
)
38 struct gen_nvm
*gn
= dev
->mp
;
43 static int gennvm_luns_init(struct nvm_dev
*dev
, struct gen_nvm
*gn
)
48 gn
->luns
= kcalloc(dev
->nr_luns
, sizeof(struct gen_lun
), GFP_KERNEL
);
52 gennvm_for_each_lun(gn
, lun
, i
) {
53 spin_lock_init(&lun
->vlun
.lock
);
54 INIT_LIST_HEAD(&lun
->free_list
);
55 INIT_LIST_HEAD(&lun
->used_list
);
56 INIT_LIST_HEAD(&lun
->bb_list
);
58 lun
->reserved_blocks
= 2; /* for GC only */
60 lun
->vlun
.lun_id
= i
% dev
->luns_per_chnl
;
61 lun
->vlun
.chnl_id
= i
/ dev
->luns_per_chnl
;
62 lun
->vlun
.nr_free_blocks
= dev
->blks_per_lun
;
63 lun
->vlun
.nr_open_blocks
= 0;
64 lun
->vlun
.nr_closed_blocks
= 0;
65 lun
->vlun
.nr_bad_blocks
= 0;
70 static int gennvm_block_bb(struct ppa_addr ppa
, int nr_blocks
, u8
*blks
,
73 struct gen_nvm
*gn
= private;
74 struct nvm_dev
*dev
= gn
->dev
;
76 struct nvm_block
*blk
;
79 lun
= &gn
->luns
[(dev
->luns_per_chnl
* ppa
.g
.ch
) + ppa
.g
.lun
];
81 for (i
= 0; i
< nr_blocks
; i
++) {
85 blk
= &lun
->vlun
.blocks
[i
];
87 pr_err("gennvm: BB data is out of bounds.\n");
91 list_move_tail(&blk
->list
, &lun
->bb_list
);
92 lun
->vlun
.nr_bad_blocks
++;
93 lun
->vlun
.nr_free_blocks
--;
99 static int gennvm_block_map(u64 slba
, u32 nlb
, __le64
*entries
, void *private)
101 struct nvm_dev
*dev
= private;
102 struct gen_nvm
*gn
= dev
->mp
;
103 sector_t max_pages
= dev
->total_pages
* (dev
->sec_size
>> 9);
104 u64 elba
= slba
+ nlb
;
106 struct nvm_block
*blk
;
110 if (unlikely(elba
> dev
->total_pages
)) {
111 pr_err("gennvm: L2P data from device is out of bounds!\n");
115 for (i
= 0; i
< nlb
; i
++) {
116 u64 pba
= le64_to_cpu(entries
[i
]);
118 if (unlikely(pba
>= max_pages
&& pba
!= U64_MAX
)) {
119 pr_err("gennvm: L2P data entry is out of bounds!\n");
123 /* Address zero is a special one. The first page on a disk is
124 * protected. It often holds internal device boot
130 /* resolve block from physical address */
131 lun_id
= div_u64(pba
, dev
->sec_per_lun
);
132 lun
= &gn
->luns
[lun_id
];
134 /* Calculate block offset into lun */
135 pba
= pba
- (dev
->sec_per_lun
* lun_id
);
136 blk
= &lun
->vlun
.blocks
[div_u64(pba
, dev
->sec_per_blk
)];
139 /* at this point, we don't know anything about the
140 * block. It's up to the FTL on top to re-etablish the
141 * block state. The block is assumed to be open.
143 list_move_tail(&blk
->list
, &lun
->used_list
);
144 blk
->state
= NVM_BLK_ST_OPEN
;
145 lun
->vlun
.nr_free_blocks
--;
146 lun
->vlun
.nr_open_blocks
++;
153 static int gennvm_blocks_init(struct nvm_dev
*dev
, struct gen_nvm
*gn
)
156 struct nvm_block
*block
;
157 sector_t lun_iter
, blk_iter
, cur_block_id
= 0;
160 gennvm_for_each_lun(gn
, lun
, lun_iter
) {
161 lun
->vlun
.blocks
= vzalloc(sizeof(struct nvm_block
) *
163 if (!lun
->vlun
.blocks
)
166 for (blk_iter
= 0; blk_iter
< dev
->blks_per_lun
; blk_iter
++) {
167 block
= &lun
->vlun
.blocks
[blk_iter
];
169 INIT_LIST_HEAD(&block
->list
);
171 block
->lun
= &lun
->vlun
;
172 block
->id
= cur_block_id
++;
174 /* First block is reserved for device */
175 if (unlikely(lun_iter
== 0 && blk_iter
== 0)) {
176 lun
->vlun
.nr_free_blocks
--;
180 list_add_tail(&block
->list
, &lun
->free_list
);
183 if (dev
->ops
->get_bb_tbl
) {
187 ppa
.g
.ch
= lun
->vlun
.chnl_id
;
188 ppa
.g
.lun
= lun
->vlun
.id
;
189 ppa
= generic_to_dev_addr(dev
, ppa
);
191 ret
= dev
->ops
->get_bb_tbl(dev
, ppa
,
193 gennvm_block_bb
, gn
);
195 pr_err("gennvm: could not read BB table\n");
199 if (dev
->ops
->get_l2p_tbl
) {
200 ret
= dev
->ops
->get_l2p_tbl(dev
, 0, dev
->total_pages
,
201 gennvm_block_map
, dev
);
203 pr_err("gennvm: could not read L2P table.\n");
204 pr_warn("gennvm: default block initialization");
211 static void gennvm_free(struct nvm_dev
*dev
)
213 gennvm_blocks_free(dev
);
214 gennvm_luns_free(dev
);
219 static int gennvm_register(struct nvm_dev
*dev
)
224 if (!try_module_get(THIS_MODULE
))
227 gn
= kzalloc(sizeof(struct gen_nvm
), GFP_KERNEL
);
232 gn
->nr_luns
= dev
->nr_luns
;
235 ret
= gennvm_luns_init(dev
, gn
);
237 pr_err("gennvm: could not initialize luns\n");
241 ret
= gennvm_blocks_init(dev
, gn
);
243 pr_err("gennvm: could not initialize blocks\n");
250 module_put(THIS_MODULE
);
254 static void gennvm_unregister(struct nvm_dev
*dev
)
257 module_put(THIS_MODULE
);
260 static struct nvm_block
*gennvm_get_blk_unlocked(struct nvm_dev
*dev
,
261 struct nvm_lun
*vlun
, unsigned long flags
)
263 struct gen_lun
*lun
= container_of(vlun
, struct gen_lun
, vlun
);
264 struct nvm_block
*blk
= NULL
;
265 int is_gc
= flags
& NVM_IOTYPE_GC
;
267 assert_spin_locked(&vlun
->lock
);
269 if (list_empty(&lun
->free_list
)) {
270 pr_err_ratelimited("gennvm: lun %u have no free pages available",
275 if (!is_gc
&& lun
->vlun
.nr_free_blocks
< lun
->reserved_blocks
)
278 blk
= list_first_entry(&lun
->free_list
, struct nvm_block
, list
);
279 list_move_tail(&blk
->list
, &lun
->used_list
);
280 blk
->state
= NVM_BLK_ST_OPEN
;
282 lun
->vlun
.nr_free_blocks
--;
283 lun
->vlun
.nr_open_blocks
++;
289 static struct nvm_block
*gennvm_get_blk(struct nvm_dev
*dev
,
290 struct nvm_lun
*vlun
, unsigned long flags
)
292 struct nvm_block
*blk
;
294 spin_lock(&vlun
->lock
);
295 blk
= gennvm_get_blk_unlocked(dev
, vlun
, flags
);
296 spin_unlock(&vlun
->lock
);
300 static void gennvm_put_blk_unlocked(struct nvm_dev
*dev
, struct nvm_block
*blk
)
302 struct nvm_lun
*vlun
= blk
->lun
;
303 struct gen_lun
*lun
= container_of(vlun
, struct gen_lun
, vlun
);
305 assert_spin_locked(&vlun
->lock
);
307 if (blk
->state
& NVM_BLK_ST_OPEN
) {
308 list_move_tail(&blk
->list
, &lun
->free_list
);
309 lun
->vlun
.nr_open_blocks
--;
310 lun
->vlun
.nr_free_blocks
++;
311 blk
->state
= NVM_BLK_ST_FREE
;
312 } else if (blk
->state
& NVM_BLK_ST_CLOSED
) {
313 list_move_tail(&blk
->list
, &lun
->free_list
);
314 lun
->vlun
.nr_closed_blocks
--;
315 lun
->vlun
.nr_free_blocks
++;
316 blk
->state
= NVM_BLK_ST_FREE
;
317 } else if (blk
->state
& NVM_BLK_ST_BAD
) {
318 list_move_tail(&blk
->list
, &lun
->bb_list
);
319 lun
->vlun
.nr_bad_blocks
++;
320 blk
->state
= NVM_BLK_ST_BAD
;
323 pr_err("gennvm: erroneous block type (%lu -> %u)\n",
324 blk
->id
, blk
->state
);
325 list_move_tail(&blk
->list
, &lun
->bb_list
);
326 lun
->vlun
.nr_bad_blocks
++;
327 blk
->state
= NVM_BLK_ST_BAD
;
331 static void gennvm_put_blk(struct nvm_dev
*dev
, struct nvm_block
*blk
)
333 struct nvm_lun
*vlun
= blk
->lun
;
335 spin_lock(&vlun
->lock
);
336 gennvm_put_blk_unlocked(dev
, blk
);
337 spin_unlock(&vlun
->lock
);
340 static void gennvm_blk_set_type(struct nvm_dev
*dev
, struct ppa_addr
*ppa
,
343 struct gen_nvm
*gn
= dev
->mp
;
345 struct nvm_block
*blk
;
347 if (unlikely(ppa
->g
.ch
> dev
->nr_chnls
||
348 ppa
->g
.lun
> dev
->luns_per_chnl
||
349 ppa
->g
.blk
> dev
->blks_per_lun
)) {
351 pr_err("gennvm: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u",
352 ppa
->g
.ch
, dev
->nr_chnls
,
353 ppa
->g
.lun
, dev
->luns_per_chnl
,
354 ppa
->g
.blk
, dev
->blks_per_lun
);
358 lun
= &gn
->luns
[ppa
->g
.lun
* ppa
->g
.ch
];
359 blk
= &lun
->vlun
.blocks
[ppa
->g
.blk
];
361 /* will be moved to bb list on put_blk from target */
365 /* mark block bad. It is expected the target recover from the error. */
366 static void gennvm_mark_blk_bad(struct nvm_dev
*dev
, struct nvm_rq
*rqd
)
370 if (!dev
->ops
->set_bb_tbl
)
373 if (dev
->ops
->set_bb_tbl(dev
, rqd
, 1))
376 nvm_addr_to_generic_mode(dev
, rqd
);
378 /* look up blocks and mark them as bad */
379 if (rqd
->nr_pages
> 1)
380 for (i
= 0; i
< rqd
->nr_pages
; i
++)
381 gennvm_blk_set_type(dev
, &rqd
->ppa_list
[i
],
384 gennvm_blk_set_type(dev
, &rqd
->ppa_addr
, NVM_BLK_ST_BAD
);
387 static void gennvm_end_io(struct nvm_rq
*rqd
)
389 struct nvm_tgt_instance
*ins
= rqd
->ins
;
391 switch (rqd
->error
) {
392 case NVM_RSP_SUCCESS
:
393 case NVM_RSP_ERR_EMPTYPAGE
:
395 case NVM_RSP_ERR_FAILWRITE
:
396 gennvm_mark_blk_bad(rqd
->dev
, rqd
);
399 ins
->tt
->end_io(rqd
);
402 static int gennvm_submit_io(struct nvm_dev
*dev
, struct nvm_rq
*rqd
)
404 if (!dev
->ops
->submit_io
)
407 /* Convert address space */
408 nvm_generic_to_addr_mode(dev
, rqd
);
411 rqd
->end_io
= gennvm_end_io
;
412 return dev
->ops
->submit_io(dev
, rqd
);
415 static int gennvm_erase_blk(struct nvm_dev
*dev
, struct nvm_block
*blk
,
418 struct ppa_addr addr
= block_to_ppa(dev
, blk
);
420 return nvm_erase_ppa(dev
, &addr
, 1);
423 static struct nvm_lun
*gennvm_get_lun(struct nvm_dev
*dev
, int lunid
)
425 struct gen_nvm
*gn
= dev
->mp
;
427 return &gn
->luns
[lunid
].vlun
;
430 static void gennvm_lun_info_print(struct nvm_dev
*dev
)
432 struct gen_nvm
*gn
= dev
->mp
;
437 gennvm_for_each_lun(gn
, lun
, i
) {
438 spin_lock(&lun
->vlun
.lock
);
440 pr_info("%s: lun%8u\t%u\t%u\t%u\t%u\n",
442 lun
->vlun
.nr_free_blocks
,
443 lun
->vlun
.nr_open_blocks
,
444 lun
->vlun
.nr_closed_blocks
,
445 lun
->vlun
.nr_bad_blocks
);
447 spin_unlock(&lun
->vlun
.lock
);
451 static struct nvmm_type gennvm
= {
453 .version
= {0, 1, 0},
455 .register_mgr
= gennvm_register
,
456 .unregister_mgr
= gennvm_unregister
,
458 .get_blk_unlocked
= gennvm_get_blk_unlocked
,
459 .put_blk_unlocked
= gennvm_put_blk_unlocked
,
461 .get_blk
= gennvm_get_blk
,
462 .put_blk
= gennvm_put_blk
,
464 .submit_io
= gennvm_submit_io
,
465 .erase_blk
= gennvm_erase_blk
,
467 .get_lun
= gennvm_get_lun
,
468 .lun_info_print
= gennvm_lun_info_print
,
471 static int __init
gennvm_module_init(void)
473 return nvm_register_mgr(&gennvm
);
476 static void gennvm_module_exit(void)
478 nvm_unregister_mgr(&gennvm
);
481 module_init(gennvm_module_init
);
482 module_exit(gennvm_module_exit
);
483 MODULE_LICENSE("GPL v2");
484 MODULE_DESCRIPTION("Generic media manager for Open-Channel SSDs");