/*
 * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 * Implementation of a generic nvm manager for Open-Channel SSDs.
 */

#include "gennvm.h"
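/*
 * Reserve a linear range of sectors ("area") on the device. The area list is
 * kept sorted by start sector; the request is placed in the first gap that
 * can hold it and the chosen start LBA is returned through *lba.
 */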
static int gennvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
{
	struct gen_nvm *gn = dev->mp;
	struct gennvm_area *area, *prev, *next;
	sector_t begin = 0;
	sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9;

	if (len > max_sectors)
		return -EINVAL;

	area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
	if (!area)
		return -ENOMEM;

	prev = NULL;

	spin_lock(&dev->lock);
	list_for_each_entry(next, &gn->area_list, list) {
		if (begin + len > next->begin) {
			begin = next->end;
			prev = next;
			continue;
		}
		break;
	}

	if ((begin + len) > max_sectors) {
		spin_unlock(&dev->lock);
		kfree(area);
		return -EINVAL;
	}

	area->begin = *lba = begin;
	area->end = begin + len;

	if (prev) /* insert into sorted order */
		list_add(&area->list, &prev->list);
	else
		list_add(&area->list, &gn->area_list);
	spin_unlock(&dev->lock);

	return 0;
}
static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
{
	struct gen_nvm *gn = dev->mp;
	struct gennvm_area *area;

	spin_lock(&dev->lock);
	list_for_each_entry(area, &gn->area_list, list) {
		if (area->begin != begin)
			continue;

		list_del(&area->list);
		spin_unlock(&dev->lock);
		kfree(area);
		return;
	}
	spin_unlock(&dev->lock);
}
static void gennvm_blocks_free(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	int i;

	gennvm_for_each_lun(gn, lun, i) {
		if (!lun->vlun.blocks)
			break;
		vfree(lun->vlun.blocks);
	}
}
static void gennvm_luns_free(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;

	kfree(gn->luns);
}
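/* Allocate the per-lun bookkeeping and reset the block lists and counters. */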
static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
	struct gen_lun *lun;
	int i;

	gn->luns = kcalloc(dev->nr_luns, sizeof(struct gen_lun), GFP_KERNEL);
	if (!gn->luns)
		return -ENOMEM;

	gennvm_for_each_lun(gn, lun, i) {
		spin_lock_init(&lun->vlun.lock);
		INIT_LIST_HEAD(&lun->free_list);
		INIT_LIST_HEAD(&lun->used_list);
		INIT_LIST_HEAD(&lun->bb_list);

		lun->reserved_blocks = 2; /* for GC only */
		lun->vlun.id = i;
		lun->vlun.lun_id = i % dev->luns_per_chnl;
		lun->vlun.chnl_id = i / dev->luns_per_chnl;
		lun->vlun.nr_free_blocks = dev->blks_per_lun;
		lun->vlun.nr_open_blocks = 0;
		lun->vlun.nr_closed_blocks = 0;
		lun->vlun.nr_bad_blocks = 0;
	}

	return 0;
}
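/*
 * Bad-block table callback: move every block flagged in the device's BB
 * table onto the lun's bb_list and update the lun counters accordingly.
 */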
static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
								void *private)
{
	struct gen_nvm *gn = private;
	struct nvm_dev *dev = gn->dev;
	struct gen_lun *lun;
	struct nvm_block *blk;
	int i;

	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];

	for (i = 0; i < nr_blocks; i++) {
		if (blks[i] == 0)
			continue;

		blk = &lun->vlun.blocks[i];
		if (!blk) {
			pr_err("gennvm: BB data is out of bounds.\n");
			return -EINVAL;
		}

		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		lun->vlun.nr_free_blocks--;
	}

	return 0;
}
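/*
 * L2P table callback: for each mapped logical sector, resolve the owning
 * block and account for it as an open block on the lun's used list.
 */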
static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct nvm_dev *dev = private;
	struct gen_nvm *gn = dev->mp;
	u64 elba = slba + nlb;
	struct gen_lun *lun;
	struct nvm_block *blk;
	u64 i;
	int lun_id;

	if (unlikely(elba > dev->total_secs)) {
		pr_err("gennvm: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);

		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
			pr_err("gennvm: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected. It often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		/* resolve block from physical address */
		lun_id = div_u64(pba, dev->sec_per_lun);
		lun = &gn->luns[lun_id];

		/* Calculate block offset into lun */
		pba = pba - (dev->sec_per_lun * lun_id);
		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];

		if (!blk->state) {
			/* at this point, we don't know anything about the
			 * block. It's up to the FTL on top to re-establish the
			 * block state. The block is assumed to be open.
			 */
			list_move_tail(&blk->list, &lun->used_list);
			blk->state = NVM_BLK_ST_OPEN;
			lun->vlun.nr_free_blocks--;
			lun->vlun.nr_open_blocks++;
		}
	}

	return 0;
}
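/*
 * Build the per-lun block arrays and free lists, then fold in the device's
 * bad-block and L2P information when the corresponding device ops exist.
 */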
static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
	struct gen_lun *lun;
	struct nvm_block *block;
	sector_t lun_iter, blk_iter, cur_block_id = 0;
	int ret;

	gennvm_for_each_lun(gn, lun, lun_iter) {
		lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
							dev->blks_per_lun);
		if (!lun->vlun.blocks)
			return -ENOMEM;

		for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
			block = &lun->vlun.blocks[blk_iter];

			INIT_LIST_HEAD(&block->list);

			block->lun = &lun->vlun;
			block->id = cur_block_id++;

			/* First block is reserved for device */
			if (unlikely(lun_iter == 0 && blk_iter == 0)) {
				lun->vlun.nr_free_blocks--;
				continue;
			}

			list_add_tail(&block->list, &lun->free_list);
		}

		if (dev->ops->get_bb_tbl) {
			struct ppa_addr ppa;

			ppa.ppa = 0;
			ppa.g.ch = lun->vlun.chnl_id;
			ppa.g.lun = lun->vlun.id;
			ppa = generic_to_dev_addr(dev, ppa);

			ret = dev->ops->get_bb_tbl(dev, ppa,
						dev->blks_per_lun,
						gennvm_block_bb, gn);
			if (ret)
				pr_err("gennvm: could not read BB table\n");
		}
	}

	if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) {
		ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs,
							gennvm_block_map, dev);
		if (ret) {
			pr_err("gennvm: could not read L2P table.\n");
			pr_warn("gennvm: default block initialization");
		}
	}

	return 0;
}
static void gennvm_free(struct nvm_dev *dev)
{
	gennvm_blocks_free(dev);
	gennvm_luns_free(dev);
	kfree(dev->mp);
	dev->mp = NULL;
}
static int gennvm_register(struct nvm_dev *dev)
{
	struct gen_nvm *gn;
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
	if (!gn) {
		ret = -ENOMEM;
		goto err;
	}

	gn->dev = dev;
	gn->nr_luns = dev->nr_luns;
	INIT_LIST_HEAD(&gn->area_list);
	dev->mp = gn;

	ret = gennvm_luns_init(dev, gn);
	if (ret) {
		pr_err("gennvm: could not initialize luns\n");
		goto err;
	}

	ret = gennvm_blocks_init(dev, gn);
	if (ret) {
		pr_err("gennvm: could not initialize blocks\n");
		goto err;
	}

	return 1;
err:
	gennvm_free(dev);
	module_put(THIS_MODULE);
	return ret;
}
static void gennvm_unregister(struct nvm_dev *dev)
{
	gennvm_free(dev);
	module_put(THIS_MODULE);
}
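/*
 * Take the next free block from a lun. The caller must hold the lun lock;
 * non-GC allocations are refused once only the GC-reserved blocks remain.
 */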
static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev,
				struct nvm_lun *vlun, unsigned long flags)
{
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
	struct nvm_block *blk = NULL;
	int is_gc = flags & NVM_IOTYPE_GC;

	assert_spin_locked(&vlun->lock);

	if (list_empty(&lun->free_list)) {
		pr_err_ratelimited("gennvm: lun %u have no free pages available",
							lun->vlun.id);
		goto out;
	}

	if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks)
		goto out;

	blk = list_first_entry(&lun->free_list, struct nvm_block, list);
	list_move_tail(&blk->list, &lun->used_list);
	blk->state = NVM_BLK_ST_OPEN;

	lun->vlun.nr_free_blocks--;
	lun->vlun.nr_open_blocks++;

out:
	return blk;
}
static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
				struct nvm_lun *vlun, unsigned long flags)
{
	struct nvm_block *blk;

	spin_lock(&vlun->lock);
	blk = gennvm_get_blk_unlocked(dev, vlun, flags);
	spin_unlock(&vlun->lock);

	return blk;
}
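/*
 * Return a block to its lun: open and closed blocks go back on the free
 * list, while bad (or unknown-state) blocks are parked on the bb_list.
 */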
static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
{
	struct nvm_lun *vlun = blk->lun;
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);

	assert_spin_locked(&vlun->lock);

	if (blk->state & NVM_BLK_ST_OPEN) {
		list_move_tail(&blk->list, &lun->free_list);
		lun->vlun.nr_open_blocks--;
		lun->vlun.nr_free_blocks++;
		blk->state = NVM_BLK_ST_FREE;
	} else if (blk->state & NVM_BLK_ST_CLOSED) {
		list_move_tail(&blk->list, &lun->free_list);
		lun->vlun.nr_closed_blocks--;
		lun->vlun.nr_free_blocks++;
		blk->state = NVM_BLK_ST_FREE;
	} else if (blk->state & NVM_BLK_ST_BAD) {
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		blk->state = NVM_BLK_ST_BAD;
	} else {
		pr_err("gennvm: erroneous block type (%lu -> %u)\n",
							blk->id, blk->state);
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		blk->state = NVM_BLK_ST_BAD;
	}
}
static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	struct nvm_lun *vlun = blk->lun;

	spin_lock(&vlun->lock);
	gennvm_put_blk_unlocked(dev, blk);
	spin_unlock(&vlun->lock);
}
static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
								int type)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	struct nvm_block *blk;

	if (unlikely(ppa->g.ch > dev->nr_chnls ||
					ppa->g.lun > dev->luns_per_chnl ||
					ppa->g.blk > dev->blks_per_lun)) {
		pr_err("gennvm: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u",
				ppa->g.ch, dev->nr_chnls,
				ppa->g.lun, dev->luns_per_chnl,
				ppa->g.blk, dev->blks_per_lun);
		return;
	}

	/* lun index mirrors the computation used in gennvm_block_bb() */
	lun = &gn->luns[(dev->luns_per_chnl * ppa->g.ch) + ppa->g.lun];
	blk = &lun->vlun.blocks[ppa->g.blk];

	/* will be moved to bb list on put_blk from target */
	blk->state = type;
}
/* mark block bad. It is expected that the target recovers from the error. */
static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (!dev->ops->set_bb_tbl)
		return;

	if (dev->ops->set_bb_tbl(dev, rqd, 1))
		return;

	nvm_addr_to_generic_mode(dev, rqd);

	/* look up blocks and mark them as bad */
	if (rqd->nr_pages > 1)
		for (i = 0; i < rqd->nr_pages; i++)
			gennvm_blk_set_type(dev, &rqd->ppa_list[i],
						NVM_BLK_ST_BAD);
	else
		gennvm_blk_set_type(dev, &rqd->ppa_addr, NVM_BLK_ST_BAD);
}
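/*
 * Completion hook for all I/O submitted through the media manager: a failed
 * write marks the affected blocks bad before control passes to the target's
 * own end_io handler.
 */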
static void gennvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_instance *ins = rqd->ins;

	switch (rqd->error) {
	case NVM_RSP_SUCCESS:
	case NVM_RSP_ERR_EMPTYPAGE:
		break;
	case NVM_RSP_ERR_FAILWRITE:
		gennvm_mark_blk_bad(rqd->dev, rqd);
	}

	ins->tt->end_io(rqd);
}
static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return -ENODEV;

	/* Convert address space */
	nvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	rqd->end_io = gennvm_end_io;

	return dev->ops->submit_io(dev, rqd);
}
static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
							unsigned long flags)
{
	struct ppa_addr addr = block_to_ppa(dev, blk);

	return nvm_erase_ppa(dev, &addr, 1);
}
static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid)
{
	return test_and_set_bit(lunid, dev->lun_map);
}
static void gennvm_release_lun(struct nvm_dev *dev, int lunid)
{
	WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
}
static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
{
	struct gen_nvm *gn = dev->mp;

	if (unlikely(lunid >= dev->nr_luns))
		return NULL;

	return &gn->luns[lunid].vlun;
}
static void gennvm_lun_info_print(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	unsigned int i;

	gennvm_for_each_lun(gn, lun, i) {
		spin_lock(&lun->vlun.lock);

		pr_info("%s: lun%8u\t%u\t%u\t%u\t%u\n",
				dev->name, i,
				lun->vlun.nr_free_blocks,
				lun->vlun.nr_open_blocks,
				lun->vlun.nr_closed_blocks,
				lun->vlun.nr_bad_blocks);

		spin_unlock(&lun->vlun.lock);
	}
}
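/* Media manager operations exposed to the lightnvm core. */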
static struct nvmm_type gennvm = {
	.name			= "gennvm",
	.version		= {0, 1, 0},

	.register_mgr		= gennvm_register,
	.unregister_mgr		= gennvm_unregister,

	.get_blk_unlocked	= gennvm_get_blk_unlocked,
	.put_blk_unlocked	= gennvm_put_blk_unlocked,

	.get_blk		= gennvm_get_blk,
	.put_blk		= gennvm_put_blk,

	.submit_io		= gennvm_submit_io,
	.erase_blk		= gennvm_erase_blk,

	.get_lun		= gennvm_get_lun,
	.reserve_lun		= gennvm_reserve_lun,
	.release_lun		= gennvm_release_lun,
	.lun_info_print		= gennvm_lun_info_print,

	.get_area		= gennvm_get_area,
	.put_area		= gennvm_put_area,
};
static int __init gennvm_module_init(void)
{
	return nvm_register_mgr(&gennvm);
}

static void gennvm_module_exit(void)
{
	nvm_unregister_mgr(&gennvm);
}

module_init(gennvm_module_init);
module_exit(gennvm_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic media manager for Open-Channel SSDs");