/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>
static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);
/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int nr_luns;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int nr_chnls;
};
static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}
static bool nvm_target_exists(const char *name)
{
	struct nvm_dev *dev;
	struct nvm_target *tgt;
	bool ret = false;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		list_for_each_entry(tgt, &dev->targets, list) {
			if (!strcmp(name, tgt->disk->disk_name)) {
				ret = true;
				mutex_unlock(&dev->mlock);
				goto out;
			}
		}
		mutex_unlock(&dev->mlock);
	}

out:
	up_write(&nvm_lock);
	return ret;
}
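/*
 * Reserve a contiguous range of LUNs in the device-wide lun_map bitmap.
 * If any LUN in the range is already claimed by another target, every bit
 * set so far is rolled back and the reservation fails.
 */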
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("nvm: lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}
static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}
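/*
 * Tear down a target's channel/LUN mapping. When @clear is set, the LUNs
 * owned by the target are also released from the device lun_map bitmap.
 */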
static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->nr_chnls; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->nr_luns; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.nr_luns) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}
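/*
 * Carve a virtual target device out of the physical device. The LUN range
 * [lun_begin, lun_end] is translated into a per-channel map so the target
 * sees channels and LUNs numbered from zero, while lun_offs/lun_roffs record
 * the offsets needed to convert back to physical addresses.
 */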
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      u16 lun_begin, u16 lun_end,
					      u16 op)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int nr_luns = lun_end - lun_begin + 1;
	int luns_left = nr_luns;
	int nr_chnls = nr_luns / dev->geo.nr_luns;
	int nr_chnls_mod = nr_luns % dev->geo.nr_luns;
	int bch = lun_begin / dev->geo.nr_luns;
	int blun = lun_begin % dev->geo.nr_luns;
	int lunid = 0;
	int lun_balanced = 1;
	int prev_nr_luns;
	int i, j;

	nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map),
								GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_nr_luns = (luns_left > dev->geo.nr_luns) ?
					dev->geo.nr_luns : luns_left;
	for (i = 0; i < nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.nr_luns) ?
					dev->geo.nr_luns : luns_left;

		if (lun_balanced && prev_nr_luns != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->nr_luns = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].g.ch = i;
			luns[lunid++].g.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->nr_chnls = nr_chnls;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.nr_chnls = nr_chnls;
	tgt_dev->geo.all_luns = nr_luns;
	tgt_dev->geo.nr_luns = (lun_balanced) ? prev_nr_luns : -1;
	tgt_dev->geo.op = op;
	tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));

	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}
static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};
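/*
 * Target type lookup. The __ variant expects nvm_tgtt_lock to be held by
 * the caller; nvm_find_target_type() below is the locked wrapper.
 */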
static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	list_for_each_entry(tt, &nvm_tgt_types, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}
static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	down_write(&nvm_tgtt_lock);
	tt = __nvm_find_target_type(name);
	up_write(&nvm_tgtt_lock);

	return tt;
}
static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
				 int lun_end)
{
	if (lun_begin > lun_end || lun_end >= geo->all_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			lun_begin, lun_end, geo->all_luns - 1);
		return -EINVAL;
	}

	return 0;
}
static int __nvm_config_simple(struct nvm_dev *dev,
			       struct nvm_ioctl_create_simple *s)
{
	struct nvm_geo *geo = &dev->geo;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = geo->all_luns - 1;
	}

	return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}
static int __nvm_config_extended(struct nvm_dev *dev,
				 struct nvm_ioctl_create_extended *e)
{
	struct nvm_geo *geo = &dev->geo;

	if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
		e->lun_begin = 0;
		e->lun_end = dev->geo.all_luns - 1;
	}

	/* op not set falls into target's default */
	if (e->op == 0xFFFF)
		e->op = NVM_TARGET_DEFAULT_OP;

	if (e->op < NVM_TARGET_MIN_OP ||
	    e->op > NVM_TARGET_MAX_OP) {
		pr_err("nvm: invalid over provisioning value\n");
		return -EINVAL;
	}

	return nvm_config_check_luns(geo, e->lun_begin, e->lun_end);
}
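/*
 * Create a new target on @dev: normalize the ioctl configuration, look up
 * the requested target type, reserve the LUN range, build the virtual
 * target device, and expose it as a block device driven by the target's
 * make_request function.
 */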
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_extended e;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	int ret;

	switch (create->conf.type) {
	case NVM_CONFIG_TYPE_SIMPLE:
		ret = __nvm_config_simple(dev, &create->conf.s);
		if (ret)
			return ret;

		e.lun_begin = create->conf.s.lun_begin;
		e.lun_end = create->conf.s.lun_end;
		e.op = NVM_TARGET_DEFAULT_OP;
		break;
	case NVM_CONFIG_TYPE_EXTENDED:
		ret = __nvm_config_extended(dev, &create->conf.e);
		if (ret)
			return ret;

		e = create->conf.e;
		break;
	default:
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}

	tt = nvm_find_target_type(create->tgttype);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	if (nvm_target_exists(create->tgtname)) {
		pr_err("nvm: target name already exists (%s)\n",
							create->tgtname);
		return -EINVAL;
	}

	ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
	if (ret)
		return ret;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
	if (!tgt_dev) {
		pr_err("nvm: could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_disk;
	}
	blk_queue_make_request(tqueue, tt->make_rq);

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	__module_get(tt->owner);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata);
err_init:
	blk_cleanup_queue(tqueue);
	tdisk->queue = NULL;
err_disk:
	put_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
	return ret;
}
static void __nvm_remove_target(struct nvm_target *t)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);
	module_put(t->type->owner);

	list_del(&t->list);
	kfree(t);
}
/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @dev:	device
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns 0 on success, 1 if the target was not found on @dev.
 */
static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t;

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, remove->tgtname);
	if (!t) {
		mutex_unlock(&dev->mlock);
		return 1;
	}
	__nvm_remove_target(t);
	mutex_unlock(&dev->mlock);

	return 0;
}
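/*
 * Build the device-wide reverse map (dev->rmap) used to translate physical
 * addresses back to target-relative ones. Offsets start out as -1 and are
 * filled in when a target claims the corresponding channel/LUN in
 * nvm_create_tgt_dev().
 */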
static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map),
								GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.nr_luns;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->nr_luns = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}
static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.nr_chnls; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}
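/*
 * Address translation helpers: nvm_map_to_dev() shifts a target-relative
 * ppa_addr to its physical channel/LUN, and nvm_map_to_tgt() does the
 * inverse using the reverse map.
 */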
static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch];
	int lun_off = ch_map->lun_offs[p->g.lun];

	p->g.ch += ch_map->ch_off;
	p->g.lun += lun_off;
}
static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
	int lun_roff = ch_rmap->lun_offs[p->g.lun];

	p->g.ch -= ch_rmap->ch_off;
	p->g.lun -= lun_roff;
}
static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev, ppa_list[i]);
	}
}
static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}
static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}
static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}
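/*
 * Target types register themselves here and are later matched by name when
 * a target is created through the ioctl interface.
 */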
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (__nvm_find_target_type(tt->name))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);
void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);
void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
							dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);
void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);
static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}
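/*
 * Populate the PPA list of a request. A single PPA on a single-plane device
 * is stored inline; otherwise the list is expanded across all planes into a
 * DMA-able buffer allocated from the device's ppalist pool.
 */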
static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if (geo->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	plane_cnt = geo->plane_mode;
	rqd->nr_ppas *= plane_cnt;

	for (i = 0; i < nr_ppas; i++) {
		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			ppa = ppas[i];
			ppa.g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
		}
	}

	return 0;
}
static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
			struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}
int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (nr_ppas > dev->ops->max_phys_sect) {
		pr_err("nvm: unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret) {
		pr_err("nvm: failed bb mark\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->ops->max_phys_sect;
}
EXPORT_SYMBOL(nvm_max_phys_sects);
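/*
 * I/O submission: addresses are converted from target to device format
 * before the request is handed to the driver, and converted back on the
 * error/completion path so callers always see target-relative addresses.
 */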
int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io(dev, rqd);
	if (ret)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io);
int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io_sync)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io_sync(dev, rqd);
	nvm_rq_dev_to_tgt(tgt_dev, rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);
void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);
/*
 * folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place and reduced size is
 * returned.
 *
 * If any of the planes status are bad or grown bad block, the virtual block
 * is marked bad. If not bad, the first plane state acts as the block state.
 */
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	struct nvm_geo *geo = &dev->geo;
	int blk, offset, pl, blktype;

	if (nr_blks != geo->nr_chks * geo->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < geo->nr_chks; blk++) {
		offset = blk * geo->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < geo->plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return geo->nr_chks;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);
int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       u8 *blks)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
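/*
 * Derive the generic and 1.2-spec geometry of the device from the identify
 * data and set up the per-device structures (LUN bitmap, target list,
 * reverse map) used by the target layer.
 */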
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->grp;
	struct nvm_geo *geo = &dev->geo;
	int ret;

	memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* Whole device values */
	geo->nr_chnls = grp->num_ch;
	geo->nr_luns = grp->num_lun;

	/* Generic device geometry values */
	geo->ws_min = grp->ws_min;
	geo->ws_opt = grp->ws_opt;
	geo->ws_seq = grp->ws_seq;
	geo->ws_per_chk = grp->ws_per_chk;
	geo->nr_chks = grp->num_chk;
	geo->sec_size = grp->csecs;
	geo->oob_size = grp->sos;
	geo->mccap = grp->mccap;
	geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;

	geo->sec_per_chk = grp->clba;
	geo->sec_per_lun = geo->sec_per_chk * geo->nr_chks;
	geo->all_luns = geo->nr_luns * geo->nr_chnls;

	/* 1.2 spec device geometry values */
	geo->plane_mode = 1 << geo->ws_seq;
	geo->nr_planes = geo->ws_opt / geo->ws_min;
	geo->sec_per_pg = geo->ws_min;
	geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;

	dev->total_secs = geo->all_luns * geo->sec_per_lun;
	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	blk_queue_logical_block_size(dev->q, geo->sec_size);

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}
static void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	nvm_unregister_map(dev);
	kfree(dev->lun_map);
	kfree(dev);
}
static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x\n",
			dev->identity.ver_id, dev->identity.vmnt);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, geo->sec_per_pg, geo->nr_planes,
			geo->ws_per_chk, geo->nr_chks,
			geo->all_luns, geo->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}
struct nvm_dev *nvm_alloc_dev(int node)
{
	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);
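/*
 * Register a device with the subsystem: optionally create the per-device
 * DMA pool for PPA lists, identify the device and initialize its geometry,
 * then add it to the global device list.
 */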
int nvm_register(struct nvm_dev *dev)
{
	int ret;

	if (!dev->q || !dev->ops)
		return -EINVAL;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		return -EINVAL;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			return -ENOMEM;
		}
	}

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	dev->ops->destroy_dma_pool(dev->dma_pool);
	return ret;
}
EXPORT_SYMBOL(nvm_register);
void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	return nvm_create_tgt(dev, create);
}
static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_tgtt_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_tgtt_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}
static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}
static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
	    create.conf.e.rsv != 0) {
		pr_err("nvm: reserved config field in use\n");
		return -EINVAL;
	}

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		__u32 flags = create.flags;

		/* Check for valid flags */
		if (flags & NVM_TARGET_FACTORY)
			flags &= ~NVM_TARGET_FACTORY;

		if (flags) {
			pr_err("nvm: flag not supported\n");
			return -EINVAL;
		}
	}

	return __nvm_configure_create(&create);
}
static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		ret = nvm_remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}
/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return 0;
}
/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}
static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}
static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};
static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);