/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */
#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>
static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);
/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int num_lun;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int num_ch;
};
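
/*
 * A target device sees a contiguous, zero-based window of channels and
 * LUNs. nvm_dev_map (one per target, plus a reverse map in dev->rmap)
 * records, per virtual channel, the physical channel offset (ch_off)
 * and the per-LUN offsets (lun_offs) needed to translate between the
 * target's address space and the device's.
 */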
static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}
static bool nvm_target_exists(const char *name)
{
	struct nvm_dev *dev;
	struct nvm_target *tgt;
	bool ret = false;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		list_for_each_entry(tgt, &dev->targets, list) {
			if (!strcmp(name, tgt->disk->disk_name)) {
				ret = true;
				mutex_unlock(&dev->mlock);
				goto out;
			}
		}
		mutex_unlock(&dev->mlock);
	}

out:
	up_write(&nvm_lock);
	return ret;
}
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("nvm: lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}
static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}
static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->num_ch; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->num_lun; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.num_lun) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}
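
/*
 * Example of the slicing done below, assuming a geometry of 8 LUNs per
 * channel: a target created over LUNs 8-23 yields num_lun = 16,
 * bch = 1, blun = 0 and num_ch = 2, so the target's virtual channels
 * 0-1 are mapped onto physical channels 1-2.
 */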
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      u16 lun_begin, u16 lun_end,
					      u16 op)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int num_lun = lun_end - lun_begin + 1;
	int luns_left = num_lun;
	int num_ch = num_lun / dev->geo.num_lun;
	int num_ch_mod = num_lun % dev->geo.num_lun;
	int bch = lun_begin / dev->geo.num_lun;
	int blun = lun_begin % dev->geo.num_lun;
	int lunid = 0;
	int lun_balanced = 1;
	int sec_per_lun, prev_num_lun;
	int i, j;

	num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_num_lun = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;
	for (i = 0; i < num_ch; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;

		if (lun_balanced && prev_num_lun != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->num_lun = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].a.ch = i;
			luns[lunid++].a.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->num_ch = num_ch;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	/* Inherit device geometry from parent */
	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));

	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.num_ch = num_ch;
	tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
	tgt_dev->geo.all_luns = num_lun;
	tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;

	tgt_dev->geo.op = op;

	sec_per_lun = dev->geo.clba * dev->geo.num_chk;
	tgt_dev->geo.total_secs = num_lun * sec_per_lun;

	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}
static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};
static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	list_for_each_entry(tt, &nvm_tgt_types, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}
static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	down_write(&nvm_tgtt_lock);
	tt = __nvm_find_target_type(name);
	up_write(&nvm_tgtt_lock);

	return tt;
}
static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
				 int lun_end)
{
	if (lun_begin > lun_end || lun_end >= geo->all_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			lun_begin, lun_end, geo->all_luns - 1);
		return -EINVAL;
	}

	return 0;
}
static int __nvm_config_simple(struct nvm_dev *dev,
			       struct nvm_ioctl_create_simple *s)
{
	struct nvm_geo *geo = &dev->geo;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = geo->all_luns - 1;
	}

	return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}
static int __nvm_config_extended(struct nvm_dev *dev,
				 struct nvm_ioctl_create_extended *e)
{
	if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
		e->lun_begin = 0;
		e->lun_end = dev->geo.all_luns - 1;
	}

	/* op not set falls into target's default */
	if (e->op == 0xFFFF) {
		e->op = NVM_TARGET_DEFAULT_OP;
	} else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
		pr_err("nvm: invalid over provisioning value\n");
		return -EINVAL;
	}

	return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
}
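
/*
 * Target creation, in order: validate the ioctl configuration, look up
 * the target type, reserve the requested LUNs, build the target device
 * mapping, then allocate and wire up the gendisk and request queue
 * before handing control to the target type's init callback.
 */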
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_extended e;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	int ret;

	switch (create->conf.type) {
	case NVM_CONFIG_TYPE_SIMPLE:
		ret = __nvm_config_simple(dev, &create->conf.s);
		if (ret)
			return ret;

		e.lun_begin = create->conf.s.lun_begin;
		e.lun_end = create->conf.s.lun_end;
		e.op = NVM_TARGET_DEFAULT_OP;
		break;
	case NVM_CONFIG_TYPE_EXTENDED:
		ret = __nvm_config_extended(dev, &create->conf.e);
		if (ret)
			return ret;

		e = create->conf.e;
		break;
	default:
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}

	tt = nvm_find_target_type(create->tgttype);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
		pr_err("nvm: device is incompatible with target L2P type.\n");
		return -EINVAL;
	}

	if (nvm_target_exists(create->tgtname)) {
		pr_err("nvm: target name already exists (%s)\n",
							create->tgtname);
		return -EINVAL;
	}

	ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
	if (ret)
		return ret;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
	if (!tgt_dev) {
		pr_err("nvm: could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node, NULL);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_disk;
	}
	blk_queue_make_request(tqueue, tt->make_rq);

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	blk_queue_max_hw_sectors(tqueue,
			(dev->geo.csecs >> 9) * NVM_MAX_VLBA);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	__module_get(tt->owner);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata, true);
err_init:
	blk_cleanup_queue(tqueue);
	tdisk->queue = NULL;
err_disk:
	put_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
	return ret;
}
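
/*
 * Teardown mirrors creation in reverse: the disk and queue go first,
 * then the target type's exit callback, and finally the LUN mapping
 * and module reference are released.
 */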
static void __nvm_remove_target(struct nvm_target *t, bool graceful)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data, graceful);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);
	module_put(t->type->owner);

	list_del(&t->list);
	kfree(t);
}
/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @dev:	device
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t;

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, remove->tgtname);
	if (!t) {
		mutex_unlock(&dev->mlock);
		return 1;
	}
	__nvm_remove_target(t, true);
	mutex_unlock(&dev->mlock);

	return 0;
}
static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
								GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.num_ch; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.num_lun;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->num_lun = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}
static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.num_ch; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}
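
/*
 * Address translation helpers. nvm_map_to_dev() rebases a target
 * (virtual) address onto the physical device, nvm_map_to_tgt() is the
 * inverse. E.g. with ch_off = 2 and lun_offs[0] = 4, the target
 * address (ch 0, lun 0) maps to the physical address (ch 2, lun 4).
 */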
static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
	int lun_off = ch_map->lun_offs[p->a.lun];

	p->a.ch += ch_map->ch_off;
	p->a.lun += lun_off;
}
static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
	int lun_roff = ch_rmap->lun_offs[p->a.lun];

	p->a.ch -= ch_rmap->ch_off;
	p->a.lun -= lun_roff;
}
static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
	}
}
static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}
static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
}
static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
}
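
/*
 * Target types (e.g. pblk) register themselves here so that instances
 * of that type can be created on top of registered devices through the
 * ioctl interface below.
 */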
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (__nvm_find_target_type(tt->name))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);
void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);
void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
							dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);
void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);
static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}
static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	plane_cnt = geo->pln_mode;
	rqd->nr_ppas *= plane_cnt;

	for (i = 0; i < nr_ppas; i++) {
		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			ppa = ppas[i];
			ppa.g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
		}
	}

	return 0;
}
static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
			struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}
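
/*
 * OCSSD 2.0 commands carry no flags word, so nothing is set for 2.0
 * geometries. For 1.2 devices the flags encode the plane mode (the
 * pln_mode >> 1 below maps single/dual/quad access onto the spec's
 * flag bits) plus scrambler and suspend hints chosen per opcode.
 */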
static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
	int flags = 0;

	if (geo->version == NVM_OCSSD_SPEC_20)
		return 0;

	if (rqd->is_seq)
		flags |= geo->pln_mode >> 1;

	if (rqd->opcode == NVM_OP_PREAD)
		flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
	else if (rqd->opcode == NVM_OP_PWRITE)
		flags |= NVM_IO_SCRAMBLE_ENABLE;

	return flags;
}
int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io(dev, rqd);
	if (ret)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);
	return ret;
}
EXPORT_SYMBOL(nvm_submit_io);
int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io_sync)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io_sync(dev, rqd);
	nvm_rq_dev_to_tgt(tgt_dev, rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);
void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);
static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io_sync)
		return -ENODEV;

	rqd->flags = nvm_set_flags(&dev->geo, rqd);

	return dev->ops->submit_io_sync(dev, rqd);
}
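
/*
 * "Sensing" a page means issuing a single-sector read to it: success
 * implies valid data, while an EMPTYPAGE completion from the device
 * indicates the page was never written. nvm_bb_chunk_scan() below uses
 * this to recover chunk states and write pointers on 1.2 media, which
 * keep no chunk metadata of their own.
 */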
static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
{
	struct nvm_rq rqd = { NULL };
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	int ret;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	bio_init(&bio, &bio_vec, 1);
	bio_add_page(&bio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(&bio, REQ_OP_READ, 0);

	rqd.bio = &bio;
	rqd.opcode = NVM_OP_PREAD;
	rqd.is_seq = 1;
	rqd.nr_ppas = 1;
	rqd.ppa_addr = generic_to_dev_addr(dev, ppa);

	ret = nvm_submit_io_sync_raw(dev, &rqd);
	if (ret)
		return ret;

	__free_page(page);

	return rqd.error;
}
/*
 * Scans a 1.2 chunk's first and last page to determine its state.
 * If the chunk is found to be open, also scan it to update the write
 * pointer.
 */
static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
			     struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, pg, pl;

	/* sense first page */
	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) /* valid data */
		meta->state = NVM_CHK_ST_OPEN;
	else if (ret > 0) {
		/*
		 * If empty page, the chunk is free, else it is an
		 * actual io error. In that case, mark it offline.
		 */
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
			meta->state = NVM_CHK_ST_FREE;
			return 0;
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			goto scan;
		default:
			return -ret; /* other io error */
		}
	}

	/* sense last page */
	ppa.g.pg = geo->num_pg - 1;
	ppa.g.pl = geo->num_pln - 1;

	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) { /* Chunk fully written */
		meta->state = NVM_CHK_ST_CLOSED;
		meta->wp = geo->clba;
		return 0;
	} else if (ret > 0) {
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			break;
		default:
			return -ret; /* other io error */
		}
	}

scan:
	/*
	 * chunk is open, we scan sequentially to update the write pointer.
	 * We make the assumption that targets write data across all planes
	 * before moving to the next page.
	 */
	for (pg = 0; pg < geo->num_pg; pg++) {
		for (pl = 0; pl < geo->num_pln; pl++) {
			ppa.g.pg = pg;
			ppa.g.pl = pl;

			ret = nvm_bb_chunk_sense(dev, ppa);
			if (ret < 0) /* io error */
				return ret;
			else if (ret == 0) {
				meta->wp += geo->ws_min;
			} else if (ret > 0) {
				switch (ret) {
				case NVM_RSP_ERR_EMPTYPAGE:
					return 0;
				case NVM_RSP_ERR_FAILCRC:
				case NVM_RSP_ERR_FAILECC:
				case NVM_RSP_WARN_HIGHECC:
					meta->wp += geo->ws_min;
					break;
				default:
					return -ret; /* other io error */
				}
			}
		}
	}

	return 0;
}
/*
 * Folds a bad block list from its plane representation to its
 * chunk representation.
 *
 * If any of the plane states is bad or grown bad, the chunk is marked
 * offline. If not bad, the first plane state acts as the chunk state.
 */
static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
			   u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, blk, pl, offset, blktype;

	for (blk = 0; blk < geo->num_chk; blk++) {
		offset = blk * geo->pln_mode;
		blktype = blks[offset];

		for (pl = 0; pl < geo->pln_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		ppa.g.blk = blk;

		meta->wp = 0;
		meta->type = NVM_CHK_TP_W_SEQ;
		meta->wi = 0;
		meta->slba = generic_to_dev_addr(dev, ppa).ppa;
		meta->cnlb = dev->geo.clba;

		if (blktype == NVM_BLK_T_FREE) {
			ret = nvm_bb_chunk_scan(dev, ppa, meta);
			if (ret)
				return ret;
		} else {
			meta->state = NVM_CHK_ST_OFFLINE;
		}

		meta++;
	}

	return 0;
}
static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
			   int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int ch, lun, nr_blks;
	int ret = 0;

	ppa.ppa = slba;
	ppa = dev_to_generic_addr(dev, ppa);

	if (ppa.g.blk != 0)
		return -EINVAL;

	if ((nchks % geo->num_chk) != 0)
		return -EINVAL;

	nr_blks = geo->num_chk * geo->pln_mode;

	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
		for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
			struct ppa_addr ppa_gen, ppa_dev;

			if (!nchks)
				goto done;

			ppa_gen.ppa = 0;
			ppa_gen.g.ch = ch;
			ppa_gen.g.lun = lun;
			ppa_dev = generic_to_dev_addr(dev, ppa_gen);

			ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
			if (ret)
				goto done;

			ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
									meta);
			if (ret)
				goto done;

			meta += geo->num_chk;
			nchks -= geo->num_chk;
		}
	}
done:
	kfree(blks);
	return ret;
}
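
/*
 * nvm_get_chunk_meta() is the public entry point: on 2.0 devices the
 * chunk table comes straight from the device, while on 1.2 devices it
 * is emulated above from the bad block table plus page scans.
 */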
int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	if (dev->geo.version == NVM_OCSSD_SPEC_12)
		return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);

	return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
}
EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);
int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (dev->geo.version == NVM_OCSSD_SPEC_20)
		return 0;

	if (nr_ppas > NVM_MAX_VLBA) {
		pr_err("nvm: unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret;

	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}
static void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	nvm_unregister_map(dev);
	kfree(dev->lun_map);
	kfree(dev);
}
static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%u.%u nvm_vendor:%x\n",
			geo->major_ver_id, geo->minor_ver_id,
			geo->vmnt);

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u]\n",
			dev->name, dev->geo.ws_min, dev->geo.ws_opt,
			dev->geo.num_chk, dev->geo.all_luns,
			dev->geo.num_ch);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}
struct nvm_dev *nvm_alloc_dev(int node)
{
	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);
int nvm_register(struct nvm_dev *dev)
{
	int ret;

	if (!dev->q || !dev->ops)
		return -EINVAL;

	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
	if (!dev->dma_pool) {
		pr_err("nvm: could not create dma pool\n");
		return -ENOMEM;
	}

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	dev->ops->destroy_dma_pool(dev->dma_pool);
	return ret;
}
EXPORT_SYMBOL(nvm_register);
void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t, false);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	return nvm_create_tgt(dev, create);
}
static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_tgtt_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_tgtt_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}
static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}
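
/*
 * The ioctl handlers below defensively NUL-terminate all user-supplied
 * names before use and reject unknown flags, so malformed userspace
 * structures fail early with -EINVAL.
 */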
static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
	    create.conf.e.rsv != 0) {
		pr_err("nvm: reserved config field in use\n");
		return -EINVAL;
	}

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		__u32 flags = create.flags;

		/* Check for valid flags */
		if (flags & NVM_TARGET_FACTORY)
			flags &= ~NVM_TARGET_FACTORY;

		if (flags) {
			pr_err("nvm: flag not supported\n");
			return -EINVAL;
		}
	}

	return __nvm_configure_create(&create);
}
static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		ret = nvm_remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}
/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return 0;
}
/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}
static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}
static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};
static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);