// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int num_lun;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int num_ch;
};

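/*
 * Example (hypothetical geometry): on a device with 8 LUNs per channel,
 * a target built over physical LUNs 12..15 gets one nvm_ch_map entry
 * with ch_off = 1, num_lun = 4 and lun_offs[] = {4, 4, 4, 4}, so the
 * target-relative address (ch 0, lun j) resolves to physical
 * (ch 1, lun j + 4).
 */
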
static void nvm_free(struct kref *ref);

static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static bool nvm_target_exists(const char *name)
{
	struct nvm_dev *dev;
	struct nvm_target *tgt;
	bool ret = false;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		list_for_each_entry(tgt, &dev->targets, list) {
			if (!strcmp(name, tgt->disk->disk_name)) {
				ret = true;
				mutex_unlock(&dev->mlock);
				goto out;
			}
		}
		mutex_unlock(&dev->mlock);
	}

out:
	up_write(&nvm_lock);
	return ret;
}

static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("nvm: lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

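/*
 * Usage sketch (illustrative): reservation is all-or-nothing; e.g. a
 * target asking for LUNs 0..7 while LUN 5 is already taken sees -EBUSY
 * and finds LUNs 0..4 untouched, because the error path above rolls
 * back every bit it managed to set.
 */
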
static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->num_ch; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->num_lun; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.num_lun) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}

static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      u16 lun_begin, u16 lun_end,
					      u16 op)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int num_lun = lun_end - lun_begin + 1;
	int luns_left = num_lun;
	int num_ch = num_lun / dev->geo.num_lun;
	int num_ch_mod = num_lun % dev->geo.num_lun;
	int bch = lun_begin / dev->geo.num_lun;
	int blun = lun_begin % dev->geo.num_lun;
	int lunid = 0;
	int lun_balanced = 1;
	int sec_per_lun, prev_num_lun;
	int i, j;

	num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_num_lun = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;
	for (i = 0; i < num_ch; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;

		if (lun_balanced && prev_num_lun != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->num_lun = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].a.ch = i;
			luns[lunid++].a.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->num_ch = num_ch;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	/* Inherit device geometry from parent */
	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));

	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.num_ch = num_ch;
	tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
	tgt_dev->geo.all_luns = num_lun;
	tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;

	tgt_dev->geo.op = op;

	sec_per_lun = dev->geo.clba * dev->geo.num_chk;
	tgt_dev->geo.total_secs = num_lun * sec_per_lun;

	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}

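/*
 * Worked example (hypothetical geometry): with geo.num_lun = 8,
 * lun_begin = 8 and lun_end = 23 give num_lun = 16, num_ch = 16 / 8 = 2
 * (num_ch_mod = 0), bch = 1 and blun = 0: the target owns physical
 * channels 1..2 in full, sees them as virtual channels 0..1, and
 * lun_balanced stays 1 since both channels contribute 8 LUNs each.
 */
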
static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};

static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	list_for_each_entry(tt, &nvm_tgt_types, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	down_write(&nvm_tgtt_lock);
	tt = __nvm_find_target_type(name);
	up_write(&nvm_tgtt_lock);

	return tt;
}

static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
				 int lun_end)
{
	if (lun_begin > lun_end || lun_end >= geo->all_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			lun_begin, lun_end, geo->all_luns - 1);
		return -EINVAL;
	}

	return 0;
}

static int __nvm_config_simple(struct nvm_dev *dev,
			       struct nvm_ioctl_create_simple *s)
{
	struct nvm_geo *geo = &dev->geo;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = geo->all_luns - 1;
	}

	return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}

static int __nvm_config_extended(struct nvm_dev *dev,
				 struct nvm_ioctl_create_extended *e)
{
	if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
		e->lun_begin = 0;
		e->lun_end = dev->geo.all_luns - 1;
	}

	/* op not set falls into target's default */
	if (e->op == 0xFFFF) {
		e->op = NVM_TARGET_DEFAULT_OP;
	} else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
		pr_err("nvm: invalid over provisioning value\n");
		return -EINVAL;
	}

	return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
}

static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_extended e;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	unsigned int mdts;
	int ret;

	switch (create->conf.type) {
	case NVM_CONFIG_TYPE_SIMPLE:
		ret = __nvm_config_simple(dev, &create->conf.s);
		if (ret)
			return ret;

		e.lun_begin = create->conf.s.lun_begin;
		e.lun_end = create->conf.s.lun_end;
		e.op = NVM_TARGET_DEFAULT_OP;
		break;
	case NVM_CONFIG_TYPE_EXTENDED:
		ret = __nvm_config_extended(dev, &create->conf.e);
		if (ret)
			return ret;

		e = create->conf.e;
		break;
	default:
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}

	tt = nvm_find_target_type(create->tgttype);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
		pr_err("nvm: device is incompatible with target L2P type.\n");
		return -EINVAL;
	}

	if (nvm_target_exists(create->tgtname)) {
		pr_err("nvm: target name already exists (%s)\n",
							create->tgtname);
		return -EINVAL;
	}

	ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
	if (ret)
		return ret;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
	if (!tgt_dev) {
		pr_err("nvm: could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_disk;
	}
	blk_queue_make_request(tqueue, tt->make_rq);

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
	if (dev->geo.mdts) {
		mdts = min_t(u32, dev->geo.mdts,
				(dev->geo.csecs >> 9) * NVM_MAX_VLBA);
	}
	blk_queue_max_hw_sectors(tqueue, mdts);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	__module_get(tt->owner);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata, true);
err_init:
	blk_cleanup_queue(tqueue);
	tdisk->queue = NULL;
err_disk:
	put_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
	return ret;
}

static void __nvm_remove_target(struct nvm_target *t, bool graceful)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data, graceful);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);
	module_put(t->type->owner);

	list_del(&t->list);
	kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t = NULL;
	struct nvm_dev *dev;

	down_read(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		t = nvm_find_target(dev, remove->tgtname);
		if (t) {
			mutex_unlock(&dev->mlock);
			break;
		}
		mutex_unlock(&dev->mlock);
	}
	up_read(&nvm_lock);

	if (!t)
		return 1;

	__nvm_remove_target(t, true);
	kref_put(&dev->ref, nvm_free);

	return 0;
}

static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
								GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.num_ch; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.num_lun;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->num_lun = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.num_ch; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}

static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
	int lun_off = ch_map->lun_offs[p->a.lun];

	p->a.ch += ch_map->ch_off;
	p->a.lun += lun_off;
}

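/*
 * E.g. (hypothetical mapping): with ch_off = 1 and lun_offs[0] = 4, the
 * target-relative address (ch 0, lun 0) leaves nvm_map_to_dev() as the
 * physical address (ch 1, lun 4); nvm_map_to_tgt() below undoes the
 * shift through the reverse map.
 */
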
static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
	int lun_roff = ch_rmap->lun_offs[p->a.lun];

	p->a.ch -= ch_rmap->ch_off;
	p->a.lun -= lun_roff;
}

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
}

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (__nvm_find_target_type(tt->name))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
							dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	plane_cnt = geo->pln_mode;
	rqd->nr_ppas *= plane_cnt;

	for (i = 0; i < nr_ppas; i++) {
		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			ppa = ppas[i];
			ppa.g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
		}
	}

	return 0;
}

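/*
 * Layout note with an example (hypothetical values): for nr_ppas = 2 on
 * a dual-plane geometry (pln_mode = 2) the list is expanded plane-major:
 *
 *	ppa_list[0] = ppas[0] @ pl 0;  ppa_list[1] = ppas[1] @ pl 0;
 *	ppa_list[2] = ppas[0] @ pl 1;  ppa_list[3] = ppas[1] @ pl 1;
 *
 * i.e. index (pl_idx * nr_ppas) + i as computed above, and rqd->nr_ppas
 * becomes 4.
 */
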
static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
			struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}

static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
	int flags = 0;

	if (geo->version == NVM_OCSSD_SPEC_20)
		return 0;

	if (rqd->is_seq)
		flags |= geo->pln_mode >> 1;

	if (rqd->opcode == NVM_OP_PREAD)
		flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
	else if (rqd->opcode == NVM_OP_PWRITE)
		flags |= NVM_IO_SCRAMBLE_ENABLE;

	return flags;
}

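/*
 * Example (1.2 spec, hypothetical request): a sequential read
 * (rqd->is_seq set, opcode NVM_OP_PREAD) on a quad-plane geometry yields
 * flags = (NVM_PLANE_QUAD >> 1) | NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND,
 * while 2.0 devices always get 0.
 */
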
int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io(dev, rqd);
	if (ret)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);
	return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io_sync)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io_sync(dev, rqd);
	nvm_rq_dev_to_tgt(tgt_dev, rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io_sync)
		return -ENODEV;

	rqd->flags = nvm_set_flags(&dev->geo, rqd);

	return dev->ops->submit_io_sync(dev, rqd);
}

static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
{
	struct nvm_rq rqd = { NULL };
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	int ret;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	bio_init(&bio, &bio_vec, 1);
	bio_add_page(&bio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(&bio, REQ_OP_READ, 0);

	rqd.bio = &bio;
	rqd.opcode = NVM_OP_PREAD;
	rqd.is_seq = 1;
	rqd.nr_ppas = 1;
	rqd.ppa_addr = generic_to_dev_addr(dev, ppa);

	ret = nvm_submit_io_sync_raw(dev, &rqd);
	if (ret)
		return ret;

	__free_page(page);

	return rqd.error;
}

/*
 * Scans a 1.2 chunk's first and last page to determine its state.
 * If the chunk is found to be open, also scan it to update the write
 * pointer.
 */
static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
			     struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, pg, pl;

	/* sense first page */
	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) /* valid data */
		meta->state = NVM_CHK_ST_OPEN;
	else if (ret > 0) {
		/*
		 * If empty page, the chunk is free, else it is an
		 * actual io error. In that case, mark it offline.
		 */
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
			meta->state = NVM_CHK_ST_FREE;
			return 0;
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			goto scan;
		default:
			return -ret; /* other io error */
		}
	}

	/* sense last page */
	ppa.g.pg = geo->num_pg - 1;
	ppa.g.pl = geo->num_pln - 1;

	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) { /* Chunk fully written */
		meta->state = NVM_CHK_ST_CLOSED;
		meta->wp = geo->clba;
		return 0;
	} else if (ret > 0) {
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			break;
		default:
			return -ret; /* other io error */
		}
	}

scan:
	/*
	 * chunk is open, we scan sequentially to update the write pointer.
	 * We make the assumption that targets write data across all planes
	 * before moving to the next page.
	 */
	for (pg = 0; pg < geo->num_pg; pg++) {
		for (pl = 0; pl < geo->num_pln; pl++) {
			ppa.g.pg = pg;
			ppa.g.pl = pl;

			ret = nvm_bb_chunk_sense(dev, ppa);
			if (ret < 0) /* io error */
				return ret;
			else if (ret == 0) {
				meta->wp += geo->ws_min;
			} else if (ret > 0) {
				switch (ret) {
				case NVM_RSP_ERR_EMPTYPAGE:
					return 0;
				case NVM_RSP_ERR_FAILCRC:
				case NVM_RSP_ERR_FAILECC:
				case NVM_RSP_WARN_HIGHECC:
					meta->wp += geo->ws_min;
					break;
				default:
					return -ret; /* other io error */
				}
			}
		}
	}

	return 0;
}

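/*
 * E.g. (illustrative): if the first three pages of an open chunk read
 * back cleanly on both planes of a dual-plane geometry, the scan loop
 * above advances meta->wp by ws_min six times before the first
 * NVM_RSP_ERR_EMPTYPAGE ends the scan.
 */
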
/*
 * folds a bad block list from its plane representation to its
 * chunk representation.
 *
 * If any of the planes status are bad or grown bad, the chunk is marked
 * offline. If not bad, the first plane state acts as the chunk state.
 */
static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
			   u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, blk, pl, offset, blktype;

	for (blk = 0; blk < geo->num_chk; blk++) {
		offset = blk * geo->pln_mode;
		blktype = blks[offset];

		for (pl = 0; pl < geo->pln_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		ppa.g.blk = blk;

		meta->wp = 0;
		meta->type = NVM_CHK_TP_W_SEQ;
		meta->wi = 0;
		meta->slba = generic_to_dev_addr(dev, ppa).ppa;
		meta->cnlb = dev->geo.clba;

		if (blktype == NVM_BLK_T_FREE) {
			ret = nvm_bb_chunk_scan(dev, ppa, meta);
			if (ret)
				return ret;
		} else {
			meta->state = NVM_CHK_ST_OFFLINE;
		}

		meta++;
	}

	return 0;
}

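/*
 * Example (hypothetical 1.2 bad-block table): with pln_mode = 2, chunk 3
 * is described by blks[6] and blks[7]; if either entry carries
 * NVM_BLK_T_BAD or NVM_BLK_T_GRWN_BAD the chunk is reported offline,
 * otherwise blks[6] alone decides whether the chunk is scanned as free.
 */
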
static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
			   int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int ch, lun, nr_blks;
	int ret = 0;

	ppa.ppa = slba;
	ppa = dev_to_generic_addr(dev, ppa);

	if (ppa.g.blk != 0)
		return -EINVAL;

	if ((nchks % geo->num_chk) != 0)
		return -EINVAL;

	nr_blks = geo->num_chk * geo->pln_mode;

	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
		for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
			struct ppa_addr ppa_gen, ppa_dev;

			if (!nchks)
				goto done;

			ppa_gen.ppa = 0;
			ppa_gen.g.ch = ch;
			ppa_gen.g.lun = lun;
			ppa_dev = generic_to_dev_addr(dev, ppa_gen);

			ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
			if (ret)
				goto done;

			ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
									meta);
			if (ret)
				goto done;

			meta += geo->num_chk;
			nchks -= geo->num_chk;
		}
	}
done:
	kfree(blks);
	return ret;
}

int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	if (dev->geo.version == NVM_OCSSD_SPEC_12)
		return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);

	return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
}
EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);

int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (dev->geo.version == NVM_OCSSD_SPEC_20)
		return 0;

	if (nr_ppas > NVM_MAX_VLBA) {
		pr_err("nvm: unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);

static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret;

	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free(struct kref *ref)
{
	struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	if (dev->rmap)
		nvm_unregister_map(dev);

	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%u.%u nvm_vendor:%x\n",
			geo->major_ver_id, geo->minor_ver_id,
			geo->vmnt);

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u]\n",
			dev->name, dev->geo.ws_min, dev->geo.ws_opt,
			dev->geo.num_chk, dev->geo.all_luns,
			dev->geo.num_ch);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	struct nvm_dev *dev;

	dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
	if (dev)
		kref_init(&dev->ref);

	return dev;
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
	int ret, exp_pool_size;

	if (!dev->q || !dev->ops) {
		kref_put(&dev->ref, nvm_free);
		return -EINVAL;
	}

	ret = nvm_init(dev);
	if (ret) {
		kref_put(&dev->ref, nvm_free);
		return ret;
	}

	exp_pool_size = max_t(int, PAGE_SIZE,
			      (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
	exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);

	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
						  exp_pool_size);
	if (!dev->dma_pool) {
		pr_err("nvm: could not create dma pool\n");
		kref_put(&dev->ref, nvm_free);
		return -ENOMEM;
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
}
EXPORT_SYMBOL(nvm_register);

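/*
 * Lifetime sketch (illustrative): the reference taken by kref_init() in
 * nvm_alloc_dev() is dropped on a failed nvm_register() or by the final
 * kref_put() in nvm_unregister(); each created target holds an extra
 * reference (kref_get() in __nvm_configure_create()) that
 * nvm_remove_tgt() releases, so nvm_free() runs only after the device
 * and all of its targets are gone.
 */
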
void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t, false);
		kref_put(&dev->ref, nvm_free);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	kref_put(&dev->ref, nvm_free);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	kref_get(&dev->ref);
	ret = nvm_create_tgt(dev, create);
	if (ret)
		kref_put(&dev->ref, nvm_free);

	return ret;
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_tgtt_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_tgtt_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
	    create.conf.e.rsv != 0) {
		pr_err("nvm: reserved config field in use\n");
		return -EINVAL;
	}

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		__u32 flags = create.flags;

		/* Check for valid flags */
		if (flags & NVM_TARGET_FACTORY)
			flags &= ~NVM_TARGET_FACTORY;

		if (flags) {
			pr_err("nvm: flag not supported\n");
			return -EINVAL;
		}
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return nvm_remove_tgt(&remove);
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

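/*
 * Userspace sketch (illustrative, not part of this file): targets are
 * typically created through the /dev/lightnvm/control node, e.g.:
 *
 *	struct nvm_ioctl_create c = { 0 };
 *
 *	strcpy(c.dev, "nvme0n1");
 *	strcpy(c.tgtname, "tgt0");
 *	strcpy(c.tgttype, "pblk");
 *	c.conf.type = NVM_CONFIG_TYPE_SIMPLE;
 *	c.conf.s.lun_begin = c.conf.s.lun_end = -1;	/* all LUNs */
 *	ioctl(fd, NVM_DEV_CREATE, &c);
 *
 * which lands in nvm_ioctl_dev_create() above.
 */
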
static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);