/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>
static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);
/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int nr_luns;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int nr_chnls;
};

struct nvm_area {
	struct list_head list;
	sector_t begin;
	sector_t end;	/* end is excluded */
};
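
/*
 * Illustrative note (not part of the original file; the geometry below is a
 * made-up example): on a device with geo.luns_per_chnl = 8, a target created
 * over LUNs 8..15 gets a single nvm_ch_map with ch_off = 1 and all lun_offs[]
 * entries 0. nvm_map_to_dev() then turns target-relative (ch 0, lun j) into
 * physical (ch 1, lun j), and nvm_map_to_tgt() undoes it through the device's
 * reverse map in dev->rmap. The maps are built in nvm_create_tgt_dev() and
 * nvm_register_map() below.
 */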
static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("nvm: lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}
static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}
static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->nr_chnls; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->nr_luns; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.luns_per_chnl) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      int lun_begin, int lun_end)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int nr_luns = lun_end - lun_begin + 1;
	int luns_left = nr_luns;
	int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
	int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
	int bch = lun_begin / dev->geo.luns_per_chnl;
	int blun = lun_begin % dev->geo.luns_per_chnl;
	int lunid = 0;
	int lun_balanced = 1;
	int prev_nr_luns;
	int i, j;

	nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map),
								GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
					dev->geo.luns_per_chnl : luns_left;
	for (i = 0; i < nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
					dev->geo.luns_per_chnl : luns_left;

		if (lun_balanced && prev_nr_luns != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->nr_luns = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].g.ch = i;
			luns[lunid++].g.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->nr_chnls = nr_chnls;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.nr_chnls = nr_chnls;
	tgt_dev->geo.nr_luns = nr_luns;
	tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
	tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));

	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}
static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};
static struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
	struct nvm_tgt_type *tmp, *tt = NULL;

	if (lock)
		down_write(&nvm_tgtt_lock);

	list_for_each_entry(tmp, &nvm_tgt_types, list)
		if (!strcmp(name, tmp->name)) {
			tt = tmp;
			break;
		}

	if (lock)
		up_write(&nvm_tgtt_lock);
	return tt;
}
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_simple *s = &create->conf.s;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	int ret;

	tt = nvm_find_target_type(create->tgttype, 1);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, create->tgtname);
	if (t) {
		pr_err("nvm: target name already exists.\n");
		mutex_unlock(&dev->mlock);
		return -EINVAL;
	}
	mutex_unlock(&dev->mlock);

	ret = nvm_reserve_luns(dev, s->lun_begin, s->lun_end);
	if (ret)
		return ret;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
	if (!tgt_dev) {
		pr_err("nvm: could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_disk;
	}
	blk_queue_make_request(tqueue, tt->make_rq);

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	__module_get(tt->owner);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata);
err_init:
	blk_cleanup_queue(tqueue);
	tdisk->queue = NULL;
err_disk:
	put_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
	return ret;
}
static void __nvm_remove_target(struct nvm_target *t)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);
	module_put(t->type->owner);

	list_del(&t->list);
	kfree(t);
}
/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @dev:	device
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t;

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, remove->tgtname);
	if (!t) {
		mutex_unlock(&dev->mlock);
		return 1;
	}
	__nvm_remove_target(t);
	mutex_unlock(&dev->mlock);

	return 0;
}
static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map),
								GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.luns_per_chnl;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->nr_luns = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}
static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.nr_chnls; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}
static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch];
	int lun_off = ch_map->lun_offs[p->g.lun];

	p->g.ch += ch_map->ch_off;
	p->g.lun += lun_off;
}
static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
	int lun_roff = ch_rmap->lun_offs[p->g.lun];

	p->g.ch -= ch_rmap->ch_off;
	p->g.lun -= lun_roff;
}
static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev, ppa_list[i]);
	}
}
static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}
static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}
static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}
void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
		     int len)
{
	struct nvm_geo *geo = &dev->geo;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	u64 i;

	for (i = 0; i < len; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		struct ppa_addr gaddr;
		u64 pba = le64_to_cpu(entries[i]);
		u64 diff;

		if (!pba)
			continue;

		gaddr = linear_to_generic_addr(geo, pba);
		ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
		lun_roffs = ch_rmap->lun_offs;

		diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
				(lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;

		entries[i] -= cpu_to_le64(diff);
	}
}
EXPORT_SYMBOL(nvm_part_to_tgt);
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (nvm_find_target_type(tt->name, 0))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);
void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);
void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
			dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);
void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);
static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}
static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if (geo->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	plane_cnt = geo->plane_mode;
	rqd->nr_ppas *= plane_cnt;

	for (i = 0; i < nr_ppas; i++) {
		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			ppa = ppas[i];
			ppa.g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
		}
	}

	return 0;
}
static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
			struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}
int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (nr_ppas > dev->ops->max_phys_sect) {
		pr_err("nvm: unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret) {
		pr_err("nvm: failed bb mark\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->ops->max_phys_sect;
}
EXPORT_SYMBOL(nvm_max_phys_sects);
int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io(dev, rqd);
	if (ret)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io);
int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io_sync)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io_sync(dev, rqd);
	nvm_rq_dev_to_tgt(tgt_dev, rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);
int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		   int nr_ppas)
{
	struct nvm_geo *geo = &tgt_dev->geo;
	struct nvm_rq rqd;
	int ret;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.opcode = NVM_OP_ERASE;
	rqd.flags = geo->plane_mode >> 1;

	ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	if (ret)
		return ret;

	ret = nvm_submit_io_sync(tgt_dev, &rqd);
	if (ret) {
		pr_err("rrpr: erase I/O submission failed: %d\n", ret);
		goto free_ppa_list;
	}

free_ppa_list:
	nvm_free_rqd_ppalist(tgt_dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_sync);
int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
		    nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvm_dev *dev = tgt_dev->parent;

	if (!dev->ops->get_l2p_tbl)
		return 0;

	return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
}
EXPORT_SYMBOL(nvm_get_l2p_tbl);
int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_area *area, *prev, *next;
	sector_t begin = 0;
	sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;

	if (len > max_sectors)
		return -EINVAL;

	area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
	if (!area)
		return -ENOMEM;

	prev = NULL;

	spin_lock(&dev->lock);
	list_for_each_entry(next, &dev->area_list, list) {
		if (begin + len > next->begin) {
			begin = next->end;
			prev = next;
			continue;
		}
		break;
	}

	if ((begin + len) > max_sectors) {
		spin_unlock(&dev->lock);
		kfree(area);
		return -EINVAL;
	}

	area->begin = *lba = begin;
	area->end = begin + len;

	if (prev) /* insert into sorted order */
		list_add(&area->list, &prev->list);
	else
		list_add(&area->list, &dev->area_list);
	spin_unlock(&dev->lock);

	return 0;
}
EXPORT_SYMBOL(nvm_get_area);
void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_area *area;

	spin_lock(&dev->lock);
	list_for_each_entry(area, &dev->area_list, list) {
		if (area->begin != begin)
			continue;

		list_del(&area->list);
		spin_unlock(&dev->lock);
		kfree(area);
		return;
	}
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL(nvm_put_area);
void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);
/*
 * folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place and reduced size is
 * returned.
 *
 * If any of the planes status are bad or grown bad block, the virtual block
 * is marked bad. If not bad, the first plane state acts as the block state.
 */
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	struct nvm_geo *geo = &dev->geo;
	int blk, offset, pl, blktype;

	if (nr_blks != geo->blks_per_lun * geo->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < geo->blks_per_lun; blk++) {
		offset = blk * geo->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < geo->plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return geo->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);
int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       u8 *blks)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
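
/*
 * Illustrative sketch, not part of the original file: a target would
 * typically fetch the per-plane bad block table for one LUN and fold it into
 * a per-block view. The helper name below is made up; the two exported
 * functions it calls are the ones defined above.
 */
static int __maybe_unused example_get_bb_tbl(struct nvm_tgt_dev *tgt_dev,
					     struct ppa_addr ppa, u8 *blks)
{
	struct nvm_geo *geo = &tgt_dev->geo;
	int nr_blks = geo->blks_per_lun * geo->plane_mode;
	int ret;

	/* Per-plane table for the LUN addressed by @ppa */
	ret = nvm_get_tgt_bb_tbl(tgt_dev, ppa, blks);
	if (ret)
		return ret;

	/* Collapse plane entries in place; returns blks_per_lun on success */
	ret = nvm_bb_tbl_fold(tgt_dev->parent, blks, nr_blks);
	if (ret < 0)
		return ret;

	return 0;
}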
static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	struct nvm_geo *geo = &dev->geo;
	int i;

	dev->lps_per_blk = geo->pgs_per_blk;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* Just a linear array */
	for (i = 0; i < dev->lps_per_blk; i++)
		dev->lptbl[i] = i;

	return 0;
}
static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	if (!mlc->num_pairs)
		return 0;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* The lower page table encoding consists of a list of bytes, where each
	 * has a lower and an upper half. The first half byte maintains the
	 * increment value and every value after is an offset added to the
	 * previous incrementation value
	 */
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];
		if (i & 0x1) /* upper */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}
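
/*
 * Worked example (illustrative values only, not from the original source):
 * with the hypothetical pairs bytes {0x42, 0x22} and lps_per_blk = 4, the
 * loop above produces lptbl[0] = 0x42 & 0xF = 2, lptbl[1] = 2 + 4 = 6 (upper
 * half of pairs[0]), lptbl[2] = 6 + 2 = 8 (lower half of pairs[1]) and
 * lptbl[3] = 8 + 2 = 10.
 */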
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->grp;
	struct nvm_geo *geo = &dev->geo;
	int ret;

	/* Whole device values */
	geo->nr_chnls = grp->num_ch;
	geo->luns_per_chnl = grp->num_lun;

	/* Generic device values */
	geo->pgs_per_blk = grp->num_pg;
	geo->blks_per_lun = grp->num_blk;
	geo->nr_planes = grp->num_pln;
	geo->fpg_size = grp->fpg_sz;
	geo->pfpg_size = grp->fpg_sz * grp->num_pln;
	geo->sec_size = grp->csecs;
	geo->oob_size = grp->sos;
	geo->sec_per_pg = grp->fpg_sz / grp->csecs;
	geo->mccap = grp->mccap;
	memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	geo->plane_mode = NVM_PLANE_SINGLE;
	geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;

	if (grp->mpos & 0x020202)
		geo->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		geo->plane_mode = NVM_PLANE_QUAD;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* calculated values */
	geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
	geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
	geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
	geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;

	dev->total_secs = geo->nr_luns * geo->sec_per_lun;
	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		ret = -EINVAL;
		goto err_fmtype;
	}

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	blk_queue_logical_block_size(dev->q, geo->sec_size);

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}
static void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	nvm_unregister_map(dev);
	kfree(dev->lptbl);
	kfree(dev->lun_map);
	kfree(dev);
}
static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x\n",
			dev->identity.ver_id, dev->identity.vmnt);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, geo->sec_per_pg, geo->nr_planes,
			geo->pgs_per_blk, geo->blks_per_lun,
			geo->nr_luns, geo->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}
struct nvm_dev *nvm_alloc_dev(int node)
{
	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);
int nvm_register(struct nvm_dev *dev)
{
	int ret;

	if (!dev->q || !dev->ops)
		return -EINVAL;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		return -EINVAL;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			return -ENOMEM;
		}
	}

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	dev->ops->destroy_dma_pool(dev->dma_pool);
	return ret;
}
EXPORT_SYMBOL(nvm_register);
void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = dev->geo.nr_luns - 1;
	}

	if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			s->lun_begin, s->lun_end, dev->geo.nr_luns - 1);
		return -EINVAL;
	}

	return nvm_create_tgt(dev, create);
}
static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_tgtt_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_tgtt_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}
static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}
static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		__u32 flags = create.flags;

		/* Check for valid flags */
		if (flags & NVM_TARGET_FACTORY)
			flags &= ~NVM_TARGET_FACTORY;

		if (flags) {
			pr_err("nvm: flag not supported\n");
			return -EINVAL;
		}
	}

	return __nvm_configure_create(&create);
}
static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		ret = nvm_remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}
/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return 0;
}
/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}
static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}
static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);
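
/*
 * Illustrative userspace sketch (not part of the original file): the misc
 * device registered above appears as /dev/lightnvm/control, and the commands
 * dispatched in nvm_ctl_ioctl() are issued against it. Structure and command
 * names come from the uapi <linux/lightnvm.h>; the device name "nvme0n1" and
 * target name "mytgt" below are made-up examples, and -1/-1 selects all LUNs
 * (see __nvm_configure_create()).
 *
 *	struct nvm_ioctl_create c = { 0 };
 *	int fd = open("/dev/lightnvm/control", O_RDWR);
 *
 *	strcpy(c.dev, "nvme0n1");
 *	strcpy(c.tgtname, "mytgt");
 *	strcpy(c.tgttype, "pblk");
 *	c.conf.type = NVM_CONFIG_TYPE_SIMPLE;
 *	c.conf.s.lun_begin = -1;
 *	c.conf.s.lun_end = -1;
 *	ioctl(fd, NVM_DEV_CREATE, &c);
 */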