/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>
static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);
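
/*
 * A target owns a contiguous range of LUNs on its parent device. The
 * structures below record, per channel, how the target's virtual channel
 * and LUN numbering maps onto the device's physical numbering: the device
 * keeps a reverse map (dev->rmap) and each target keeps a forward map
 * (tgt_dev->map).
 */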
/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
        int ch_off;
        int nr_luns;
        int *lun_offs;
};

struct nvm_dev_map {
        struct nvm_ch_map *chnls;
        int nr_chnls;
};

struct nvm_area {
        struct list_head list;
        sector_t begin;
        sector_t end;   /* end is excluded */
};
static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
        struct nvm_target *tgt;

        list_for_each_entry(tgt, &dev->targets, list)
                if (!strcmp(name, tgt->disk->disk_name))
                        return tgt;

        return NULL;
}
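
/*
 * Reserve the LUN range [lun_begin, lun_end] in the device-wide bitmap.
 * If any LUN in the range is already taken, the bits claimed so far are
 * rolled back and the reservation fails.
 */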
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
        int i;

        for (i = lun_begin; i <= lun_end; i++) {
                if (test_and_set_bit(i, dev->lun_map)) {
                        pr_err("nvm: lun %d already allocated\n", i);
                        goto err;
                }
        }

        return 0;
err:
        while (--i >= lun_begin)
                clear_bit(i, dev->lun_map);

        return -EBUSY;
}
static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
                                 int lun_end)
{
        int i;

        for (i = lun_begin; i <= lun_end; i++)
                WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}
static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_dev_map *dev_map = tgt_dev->map;
        int i, j;

        for (i = 0; i < dev_map->nr_chnls; i++) {
                struct nvm_ch_map *ch_map = &dev_map->chnls[i];
                int *lun_offs = ch_map->lun_offs;
                int ch = i + ch_map->ch_off;

                if (clear) {
                        for (j = 0; j < ch_map->nr_luns; j++) {
                                int lun = j + lun_offs[j];
                                int lunid = (ch * dev->geo.luns_per_chnl) + lun;

                                WARN_ON(!test_and_clear_bit(lunid,
                                                        dev->lun_map));
                        }
                }

                kfree(ch_map->lun_offs);
        }

        kfree(dev_map->chnls);
        kfree(dev_map);

        kfree(tgt_dev->luns);
        kfree(tgt_dev);
}
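
/*
 * Build the nvm_tgt_dev for a LUN range: allocate the per-channel offset
 * maps, the list of ppa_addr LUNs owned by the target, and a private copy
 * of the geometry trimmed down to the channels and LUNs the target owns.
 */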
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
                                              int lun_begin, int lun_end)
{
        struct nvm_tgt_dev *tgt_dev = NULL;
        struct nvm_dev_map *dev_rmap = dev->rmap;
        struct nvm_dev_map *dev_map;
        struct ppa_addr *luns;
        int nr_luns = lun_end - lun_begin + 1;
        int luns_left = nr_luns;
        int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
        int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
        int bch = lun_begin / dev->geo.luns_per_chnl;
        int blun = lun_begin % dev->geo.luns_per_chnl;
        int lunid = 0;
        int lun_balanced = 1;
        int prev_nr_luns;
        int i, j;

        nr_chnls = nr_luns / dev->geo.luns_per_chnl;
        nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;

        dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
        if (!dev_map)
                goto err_dev;

        dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map),
                                                        GFP_KERNEL);
        if (!dev_map->chnls)
                goto err_chnls;

        luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
        if (!luns)
                goto err_luns;

        prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
                                dev->geo.luns_per_chnl : luns_left;
        for (i = 0; i < nr_chnls; i++) {
                struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
                int *lun_roffs = ch_rmap->lun_offs;
                struct nvm_ch_map *ch_map = &dev_map->chnls[i];
                int *lun_offs;
                int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
                                dev->geo.luns_per_chnl : luns_left;

                if (lun_balanced && prev_nr_luns != luns_in_chnl)
                        lun_balanced = 0;

                ch_map->ch_off = ch_rmap->ch_off = bch;
                ch_map->nr_luns = luns_in_chnl;

                lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
                if (!lun_offs)
                        goto err_ch;

                for (j = 0; j < luns_in_chnl; j++) {
                        luns[lunid].g.ch = i;
                        luns[lunid++].g.lun = j;

                        lun_offs[j] = blun;
                        lun_roffs[j + blun] = blun;
                }

                ch_map->lun_offs = lun_offs;

                /* when starting a new channel, lun offset is reset */
                blun = 0;
                luns_left -= luns_in_chnl;
        }

        dev_map->nr_chnls = nr_chnls;

        tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
        if (!tgt_dev)
                goto err_ch;

        memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
        /* Target device only owns a portion of the physical device */
        tgt_dev->geo.nr_chnls = nr_chnls;
        tgt_dev->geo.nr_luns = nr_luns;
        tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
        tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
        tgt_dev->map = dev_map;
        tgt_dev->luns = luns;
        memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));

        tgt_dev->parent = dev;

        return tgt_dev;
err_ch:
        while (--i >= 0)
                kfree(dev_map->chnls[i].lun_offs);
        kfree(luns);
err_luns:
        kfree(dev_map->chnls);
err_chnls:
        kfree(dev_map);
err_dev:
        return tgt_dev;
}
static const struct block_device_operations nvm_fops = {
        .owner          = THIS_MODULE,
};
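
/*
 * Create a target on top of a device: look up the target type, reserve the
 * requested LUNs, build the target device, then wire up a gendisk and
 * request queue before handing control to the target type's init routine.
 */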
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
        struct nvm_ioctl_create_simple *s = &create->conf.s;
        struct request_queue *tqueue;
        struct gendisk *tdisk;
        struct nvm_tgt_type *tt;
        struct nvm_target *t;
        struct nvm_tgt_dev *tgt_dev;
        void *targetdata;
        int ret;

        tt = nvm_find_target_type(create->tgttype, 1);
        if (!tt) {
                pr_err("nvm: target type %s not found\n", create->tgttype);
                return -EINVAL;
        }

        mutex_lock(&dev->mlock);
        t = nvm_find_target(dev, create->tgtname);
        if (t) {
                pr_err("nvm: target name already exists.\n");
                mutex_unlock(&dev->mlock);
                return -EINVAL;
        }
        mutex_unlock(&dev->mlock);

        ret = nvm_reserve_luns(dev, s->lun_begin, s->lun_end);
        if (ret)
                return ret;

        t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
        if (!t) {
                ret = -ENOMEM;
                goto err_reserve;
        }

        tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
        if (!tgt_dev) {
                pr_err("nvm: could not create target device\n");
                ret = -ENOMEM;
                goto err_t;
        }

        tdisk = alloc_disk(0);
        if (!tdisk) {
                ret = -ENOMEM;
                goto err_dev;
        }

        tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
        if (!tqueue) {
                ret = -ENOMEM;
                goto err_disk;
        }
        blk_queue_make_request(tqueue, tt->make_rq);

        strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
        tdisk->flags = GENHD_FL_EXT_DEVT;
        tdisk->major = 0;
        tdisk->first_minor = 0;
        tdisk->fops = &nvm_fops;
        tdisk->queue = tqueue;

        targetdata = tt->init(tgt_dev, tdisk, create->flags);
        if (IS_ERR(targetdata)) {
                ret = PTR_ERR(targetdata);
                goto err_init;
        }

        tdisk->private_data = targetdata;
        tqueue->queuedata = targetdata;

        blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);

        set_capacity(tdisk, tt->capacity(targetdata));
        add_disk(tdisk);

        if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
                ret = -ENOMEM;
                goto err_sysfs;
        }

        t->type = tt;
        t->disk = tdisk;
        t->dev = tgt_dev;

        mutex_lock(&dev->mlock);
        list_add_tail(&t->list, &dev->targets);
        mutex_unlock(&dev->mlock);

        return 0;
err_sysfs:
        if (tt->exit)
                tt->exit(targetdata);
err_init:
        blk_cleanup_queue(tqueue);
err_disk:
        put_disk(tdisk);
err_dev:
        nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
        kfree(t);
err_reserve:
        nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
        return ret;
}
static void __nvm_remove_target(struct nvm_target *t)
{
        struct nvm_tgt_type *tt = t->type;
        struct gendisk *tdisk = t->disk;
        struct request_queue *q = tdisk->queue;

        del_gendisk(tdisk);
        blk_cleanup_queue(q);

        if (tt->sysfs_exit)
                tt->sysfs_exit(tdisk);

        if (tt->exit)
                tt->exit(tdisk->private_data);

        nvm_remove_tgt_dev(t->dev, 1);
        put_disk(tdisk);

        list_del(&t->list);
        kfree(t);
}
/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @dev:	device
 * @remove:	ioctl structure with target name to remove.
 */
static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
{
        struct nvm_target *t;

        mutex_lock(&dev->mlock);
        t = nvm_find_target(dev, remove->tgtname);
        if (!t) {
                mutex_unlock(&dev->mlock);
                return 1;
        }
        __nvm_remove_target(t);
        mutex_unlock(&dev->mlock);

        return 0;
}
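
/*
 * Allocate the device's reverse map (dev->rmap). Offsets start out as -1,
 * meaning the channel/LUN is not yet claimed by any target; they are filled
 * in by nvm_create_tgt_dev() when a target reserves its LUNs.
 */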
static int nvm_register_map(struct nvm_dev *dev)
{
        struct nvm_dev_map *rmap;
        int i, j;

        rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
        if (!rmap)
                goto err_rmap;

        rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map),
                                                        GFP_KERNEL);
        if (!rmap->chnls)
                goto err_chnls;

        for (i = 0; i < dev->geo.nr_chnls; i++) {
                struct nvm_ch_map *ch_rmap;
                int *lun_roffs;
                int luns_in_chnl = dev->geo.luns_per_chnl;

                ch_rmap = &rmap->chnls[i];

                ch_rmap->ch_off = -1;
                ch_rmap->nr_luns = luns_in_chnl;

                lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
                if (!lun_roffs)
                        goto err_ch;

                for (j = 0; j < luns_in_chnl; j++)
                        lun_roffs[j] = -1;

                ch_rmap->lun_offs = lun_roffs;
        }

        dev->rmap = rmap;

        return 0;
err_ch:
        while (--i >= 0)
                kfree(rmap->chnls[i].lun_offs);
err_chnls:
        kfree(rmap);
err_rmap:
        return -ENOMEM;
}
static void nvm_unregister_map(struct nvm_dev *dev)
{
        struct nvm_dev_map *rmap = dev->rmap;
        int i;

        for (i = 0; i < dev->geo.nr_chnls; i++)
                kfree(rmap->chnls[i].lun_offs);

        kfree(rmap->chnls);
        kfree(rmap);
}
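
/*
 * Address translation helpers: nvm_map_to_dev() adds the per-channel and
 * per-LUN offsets to turn a target-relative address into a device address,
 * and nvm_map_to_tgt() subtracts them for the opposite direction.
 */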
static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
        struct nvm_dev_map *dev_map = tgt_dev->map;
        struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch];
        int lun_off = ch_map->lun_offs[p->g.lun];

        p->g.ch += ch_map->ch_off;
        p->g.lun += lun_off;
}
static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_dev_map *dev_rmap = dev->rmap;
        struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
        int lun_roff = ch_rmap->lun_offs[p->g.lun];

        p->g.ch -= ch_rmap->ch_off;
        p->g.lun -= lun_roff;
}
static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
                               struct ppa_addr *ppa_list, int nr_ppas)
{
        int i;

        for (i = 0; i < nr_ppas; i++) {
                nvm_map_to_dev(tgt_dev, &ppa_list[i]);
                ppa_list[i] = generic_to_dev_addr(tgt_dev, ppa_list[i]);
        }
}
static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
                               struct ppa_addr *ppa_list, int nr_ppas)
{
        int i;

        for (i = 0; i < nr_ppas; i++) {
                ppa_list[i] = dev_to_generic_addr(tgt_dev, ppa_list[i]);
                nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
        }
}
static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        if (rqd->nr_ppas == 1) {
                nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
                return;
        }

        nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}
static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        if (rqd->nr_ppas == 1) {
                nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
                return;
        }

        nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}
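
/*
 * Rebase a table of device-linear sector entries (for example, entries
 * returned by an L2P table read) into the target's address space by
 * subtracting the sector offset of the target's first LUN.
 */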
void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
                     int len)
{
        struct nvm_geo *geo = &dev->geo;
        struct nvm_dev_map *dev_rmap = dev->rmap;
        int i;

        for (i = 0; i < len; i++) {
                struct nvm_ch_map *ch_rmap;
                int *lun_roffs;
                struct ppa_addr gaddr;
                u64 pba = le64_to_cpu(entries[i]);
                u64 diff;

                gaddr = linear_to_generic_addr(geo, pba);
                ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
                lun_roffs = ch_rmap->lun_offs;

                diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
                                (lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;

                entries[i] -= cpu_to_le64(diff);
        }
}
EXPORT_SYMBOL(nvm_part_to_tgt);
struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
        struct nvm_tgt_type *tmp, *tt = NULL;

        if (lock)
                down_write(&nvm_tgtt_lock);

        list_for_each_entry(tmp, &nvm_tgt_types, list)
                if (!strcmp(name, tmp->name)) {
                        tt = tmp;
                        break;
                }

        if (lock)
                up_write(&nvm_tgtt_lock);
        return tt;
}
EXPORT_SYMBOL(nvm_find_target_type);
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
        int ret = 0;

        down_write(&nvm_tgtt_lock);
        if (nvm_find_target_type(tt->name, 0))
                ret = -EEXIST;
        else
                list_add(&tt->list, &nvm_tgt_types);
        up_write(&nvm_tgtt_lock);

        return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);
void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
        if (!tt)
                return;

        down_write(&nvm_lock);
        list_del(&tt->list);
        up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);
void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
                        dma_addr_t *dma_handler)
{
        return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
                                                        dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
        dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);
static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
        struct nvm_dev *dev;

        list_for_each_entry(dev, &nvm_devices, devices)
                if (!strcmp(name, dev->name))
                        return dev;

        return NULL;
}
int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
                       int nr_ppas, int type)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_rq rqd;
        int ret;

        if (nr_ppas > dev->ops->max_phys_sect) {
                pr_err("nvm: unable to update all blocks atomically\n");
                return -EINVAL;
        }

        memset(&rqd, 0, sizeof(struct nvm_rq));

        nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
        nvm_rq_tgt_to_dev(tgt_dev, &rqd);

        ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
        nvm_free_rqd_ppalist(tgt_dev, &rqd);
        if (ret) {
                pr_err("nvm: failed bb mark\n");
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
{
        struct nvm_dev *dev = tgt_dev->parent;

        return dev->ops->max_phys_sect;
}
EXPORT_SYMBOL(nvm_max_phys_sects);
int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        struct nvm_dev *dev = tgt_dev->parent;
        int ret;

        if (!dev->ops->submit_io)
                return -ENODEV;

        nvm_rq_tgt_to_dev(tgt_dev, rqd);

        rqd->dev = tgt_dev;

        /* In case of error, fail with right address format */
        ret = dev->ops->submit_io(dev, rqd);
        if (ret)
                nvm_rq_dev_to_tgt(tgt_dev, rqd);

        return ret;
}
EXPORT_SYMBOL(nvm_submit_io);
static void nvm_end_io_sync(struct nvm_rq *rqd)
{
        struct completion *waiting = rqd->private;

        complete(waiting);
}
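
/*
 * Synchronous erase: the request borrows nvm_end_io_sync() as its end_io
 * callback and waits on an on-stack completion until the device signals
 * that the erase has finished.
 */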
int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
                   int nr_ppas)
{
        struct nvm_geo *geo = &tgt_dev->geo;
        struct nvm_rq rqd;
        int ret;
        DECLARE_COMPLETION_ONSTACK(wait);

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.opcode = NVM_OP_ERASE;
        rqd.end_io = nvm_end_io_sync;
        rqd.private = &wait;
        rqd.flags = geo->plane_mode >> 1;

        ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
        if (ret)
                return ret;

        ret = nvm_submit_io(tgt_dev, &rqd);
        if (ret) {
                pr_err("rrpr: erase I/O submission failed: %d\n", ret);
                goto free_ppa_list;
        }
        wait_for_completion_io(&wait);

free_ppa_list:
        nvm_free_rqd_ppalist(tgt_dev, &rqd);

        return ret;
}
EXPORT_SYMBOL(nvm_erase_sync);
int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
                    nvm_l2p_update_fn *update_l2p, void *priv)
{
        struct nvm_dev *dev = tgt_dev->parent;

        if (!dev->ops->get_l2p_tbl)
                return 0;

        return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
}
EXPORT_SYMBOL(nvm_get_l2p_tbl);
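
/*
 * nvm_get_area() carves out a contiguous range of sectors for a target and
 * tracks it in the device's sorted area_list; nvm_put_area() releases the
 * reservation again by its starting sector.
 */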
int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_area *area, *prev, *next;
        sector_t begin = 0;
        sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;

        if (len > max_sectors)
                return -EINVAL;

        area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
        if (!area)
                return -ENOMEM;

        prev = NULL;

        spin_lock(&dev->lock);
        list_for_each_entry(next, &dev->area_list, list) {
                if (begin + len > next->begin) {
                        begin = next->end;
                        prev = next;
                        continue;
                }
                break;
        }

        if ((begin + len) > max_sectors) {
                spin_unlock(&dev->lock);
                kfree(area);
                return -EINVAL;
        }

        area->begin = *lba = begin;
        area->end = begin + len;

        if (prev) /* insert into sorted order */
                list_add(&area->list, &prev->list);
        else
                list_add(&area->list, &dev->area_list);
        spin_unlock(&dev->lock);

        return 0;
}
EXPORT_SYMBOL(nvm_get_area);
void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_area *area;

        spin_lock(&dev->lock);
        list_for_each_entry(area, &dev->area_list, list) {
                if (area->begin != begin)
                        continue;

                list_del(&area->list);
                spin_unlock(&dev->lock);
                kfree(area);
                return;
        }
        spin_unlock(&dev->lock);
}
EXPORT_SYMBOL(nvm_put_area);
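
/*
 * Populate the ppa list of a request. A single address is stored inline in
 * rqd->ppa_addr; larger lists go into a DMA-able buffer. For virtual blocks
 * the list is expanded so that every plane of each block is addressed.
 */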
int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
                        const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_geo *geo = &tgt_dev->geo;
        int i, plane_cnt, pl_idx;
        struct ppa_addr ppa;

        if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
                rqd->nr_ppas = nr_ppas;
                rqd->ppa_addr = ppas[0];

                return 0;
        }

        rqd->nr_ppas = nr_ppas;
        rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
        if (!rqd->ppa_list) {
                pr_err("nvm: failed to allocate dma memory\n");
                return -ENOMEM;
        }

        if (!vblk) {
                for (i = 0; i < nr_ppas; i++)
                        rqd->ppa_list[i] = ppas[i];
        } else {
                plane_cnt = geo->plane_mode;
                rqd->nr_ppas *= plane_cnt;

                for (i = 0; i < nr_ppas; i++) {
                        for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
                                ppa = ppas[i];
                                ppa.g.pl = pl_idx;
                                rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
                        }
                }
        }

        return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);
void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        if (!rqd->ppa_list)
                return;

        nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);
void nvm_end_io(struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *tgt_dev = rqd->dev;

        /* Convert address space */
        if (tgt_dev)
                nvm_rq_dev_to_tgt(tgt_dev, rqd);

        if (rqd->end_io)
                rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);
/*
 * folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place and reduced size is
 * returned.
 *
 * If any of the planes status are bad or grown bad block, the virtual block
 * is marked bad. If not bad, the first plane state acts as the block state.
 */
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
        struct nvm_geo *geo = &dev->geo;
        int blk, offset, pl, blktype;

        if (nr_blks != geo->blks_per_lun * geo->plane_mode)
                return -EINVAL;

        for (blk = 0; blk < geo->blks_per_lun; blk++) {
                offset = blk * geo->plane_mode;
                blktype = blks[offset];

                /* Bad blocks on any planes take precedence over other types */
                for (pl = 0; pl < geo->plane_mode; pl++) {
                        if (blks[offset + pl] &
                                        (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
                                blktype = blks[offset + pl];
                                break;
                        }
                }

                blks[blk] = blktype;
        }

        return geo->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);
int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
                       u8 *blks)
{
        struct nvm_dev *dev = tgt_dev->parent;

        nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

        return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
        struct nvm_geo *geo = &dev->geo;
        int i;

        dev->lps_per_blk = geo->pgs_per_blk;
        dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
        if (!dev->lptbl)
                return -ENOMEM;

        /* Just a linear array */
        for (i = 0; i < dev->lps_per_blk; i++)
                dev->lptbl[i] = i;

        return 0;
}
static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
        struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;
        int i, p;

        dev->lps_per_blk = mlc->num_pairs;
        dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
        if (!dev->lptbl)
                return -ENOMEM;

        /* The lower page table encoding consists of a list of bytes, where each
         * has a lower and an upper half. The first half byte maintains the
         * increment value and every value after is an offset added to the
         * previous incrementation value
         */
        dev->lptbl[0] = mlc->pairs[0] & 0xF;
        for (i = 1; i < dev->lps_per_blk; i++) {
                p = mlc->pairs[i >> 1];
                if (i & 0x1) /* upper */
                        dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
                else /* lower */
                        dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
        }

        return 0;
}
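
/*
 * Illustration of the decoding above with made-up identify data: if
 * mlc->pairs[] started with 0x32, 0x41, ... then lptbl[0] = 2,
 * lptbl[1] = 2 + 3 = 5, lptbl[2] = 5 + 1 = 6, lptbl[3] = 6 + 4 = 10,
 * i.e. each half-byte is an increment over the previous entry.
 */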
static int nvm_core_init(struct nvm_dev *dev)
{
        struct nvm_id *id = &dev->identity;
        struct nvm_id_group *grp = &id->grp;
        struct nvm_geo *geo = &dev->geo;
        int ret;

        /* Whole device values */
        geo->nr_chnls = grp->num_ch;
        geo->luns_per_chnl = grp->num_lun;

        /* Generic device values */
        geo->pgs_per_blk = grp->num_pg;
        geo->blks_per_lun = grp->num_blk;
        geo->nr_planes = grp->num_pln;
        geo->fpg_size = grp->fpg_sz;
        geo->pfpg_size = grp->fpg_sz * grp->num_pln;
        geo->sec_size = grp->csecs;
        geo->oob_size = grp->sos;
        geo->sec_per_pg = grp->fpg_sz / grp->csecs;
        geo->mccap = grp->mccap;
        memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

        geo->plane_mode = NVM_PLANE_SINGLE;
        geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;

        if (grp->mpos & 0x020202)
                geo->plane_mode = NVM_PLANE_DOUBLE;
        if (grp->mpos & 0x040404)
                geo->plane_mode = NVM_PLANE_QUAD;

        if (grp->mtype != 0) {
                pr_err("nvm: memory type not supported\n");
                return -EINVAL;
        }

        /* calculated values */
        geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
        geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
        geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
        geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;

        dev->total_secs = geo->nr_luns * geo->sec_per_lun;
        dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
                                        sizeof(unsigned long), GFP_KERNEL);
        if (!dev->lun_map)
                return -ENOMEM;

        switch (grp->fmtype) {
        case NVM_ID_FMTYPE_SLC:
                if (nvm_init_slc_tbl(dev, grp)) {
                        ret = -ENOMEM;
                        goto err_fmtype;
                }
                break;
        case NVM_ID_FMTYPE_MLC:
                if (nvm_init_mlc_tbl(dev, grp)) {
                        ret = -ENOMEM;
                        goto err_fmtype;
                }
                break;
        default:
                pr_err("nvm: flash type not supported\n");
                ret = -EINVAL;
                goto err_fmtype;
        }

        INIT_LIST_HEAD(&dev->area_list);
        INIT_LIST_HEAD(&dev->targets);
        mutex_init(&dev->mlock);
        spin_lock_init(&dev->lock);

        ret = nvm_register_map(dev);
        if (ret)
                goto err_fmtype;

        blk_queue_logical_block_size(dev->q, geo->sec_size);

        return 0;
err_fmtype:
        kfree(dev->lun_map);
        return ret;
}
static void nvm_free(struct nvm_dev *dev)
{
        if (!dev)
                return;

        if (dev->dma_pool)
                dev->ops->destroy_dma_pool(dev->dma_pool);

        nvm_unregister_map(dev);
        kfree(dev->lptbl);
        kfree(dev->lun_map);
        kfree(dev);
}
static int nvm_init(struct nvm_dev *dev)
{
        struct nvm_geo *geo = &dev->geo;
        int ret = -EINVAL;

        if (dev->ops->identity(dev, &dev->identity)) {
                pr_err("nvm: device could not be identified\n");
                goto err;
        }

        pr_debug("nvm: ver:%x nvm_vendor:%x\n",
                        dev->identity.ver_id, dev->identity.vmnt);

        if (dev->identity.ver_id != 1) {
                pr_err("nvm: device not supported by kernel.");
                goto err;
        }

        ret = nvm_core_init(dev);
        if (ret) {
                pr_err("nvm: could not initialize core structures.\n");
                goto err;
        }

        pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
                        dev->name, geo->sec_per_pg, geo->nr_planes,
                        geo->pgs_per_blk, geo->blks_per_lun,
                        geo->nr_luns, geo->nr_chnls);
        return 0;
err:
        pr_err("nvm: failed to initialize nvm\n");
        return ret;
}
struct nvm_dev *nvm_alloc_dev(int node)
{
        return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);
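
/*
 * Device registration: validate the ops and queue, create the PPA-list DMA
 * pool when the device accepts more than one physical sector per command,
 * run nvm_init() and finally publish the device on the global nvm_devices
 * list under nvm_lock.
 */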
int nvm_register(struct nvm_dev *dev)
{
        int ret;

        if (!dev->q || !dev->ops)
                return -EINVAL;

        if (dev->ops->max_phys_sect > 256) {
                pr_info("nvm: max sectors supported is 256.\n");
                return -EINVAL;
        }

        if (dev->ops->max_phys_sect > 1) {
                dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
                if (!dev->dma_pool) {
                        pr_err("nvm: could not create dma pool\n");
                        return -ENOMEM;
                }
        }

        ret = nvm_init(dev);
        if (ret)
                goto err_init;

        /* register device with a supported media manager */
        down_write(&nvm_lock);
        list_add(&dev->devices, &nvm_devices);
        up_write(&nvm_lock);

        return 0;
err_init:
        dev->ops->destroy_dma_pool(dev->dma_pool);
        return ret;
}
EXPORT_SYMBOL(nvm_register);
void nvm_unregister(struct nvm_dev *dev)
{
        struct nvm_target *t, *tmp;

        mutex_lock(&dev->mlock);
        list_for_each_entry_safe(t, tmp, &dev->targets, list) {
                if (t->dev->parent != dev)
                        continue;
                __nvm_remove_target(t);
        }
        mutex_unlock(&dev->mlock);

        down_write(&nvm_lock);
        list_del(&dev->devices);
        up_write(&nvm_lock);

        nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);
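
/*
 * Resolve the device named in the ioctl, validate the "simple" target
 * configuration (lun_begin/lun_end of -1/-1 selects the whole device) and
 * forward the request to nvm_create_tgt().
 */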
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
        struct nvm_dev *dev;
        struct nvm_ioctl_create_simple *s;

        down_write(&nvm_lock);
        dev = nvm_find_nvm_dev(create->dev);
        up_write(&nvm_lock);

        if (!dev) {
                pr_err("nvm: device not found\n");
                return -EINVAL;
        }

        if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
                pr_err("nvm: config type not valid\n");
                return -EINVAL;
        }
        s = &create->conf.s;

        if (s->lun_begin == -1 && s->lun_end == -1) {
                s->lun_begin = 0;
                s->lun_end = dev->geo.nr_luns - 1;
        }

        if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) {
                pr_err("nvm: lun out of bound (%u:%u > %u)\n",
                        s->lun_begin, s->lun_end, dev->geo.nr_luns - 1);
                return -EINVAL;
        }

        return nvm_create_tgt(dev, create);
}
static long nvm_ioctl_info(struct file *file, void __user *arg)
{
        struct nvm_ioctl_info *info;
        struct nvm_tgt_type *tt;
        int tgt_iter = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
        if (IS_ERR(info))
                return -EFAULT;

        info->version[0] = NVM_VERSION_MAJOR;
        info->version[1] = NVM_VERSION_MINOR;
        info->version[2] = NVM_VERSION_PATCH;

        down_write(&nvm_lock);
        list_for_each_entry(tt, &nvm_tgt_types, list) {
                struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

                tgt->version[0] = tt->version[0];
                tgt->version[1] = tt->version[1];
                tgt->version[2] = tt->version[2];
                strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

                tgt_iter++;
        }

        info->tgtsize = tgt_iter;
        up_write(&nvm_lock);

        if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
                kfree(info);
                return -EFAULT;
        }

        kfree(info);
        return 0;
}
static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
        struct nvm_ioctl_get_devices *devices;
        struct nvm_dev *dev;
        int i = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
        if (!devices)
                return -ENOMEM;

        down_write(&nvm_lock);
        list_for_each_entry(dev, &nvm_devices, devices) {
                struct nvm_ioctl_device_info *info = &devices->info[i];

                strlcpy(info->devname, dev->name, sizeof(info->devname));

                /* kept for compatibility */
                info->bmversion[0] = 1;
                info->bmversion[1] = 0;
                info->bmversion[2] = 0;
                strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
                i++;

                if (i > 31) {
                        pr_err("nvm: max 31 devices can be reported.\n");
                        break;
                }
        }
        up_write(&nvm_lock);

        devices->nr_devices = i;

        if (copy_to_user(arg, devices,
                         sizeof(struct nvm_ioctl_get_devices))) {
                kfree(devices);
                return -EFAULT;
        }

        kfree(devices);
        return 0;
}
static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
        struct nvm_ioctl_create create;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
                return -EFAULT;

        create.dev[DISK_NAME_LEN - 1] = '\0';
        create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
        create.tgtname[DISK_NAME_LEN - 1] = '\0';

        if (create.flags != 0) {
                __u32 flags = create.flags;

                /* Check for valid flags */
                if (flags & NVM_TARGET_FACTORY)
                        flags &= ~NVM_TARGET_FACTORY;

                if (flags) {
                        pr_err("nvm: flag not supported\n");
                        return -EINVAL;
                }
        }

        return __nvm_configure_create(&create);
}
static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
        struct nvm_ioctl_remove remove;
        struct nvm_dev *dev;
        int ret = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
                return -EFAULT;

        remove.tgtname[DISK_NAME_LEN - 1] = '\0';

        if (remove.flags != 0) {
                pr_err("nvm: no flags supported\n");
                return -EINVAL;
        }

        list_for_each_entry(dev, &nvm_devices, devices) {
                ret = nvm_remove_tgt(dev, &remove);
                if (!ret)
                        break;
        }

        return ret;
}
/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
        struct nvm_ioctl_dev_init init;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
                return -EFAULT;

        if (init.flags != 0) {
                pr_err("nvm: no flags supported\n");
                return -EINVAL;
        }

        return 0;
}
/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
        struct nvm_ioctl_dev_factory fact;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
                return -EFAULT;

        fact.dev[DISK_NAME_LEN - 1] = '\0';

        if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
                return -EINVAL;

        return 0;
}
static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
        void __user *argp = (void __user *)arg;

        switch (cmd) {
        case NVM_INFO:
                return nvm_ioctl_info(file, argp);
        case NVM_GET_DEVICES:
                return nvm_ioctl_get_devices(file, argp);
        case NVM_DEV_CREATE:
                return nvm_ioctl_dev_create(file, argp);
        case NVM_DEV_REMOVE:
                return nvm_ioctl_dev_remove(file, argp);
        case NVM_DEV_INIT:
                return nvm_ioctl_dev_init(file, argp);
        case NVM_DEV_FACTORY:
                return nvm_ioctl_dev_factory(file, argp);
        }
        return 0;
}
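
/*
 * The ioctls above are exposed through a misc character device, created as
 * /dev/lightnvm/control via the nodename below.
 */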
static const struct file_operations _ctl_fops = {
        .open = nonseekable_open,
        .unlocked_ioctl = nvm_ctl_ioctl,
        .owner = THIS_MODULE,
        .llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
        .minor          = MISC_DYNAMIC_MINOR,
        .name           = "lightnvm",
        .nodename       = "lightnvm/control",
        .fops           = &_ctl_fops,
};
builtin_misc_device(_nvm_misc);