drivers/lightnvm/core.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 */

#define pr_fmt(fmt) "nvm: " fmt

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

24 /* Map between virtual and physical channel and lun */
25 struct nvm_ch_map {
26 int ch_off;
27 int num_lun;
28 int *lun_offs;
31 struct nvm_dev_map {
32 struct nvm_ch_map *chnls;
33 int num_ch;
36 static void nvm_free(struct kref *ref);
static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static bool nvm_target_exists(const char *name)
{
	struct nvm_dev *dev;
	struct nvm_target *tgt;
	bool ret = false;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		list_for_each_entry(tgt, &dev->targets, list) {
			if (!strcmp(name, tgt->disk->disk_name)) {
				ret = true;
				mutex_unlock(&dev->mlock);
				goto out;
			}
		}
		mutex_unlock(&dev->mlock);
	}

out:
	up_write(&nvm_lock);
	return ret;
}

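/*
 * Atomically claim the LUN range [lun_begin, lun_end] in the device bitmap.
 * On a conflict, every bit taken so far is rolled back and -EBUSY is
 * returned, so a failed reservation leaves the map untouched.
 */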
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->num_ch; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->num_lun; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.num_lun) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}

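/*
 * Build a target device that owns the physical LUN range
 * [lun_begin, lun_end]. The per-channel maps record the offsets needed to
 * translate between target-local and device addresses; the reverse map in
 * dev->rmap is updated as a side effect. Returns NULL on allocation failure.
 */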
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      u16 lun_begin, u16 lun_end,
					      u16 op)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int num_lun = lun_end - lun_begin + 1;
	int luns_left = num_lun;
	int num_ch = num_lun / dev->geo.num_lun;
	int num_ch_mod = num_lun % dev->geo.num_lun;
	int bch = lun_begin / dev->geo.num_lun;
	int blun = lun_begin % dev->geo.num_lun;
	int lunid = 0;
	int lun_balanced = 1;
	int sec_per_lun, prev_num_lun;
	int i, j;

	num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_num_lun = (luns_left > dev->geo.num_lun) ?
			dev->geo.num_lun : luns_left;
	for (i = 0; i < num_ch; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;

		if (lun_balanced && prev_num_lun != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->num_lun = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].a.ch = i;
			luns[lunid++].a.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->num_ch = num_ch;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	/* Inherit device geometry from parent */
	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));

	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.num_ch = num_ch;
	tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
	tgt_dev->geo.all_luns = num_lun;
	tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;

	tgt_dev->geo.op = op;

	sec_per_lun = dev->geo.clba * dev->geo.num_chk;
	tgt_dev->geo.total_secs = num_lun * sec_per_lun;

	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}

static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};

static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	list_for_each_entry(tt, &nvm_tgt_types, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	down_write(&nvm_tgtt_lock);
	tt = __nvm_find_target_type(name);
	up_write(&nvm_tgtt_lock);

	return tt;
}

static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
				 int lun_end)
{
	if (lun_begin > lun_end || lun_end >= geo->all_luns) {
		pr_err("lun out of bound (%u:%u > %u)\n",
			lun_begin, lun_end, geo->all_luns - 1);
		return -EINVAL;
	}

	return 0;
}

static int __nvm_config_simple(struct nvm_dev *dev,
			       struct nvm_ioctl_create_simple *s)
{
	struct nvm_geo *geo = &dev->geo;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = geo->all_luns - 1;
	}

	return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}

static int __nvm_config_extended(struct nvm_dev *dev,
				 struct nvm_ioctl_create_extended *e)
{
	if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
		e->lun_begin = 0;
		e->lun_end = dev->geo.all_luns - 1;
	}

	/* op not set falls into target's default */
	if (e->op == 0xFFFF) {
		e->op = NVM_TARGET_DEFAULT_OP;
	} else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
		pr_err("invalid over provisioning value\n");
		return -EINVAL;
	}

	return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
}

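/*
 * Create a target on top of a LUN range: normalize the simple/extended
 * ioctl config, look up the target type, reserve the LUNs, then wire up
 * the gendisk and request queue before handing control to the target's
 * init callback. Every failure unwinds through the error labels below.
 */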
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_extended e;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	unsigned int mdts;
	int ret;

	switch (create->conf.type) {
	case NVM_CONFIG_TYPE_SIMPLE:
		ret = __nvm_config_simple(dev, &create->conf.s);
		if (ret)
			return ret;

		e.lun_begin = create->conf.s.lun_begin;
		e.lun_end = create->conf.s.lun_end;
		e.op = NVM_TARGET_DEFAULT_OP;
		break;
	case NVM_CONFIG_TYPE_EXTENDED:
		ret = __nvm_config_extended(dev, &create->conf.e);
		if (ret)
			return ret;

		e = create->conf.e;
		break;
	default:
		pr_err("config type not valid\n");
		return -EINVAL;
	}

	tt = nvm_find_target_type(create->tgttype);
	if (!tt) {
		pr_err("target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
		pr_err("device is incompatible with target L2P type.\n");
		return -EINVAL;
	}

	if (nvm_target_exists(create->tgtname)) {
		pr_err("target name already exists (%s)\n",
			create->tgtname);
		return -EINVAL;
	}

	ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
	if (ret)
		return ret;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
	if (!tgt_dev) {
		pr_err("could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_disk;
	}
	blk_queue_make_request(tqueue, tt->make_rq);

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
	if (dev->geo.mdts) {
		mdts = min_t(u32, dev->geo.mdts,
				(dev->geo.csecs >> 9) * NVM_MAX_VLBA);
	}
	blk_queue_max_hw_sectors(tqueue, mdts);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	__module_get(tt->owner);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata, true);
err_init:
	blk_cleanup_queue(tqueue);
	tdisk->queue = NULL;
err_disk:
	put_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
	return ret;
}

static void __nvm_remove_target(struct nvm_target *t, bool graceful)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data, graceful);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);
	module_put(t->type->owner);

	list_del(&t->list);
	kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t = NULL;
	struct nvm_dev *dev;

	down_read(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		t = nvm_find_target(dev, remove->tgtname);
		if (t) {
			mutex_unlock(&dev->mlock);
			break;
		}
		mutex_unlock(&dev->mlock);
	}
	up_read(&nvm_lock);

	if (!t) {
		pr_err("failed to remove target %s\n",
				remove->tgtname);
		return 1;
	}

	__nvm_remove_target(t, true);
	kref_put(&dev->ref, nvm_free);

	return 0;
}

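/*
 * Allocate the device-wide reverse map used to translate device addresses
 * back to target-local ones. Offsets start out at -1 and are filled in as
 * targets are created in nvm_create_tgt_dev().
 */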
static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
			      GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.num_ch; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.num_lun;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->num_lun = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
	kfree(rmap->chnls);	/* also free the channel array on this path */
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.num_ch; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}

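/*
 * Address translation helpers: nvm_map_to_dev()/nvm_map_to_tgt() shift a
 * single ppa between target-local and device-global channel/LUN numbering,
 * and the nvm_ppa_*/nvm_rq_* wrappers apply that plus the device address
 * format conversion to whole PPA lists and requests.
 */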
static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
	int lun_off = ch_map->lun_offs[p->a.lun];

	p->a.ch += ch_map->ch_off;
	p->a.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
	int lun_roff = ch_rmap->lun_offs[p->a.lun];

	p->a.ch -= ch_rmap->ch_off;
	p->a.lun -= lun_roff;
}

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
}

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (__nvm_find_target_type(tt->name))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
			dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

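/*
 * Fill the request's PPA list, expanding each address across all planes
 * when the device operates in a multi-plane mode. The single-PPA,
 * single-plane case is stored inline in rqd->ppa_addr and needs no DMA
 * allocation.
 */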
static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			       const struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("failed to allocate dma memory\n");
		return -ENOMEM;
	}

	plane_cnt = geo->pln_mode;
	rqd->nr_ppas *= plane_cnt;

	for (i = 0; i < nr_ppas; i++) {
		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			ppa = ppas[i];
			ppa.g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
		}
	}

	return 0;
}

static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
				 struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}

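/*
 * Compute the 1.2 per-command flags: plane mode for sequential access plus
 * scrambling (and suspend for reads). 2.0 devices encode none of this, so
 * they always get 0.
 */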
static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
	int flags = 0;

	if (geo->version == NVM_OCSSD_SPEC_20)
		return 0;

	if (rqd->is_seq)
		flags |= geo->pln_mode >> 1;

	if (rqd->opcode == NVM_OP_PREAD)
		flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
	else if (rqd->opcode == NVM_OP_PWRITE)
		flags |= NVM_IO_SCRAMBLE_ENABLE;

	return flags;
}

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io(dev, rqd, buf);
	if (ret)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);
	return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

static void nvm_sync_end_io(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

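/*
 * Submit a request and block until its completion handler fires. The
 * on-stack completion is safe because this function does not return
 * until nvm_sync_end_io() has run.
 */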
static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
			      void *buf)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret = 0;

	rqd->end_io = nvm_sync_end_io;
	rqd->private = &wait;

	ret = dev->ops->submit_io(dev, rqd, buf);
	if (ret)
		return ret;

	wait_for_completion_io(&wait);

	return 0;
}

int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
		       void *buf)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	ret = nvm_submit_io_wait(dev, rqd, buf);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return -ENODEV;

	rqd->dev = NULL;
	rqd->flags = nvm_set_flags(&dev->geo, rqd);

	return nvm_submit_io_wait(dev, rqd, NULL);
}

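/*
 * Probe a single page by issuing a small synchronous read to it. The
 * caller interprets the result: 0 means valid data, a positive value is
 * the device status (e.g. NVM_RSP_ERR_EMPTYPAGE), and a negative value
 * is a submission error.
 */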
static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
{
	struct nvm_rq rqd = { NULL };
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	int ret;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	bio_init(&bio, &bio_vec, 1);
	bio_add_page(&bio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(&bio, REQ_OP_READ, 0);

	rqd.bio = &bio;
	rqd.opcode = NVM_OP_PREAD;
	rqd.is_seq = 1;
	rqd.nr_ppas = 1;
	rqd.ppa_addr = generic_to_dev_addr(dev, ppa);

	ret = nvm_submit_io_sync_raw(dev, &rqd);
	/* free the page in all cases so the submit error path cannot leak it */
	__free_page(page);
	if (ret)
		return ret;

	return rqd.error;
}

/*
 * Scans a 1.2 chunk's first and last page to determine its state.
 * If the chunk is found to be open, also scan it to update the write
 * pointer.
 */
static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
			     struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, pg, pl;

	/* sense first page */
	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) /* valid data */
		meta->state = NVM_CHK_ST_OPEN;
	else if (ret > 0) {
		/*
		 * If empty page, the chunk is free, else it is an
		 * actual io error. In that case, mark it offline.
		 */
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
			meta->state = NVM_CHK_ST_FREE;
			return 0;
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			goto scan;
		default:
			return -ret; /* other io error */
		}
	}

	/* sense last page */
	ppa.g.pg = geo->num_pg - 1;
	ppa.g.pl = geo->num_pln - 1;

	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) { /* Chunk fully written */
		meta->state = NVM_CHK_ST_CLOSED;
		meta->wp = geo->clba;
		return 0;
	} else if (ret > 0) {
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			break;
		default:
			return -ret; /* other io error */
		}
	}

scan:
	/*
	 * chunk is open, we scan sequentially to update the write pointer.
	 * We make the assumption that targets write data across all planes
	 * before moving to the next page.
	 */
	for (pg = 0; pg < geo->num_pg; pg++) {
		for (pl = 0; pl < geo->num_pln; pl++) {
			ppa.g.pg = pg;
			ppa.g.pl = pl;

			ret = nvm_bb_chunk_sense(dev, ppa);
			if (ret < 0) /* io error */
				return ret;
			else if (ret == 0) {
				meta->wp += geo->ws_min;
			} else if (ret > 0) {
				switch (ret) {
				case NVM_RSP_ERR_EMPTYPAGE:
					return 0;
				case NVM_RSP_ERR_FAILCRC:
				case NVM_RSP_ERR_FAILECC:
				case NVM_RSP_WARN_HIGHECC:
					meta->wp += geo->ws_min;
					break;
				default:
					return -ret; /* other io error */
				}
			}
		}
	}

	return 0;
}

/*
 * Folds a bad block list from its plane representation to its
 * chunk representation.
 *
 * If any of the plane states is bad or grown bad, the chunk is marked
 * offline. Otherwise, the first plane's state acts as the chunk state.
 */
static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
			   u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, blk, pl, offset, blktype;

	for (blk = 0; blk < geo->num_chk; blk++) {
		offset = blk * geo->pln_mode;
		blktype = blks[offset];

		for (pl = 0; pl < geo->pln_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		ppa.g.blk = blk;

		meta->wp = 0;
		meta->type = NVM_CHK_TP_W_SEQ;
		meta->wi = 0;
		meta->slba = generic_to_dev_addr(dev, ppa).ppa;
		meta->cnlb = dev->geo.clba;

		if (blktype == NVM_BLK_T_FREE) {
			ret = nvm_bb_chunk_scan(dev, ppa, meta);
			if (ret)
				return ret;
		} else {
			meta->state = NVM_CHK_ST_OFFLINE;
		}

		meta++;
	}

	return 0;
}

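/*
 * Emulate the 2.0 chunk report for a 1.2 device: walk the requested
 * channel/LUN range, fetch each LUN's bad block table and fold it into
 * chunk metadata via nvm_bb_to_chunk().
 */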
static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
			   int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int ch, lun, nr_blks;
	int ret = 0;

	ppa.ppa = slba;
	ppa = dev_to_generic_addr(dev, ppa);

	if (ppa.g.blk != 0)
		return -EINVAL;

	if ((nchks % geo->num_chk) != 0)
		return -EINVAL;

	nr_blks = geo->num_chk * geo->pln_mode;

	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
		for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
			struct ppa_addr ppa_gen, ppa_dev;

			if (!nchks)
				goto done;

			ppa_gen.ppa = 0;
			ppa_gen.g.ch = ch;
			ppa_gen.g.lun = lun;
			ppa_dev = generic_to_dev_addr(dev, ppa_gen);

			ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
			if (ret)
				goto done;

			ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
									meta);
			if (ret)
				goto done;

			meta += geo->num_chk;
			nchks -= geo->num_chk;
		}
	}
done:
	kfree(blks);
	return ret;
}

int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	if (dev->geo.version == NVM_OCSSD_SPEC_12)
		return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);

	return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
}
EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);

int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (dev->geo.version == NVM_OCSSD_SPEC_20)
		return 0;

	if (nr_ppas > NVM_MAX_VLBA) {
		pr_err("unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	if (ret)	/* the DMA allocation for the ppa list can fail */
		return ret;

	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);

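/*
 * Set up the per-device core state: the LUN allocation bitmap, target and
 * area lists, locks, and the reverse address map.
 */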
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret;

	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free(struct kref *ref)
{
	struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	if (dev->rmap)
		nvm_unregister_map(dev);

	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev)) {
		pr_err("device could not be identified\n");
		goto err;
	}

	pr_debug("ver:%u.%u nvm_vendor:%x\n", geo->major_ver_id,
			geo->minor_ver_id, geo->vmnt);

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("could not initialize core structures.\n");
		goto err;
	}

	pr_info("registered %s [%u/%u/%u/%u/%u]\n",
			dev->name, dev->geo.ws_min, dev->geo.ws_opt,
			dev->geo.num_chk, dev->geo.all_luns,
			dev->geo.num_ch);
	return 0;
err:
	pr_err("failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	struct nvm_dev *dev;

	dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
	if (dev)
		kref_init(&dev->ref);

	return dev;
}
EXPORT_SYMBOL(nvm_alloc_dev);

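/*
 * Register a device with the subsystem. The DMA pool is sized for the
 * worst-case PPA list plus out-of-band metadata (NVM_MAX_VLBA entries),
 * rounded up to at least one page.
 */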
int nvm_register(struct nvm_dev *dev)
{
	int ret, exp_pool_size;

	if (!dev->q || !dev->ops) {
		kref_put(&dev->ref, nvm_free);
		return -EINVAL;
	}

	ret = nvm_init(dev);
	if (ret) {
		kref_put(&dev->ref, nvm_free);
		return ret;
	}

	exp_pool_size = max_t(int, PAGE_SIZE,
			      (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
	exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);

	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
						  exp_pool_size);
	if (!dev->dma_pool) {
		pr_err("could not create dma pool\n");
		kref_put(&dev->ref, nvm_free);
		return -ENOMEM;
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t, false);
		kref_put(&dev->ref, nvm_free);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	kref_put(&dev->ref, nvm_free);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("device not found\n");
		return -EINVAL;
	}

	kref_get(&dev->ref);
	ret = nvm_create_tgt(dev, create);
	if (ret)
		kref_put(&dev->ref, nvm_free);

	return ret;
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_tgtt_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_tgtt_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i > 31) {
			pr_err("max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
	    create.conf.e.rsv != 0) {
		pr_err("reserved config field in use\n");
		return -EINVAL;
	}

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		__u32 flags = create.flags;

		/* Check for valid flags */
		if (flags & NVM_TARGET_FACTORY)
			flags &= ~NVM_TARGET_FACTORY;

		if (flags) {
			pr_err("flag not supported\n");
			return -EINVAL;
		}
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("no flags supported\n");
		return -EINVAL;
	}

	return nvm_remove_tgt(&remove);
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);