/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */

#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>
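
/*
 * Vendor-specific admin opcodes defined by the LightNVM (Open-Channel SSD)
 * interface: device identify, logical-to-physical table retrieval and
 * bad block table get/set.
 */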
enum nvme_nvm_admin_opcode {
	nvme_nvm_admin_identity		= 0xe2,
	nvme_nvm_admin_get_l2p_tbl	= 0xea,
	nvme_nvm_admin_get_bb_tbl	= 0xf2,
	nvme_nvm_admin_set_bb_tbl	= 0xf1,
};

struct nvme_nvm_hb_rw {
	/* ... */
};

struct nvme_nvm_ph_rw {
	/* ... */
};

struct nvme_nvm_identity {
	/* ... */
};

struct nvme_nvm_l2ptbl {
	/* ... */
};

struct nvme_nvm_getbbtbl {
	/* ... */
};

struct nvme_nvm_setbbtbl {
	/* ... */
};

struct nvme_nvm_erase_blk {
	/* ... */
};

struct nvme_nvm_command {
	union {
		struct nvme_common_command common;
		struct nvme_nvm_identity identity;
		struct nvme_nvm_hb_rw hb_rw;
		struct nvme_nvm_ph_rw ph_rw;
		struct nvme_nvm_l2ptbl l2p;
		struct nvme_nvm_getbbtbl get_bb;
		struct nvme_nvm_setbbtbl set_bb;
		struct nvme_nvm_erase_blk erase;
	};
};

#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
	__le16			num_pairs;
	__u8			pairs[NVME_NVM_LP_MLC_PAIRS];
};

struct nvme_nvm_lp_tbl {
	__u8			id[8];
	struct nvme_nvm_lp_mlc	mlc;
};

struct nvme_nvm_id_group {
	/* ... */
	struct nvme_nvm_lp_tbl	lptbl;
} __packed;

struct nvme_nvm_addr_format {
	/* ... */
} __packed;

struct nvme_nvm_id {
	/* ... */
	struct nvme_nvm_addr_format ppaf;
	/* ... */
	struct nvme_nvm_id_group groups[4];
} __packed;

struct nvme_nvm_bb_tbl {
	/* ... */
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
}
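
/*
 * Copy the single supported configuration group from the device identify
 * data into the host-side nvm_id, converting little-endian fields to CPU
 * endianness. For MLC media the program-order pair table is copied as
 * well, bounded by NVME_NVM_LP_MLC_PAIRS.
 */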
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
	struct nvme_nvm_id_group *src;
	struct nvm_id_group *dst;

	if (nvme_nvm_id->cgrps != 1)
		return -EINVAL;

	src = &nvme_nvm_id->groups[0];
	dst = &nvm_id->grp;

	dst->mtype = src->mtype;
	dst->fmtype = src->fmtype;
	dst->num_ch = src->num_ch;
	dst->num_lun = src->num_lun;
	dst->num_pln = src->num_pln;

	dst->num_pg = le16_to_cpu(src->num_pg);
	dst->num_blk = le16_to_cpu(src->num_blk);
	dst->fpg_sz = le16_to_cpu(src->fpg_sz);
	dst->csecs = le16_to_cpu(src->csecs);
	dst->sos = le16_to_cpu(src->sos);

	dst->trdt = le32_to_cpu(src->trdt);
	dst->trdm = le32_to_cpu(src->trdm);
	dst->tprt = le32_to_cpu(src->tprt);
	dst->tprm = le32_to_cpu(src->tprm);
	dst->tbet = le32_to_cpu(src->tbet);
	dst->tbem = le32_to_cpu(src->tbem);
	dst->mpos = le32_to_cpu(src->mpos);
	dst->mccap = le32_to_cpu(src->mccap);

	dst->cpar = le16_to_cpu(src->cpar);

	if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
		memcpy(dst->lptbl.id, src->lptbl.id, 8);
		dst->lptbl.mlc.num_pairs =
				le16_to_cpu(src->lptbl.mlc.num_pairs);

		if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
			pr_err("nvm: number of MLC pairs not supported\n");
			return -EINVAL;
		}

		memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
					dst->lptbl.mlc.num_pairs);
	}

	return 0;
}
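
/*
 * Issue the vendor-specific identify command on the admin queue and
 * translate the returned geometry into the lightnvm core's nvm_id.
 */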
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_id *nvme_nvm_id;
	struct nvme_nvm_command c = {};
	int ret;

	c.identity.opcode = nvme_nvm_admin_identity;
	c.identity.nsid = cpu_to_le32(ns->ns_id);
	c.identity.chnl_off = 0;

	nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
	if (!nvme_nvm_id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
				nvme_nvm_id, sizeof(struct nvme_nvm_id));
	if (ret) {
		ret = -EIO;
		goto out;
	}

	nvm_id->ver_id = nvme_nvm_id->ver_id;
	nvm_id->vmnt = nvme_nvm_id->vmnt;
	nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
	nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
	memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
					sizeof(struct nvm_addr_format));

	ret = init_grps(nvm_id, nvme_nvm_id);
out:
	kfree(nvme_nvm_id);
	return ret;
}
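
/*
 * Fetch the device's logical-to-physical table in chunks bounded by the
 * admin queue's maximum transfer size, handing each chunk to the caller
 * through update_l2p() after translating entries into the target address
 * space.
 */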
static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
				nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_command c = {};
	u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
	u32 nlb_pr_rq = len / sizeof(u64);
	u64 cmd_slba = slba;
	void *entries;
	int ret = 0;

	c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
	c.l2p.nsid = cpu_to_le32(ns->ns_id);
	entries = kmalloc(len, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	while (nlb) {
		u32 cmd_nlb = min(nlb_pr_rq, nlb);
		u64 elba = slba + cmd_nlb;

		c.l2p.slba = cpu_to_le64(cmd_slba);
		c.l2p.nlb = cpu_to_le32(cmd_nlb);

		ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
				(struct nvme_command *)&c, entries, len);
		if (ret) {
			dev_err(ns->ctrl->device,
					"L2P table transfer failed (%d)\n", ret);
			ret = -EIO;
			goto out;
		}

		if (unlikely(elba > nvmdev->total_secs)) {
			pr_err("nvm: L2P data from device is out of bounds!\n");
			ret = -EINVAL;
			goto out;
		}

		/* Transform physical address to target address space */
		nvm_part_to_tgt(nvmdev, entries, cmd_nlb);

		if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
			ret = -EINTR;
			goto out;
		}

		cmd_slba += cmd_nlb;
		nlb -= cmd_nlb;
	}

out:
	kfree(entries);
	return ret;
}
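
/*
 * Retrieve the bad block table for one LUN and validate the "BBLT"
 * signature, version and block count before copying the per-block states
 * into the caller's buffer.
 */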
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
								u8 *blks)
{
	struct request_queue *q = nvmdev->q;
	struct nvm_geo *geo = &nvmdev->geo;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_nvm_command c = {};
	struct nvme_nvm_bb_tbl *bb_tbl;
	int nr_blks = geo->blks_per_lun * geo->plane_mode;
	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
	int ret = 0;

	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
	c.get_bb.nsid = cpu_to_le32(ns->ns_id);
	c.get_bb.spba = cpu_to_le64(ppa.ppa);

	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
	if (!bb_tbl)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
								bb_tbl, tblsz);
	if (ret) {
		dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
		ret = -EIO;
		goto out;
	}

	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
		dev_err(ctrl->device, "bbt format mismatch\n");
		ret = -EINVAL;
		goto out;
	}

	if (le16_to_cpu(bb_tbl->verid) != 1) {
		ret = -EINVAL;
		dev_err(ctrl->device, "bbt version not supported\n");
		goto out;
	}

	if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
		ret = -EINVAL;
		dev_err(ctrl->device,
				"bbt unexpected blocks returned (%u!=%u)",
				le32_to_cpu(bb_tbl->tblks), nr_blks);
		goto out;
	}

	memcpy(blks, bb_tbl->blk, geo->blks_per_lun * geo->plane_mode);
out:
	kfree(bb_tbl);
	return ret;
}
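
/*
 * Mark a range of blocks on the device with the given block state through
 * the set bad block table admin command.
 */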
static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
							int nr_ppas, int type)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_command c = {};
	int ret = 0;

	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
	c.set_bb.nsid = cpu_to_le32(ns->ns_id);
	c.set_bb.spba = cpu_to_le64(ppas->ppa);
	c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
	c.set_bb.value = type;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
								NULL, 0);
	if (ret)
		dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
									ret);
	return ret;
}
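
/*
 * Translate a lightnvm request into the vendor read/write command. Hybrid
 * commands additionally carry the logical start sector of the bio.
 */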
static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
				    struct nvme_nvm_command *c)
{
	c->ph_rw.opcode = rqd->opcode;
	c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
	c->ph_rw.control = cpu_to_le16(rqd->flags);
	c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);

	if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
		c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
					rqd->bio->bi_iter.bi_sector));
}
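
/*
 * Completion handler for asynchronous lightnvm I/O: propagate the command
 * result (ppa status) and status code to the nvm_rq, hand it back to the
 * lightnvm core, then free the command buffer and the request.
 */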
static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
{
	struct nvm_rq *rqd = rq->end_io_data;

	rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
	rqd->error = nvme_req(rq)->status;
	nvm_end_io(rqd);

	kfree(nvme_req(rq)->cmd);
	blk_mq_free_request(rq);
}
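
/*
 * Build and submit an asynchronous vendor I/O command. The command buffer
 * is freed in nvme_nvm_end_io() once the request completes.
 */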
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;
	struct bio *bio = rqd->bio;
	struct nvme_nvm_command *cmd;

	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	nvme_nvm_rqtocmd(rqd, ns, cmd);

	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
	if (IS_ERR(rq)) {
		kfree(cmd);
		return -ENOMEM;
	}
	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

	if (bio) {
		blk_init_request_from_bio(rq, bio);
	} else {
		rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
		rq->__data_len = 0;
	}

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;
}
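
/*
 * Per-device DMA pool helpers. The pool is backed by the controller's
 * struct device and is used below for PPA lists and metadata buffers.
 */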
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;

	return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
	struct dma_pool *dma_pool = pool;

	dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				    gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
							dma_addr_t dma_handler)
{
	dma_pool_free(pool, addr, dma_handler);
}
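
/* Device operations handed to the lightnvm core at registration time. */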
static struct nvm_dev_ops nvme_nvm_dev_ops = {
	.identity		= nvme_nvm_identity,

	.get_l2p_tbl		= nvme_nvm_get_l2p_tbl,

	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
	.set_bb_tbl		= nvme_nvm_set_bb_tbl,

	.submit_io		= nvme_nvm_submit_io,

	.create_dma_pool	= nvme_nvm_create_dma_pool,
	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
	.dev_dma_alloc		= nvme_nvm_dev_dma_alloc,
	.dev_dma_free		= nvme_nvm_dev_dma_free,

	.max_phys_sect		= 64,
};
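
/*
 * Common path for user-space vector commands: map the data buffer, copy in
 * the PPA list and (for writes) the out-of-band metadata through the device
 * DMA pool, execute the request synchronously and copy the results back.
 */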
static int nvme_nvm_submit_user_cmd(struct request_queue *q,
				struct nvme_ns *ns,
				struct nvme_nvm_command *vcmd,
				void __user *ubuf, unsigned int bufflen,
				void __user *meta_buf, unsigned int meta_len,
				void __user *ppa_buf, unsigned int ppa_len,
				u32 *result, u64 *status, unsigned int timeout)
{
	bool write = nvme_is_write((struct nvme_command *)vcmd);
	struct nvm_dev *dev = ns->ndev;
	struct gendisk *disk = ns->disk;
	struct request *rq;
	struct bio *bio = NULL;
	__le64 *ppa_list = NULL;
	dma_addr_t ppa_dma;
	__le64 *metadata = NULL;
	dma_addr_t metadata_dma;
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret = 0;

	rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
			NVME_QID_ANY);
	if (IS_ERR(rq)) {
		ret = -ENOMEM;
		goto err_cmd;
	}

	rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

	if (ppa_buf && ppa_len) {
		ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
		if (!ppa_list) {
			ret = -ENOMEM;
			goto err_rq;
		}
		if (copy_from_user(ppa_list, (void __user *)ppa_buf,
						sizeof(u64) * (ppa_len + 1))) {
			ret = -EFAULT;
			goto err_ppa;
		}
		vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
	} else {
		vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
	}

	if (ubuf && bufflen) {
		ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
		if (ret)
			goto err_ppa;
		bio = rq->bio;

		if (meta_buf && meta_len) {
			metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
							&metadata_dma);
			if (!metadata) {
				ret = -ENOMEM;
				goto err_map;
			}

			if (write) {
				if (copy_from_user(metadata,
						(void __user *)meta_buf,
						meta_len)) {
					ret = -EFAULT;
					goto err_meta;
				}
			}
			vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
		}

		if (!disk)
			goto submit;

		bio->bi_bdev = bdget_disk(disk, 0);
		if (!bio->bi_bdev) {
			ret = -ENODEV;
			goto err_meta;
		}
	}

submit:
	blk_execute_rq(q, NULL, rq, 0);

	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else if (nvme_req(rq)->status & 0x7ff)
		ret = -EIO;
	if (result)
		*result = nvme_req(rq)->status & 0x7ff;
	if (status)
		*status = le64_to_cpu(nvme_req(rq)->result.u64);

	if (metadata && !ret && !write) {
		if (copy_to_user(meta_buf, (void *)metadata, meta_len))
			ret = -EFAULT;
	}

err_meta:
	if (meta_buf && meta_len)
		dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
	if (bio) {
		if (disk && bio->bi_bdev)
			bdput(bio->bi_bdev);
		blk_rq_unmap_user(bio);
	}
err_ppa:
	if (ppa_buf && ppa_len)
		dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
	blk_mq_free_request(rq);
err_cmd:
	return ret;
}
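
/*
 * NVME_NVM_IOCTL_SUBMIT_VIO: build a physical read/write command from the
 * user-supplied vector I/O descriptor; the data length is derived from the
 * number of PPAs and the namespace LBA size.
 */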
static int nvme_nvm_submit_vio(struct nvme_ns *ns,
					struct nvm_user_vio __user *uvio)
{
	struct nvm_user_vio vio;
	struct nvme_nvm_command c;
	unsigned int length;
	int ret;

	if (copy_from_user(&vio, uvio, sizeof(vio)))
		return -EFAULT;
	if (vio.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.ph_rw.opcode = vio.opcode;
	c.ph_rw.nsid = cpu_to_le32(ns->ns_id);
	c.ph_rw.control = cpu_to_le16(vio.control);
	c.ph_rw.length = cpu_to_le16(vio.nppas);

	length = (vio.nppas + 1) << ns->lba_shift;

	ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
			(void __user *)(uintptr_t)vio.addr, length,
			(void __user *)(uintptr_t)vio.metadata,
							vio.metadata_len,
			(void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
			&vio.result, &vio.status, 0);

	if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
		return -EFAULT;

	return ret;
}
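
/*
 * Generic vendor passthrough for the admin or I/O queue. Unprivileged
 * callers may only issue the get bad block table opcode (0xF2).
 */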
static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
				struct nvm_passthru_vio __user *uvcmd)
{
	struct nvm_passthru_vio vcmd;
	struct nvme_nvm_command c;
	struct request_queue *q;
	unsigned int timeout = 0;
	int ret;

	if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
		return -EFAULT;
	if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
		return -EACCES;
	if (vcmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = vcmd.opcode;
	c.common.nsid = cpu_to_le32(ns->ns_id);
	c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
	/* cdw11-12 */
	c.ph_rw.length = cpu_to_le16(vcmd.nppas);
	c.ph_rw.control = cpu_to_le16(vcmd.control);
	c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15);

	if (vcmd.timeout_ms)
		timeout = msecs_to_jiffies(vcmd.timeout_ms);

	q = admin ? ns->ctrl->admin_q : ns->queue;

	ret = nvme_nvm_submit_user_cmd(q, ns,
			(struct nvme_nvm_command *)&c,
			(void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
			(void __user *)(uintptr_t)vcmd.metadata,
							vcmd.metadata_len,
			(void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
			&vcmd.result, &vcmd.status, timeout);

	if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
		return -EFAULT;

	return ret;
}
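
/* ioctl entry point for LightNVM namespaces, dispatched from the NVMe core. */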
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NVME_NVM_IOCTL_ADMIN_VIO:
		return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
	case NVME_NVM_IOCTL_IO_VIO:
		return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
	case NVME_NVM_IOCTL_SUBMIT_VIO:
		return nvme_nvm_submit_vio(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}
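
/*
 * Allocate and register an nvm_dev for this namespace with the lightnvm
 * subsystem so that targets (e.g. pblk) can be instantiated on top of it.
 */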
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
	struct request_queue *q = ns->queue;
	struct nvm_dev *dev;

	_nvme_nvm_check_size();

	dev = nvm_alloc_dev(node);
	if (!dev)
		return -ENOMEM;

	dev->q = q;
	memcpy(dev->name, disk_name, DISK_NAME_LEN);
	dev->ops = &nvme_nvm_dev_ops;
	dev->private_data = ns;
	ns->ndev = dev;

	return nvm_register(dev);
}

void nvme_nvm_unregister(struct nvme_ns *ns)
{
	nvm_unregister(ns->ndev);
}
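
/*
 * Sysfs attributes attached to the namespace's block device, exposing the
 * geometry reported by identify.
 */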
static ssize_t nvm_dev_attr_show(struct device *dev,
				 struct device_attribute *dattr, char *page)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	struct nvm_dev *ndev = ns->ndev;
	struct nvm_id *id;
	struct nvm_id_group *grp;
	struct attribute *attr;

	if (!ndev)
		return 0;

	id = &ndev->identity;
	grp = &id->grp;
	attr = &dattr->attr;

	if (strcmp(attr->name, "version") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
	} else if (strcmp(attr->name, "vendor_opcode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
	} else if (strcmp(attr->name, "capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
	} else if (strcmp(attr->name, "device_mode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
	/* kept for compatibility */
	} else if (strcmp(attr->name, "media_manager") == 0) {
		return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
	} else if (strcmp(attr->name, "ppa_format") == 0) {
		return scnprintf(page, PAGE_SIZE,
			"0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			id->ppaf.ch_offset, id->ppaf.ch_len,
			id->ppaf.lun_offset, id->ppaf.lun_len,
			id->ppaf.pln_offset, id->ppaf.pln_len,
			id->ppaf.blk_offset, id->ppaf.blk_len,
			id->ppaf.pg_offset, id->ppaf.pg_len,
			id->ppaf.sect_offset, id->ppaf.sect_len);
	} else if (strcmp(attr->name, "media_type") == 0) {	/* u8 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
	} else if (strcmp(attr->name, "flash_media_type") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
	} else if (strcmp(attr->name, "num_channels") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
	} else if (strcmp(attr->name, "num_luns") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
	} else if (strcmp(attr->name, "num_planes") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
	} else if (strcmp(attr->name, "num_blocks") == 0) {	/* u16 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
	} else if (strcmp(attr->name, "num_pages") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
	} else if (strcmp(attr->name, "page_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
	} else if (strcmp(attr->name, "hw_sector_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
	} else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
	} else if (strcmp(attr->name, "read_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
	} else if (strcmp(attr->name, "read_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
	} else if (strcmp(attr->name, "prog_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
	} else if (strcmp(attr->name, "prog_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
	} else if (strcmp(attr->name, "erase_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
	} else if (strcmp(attr->name, "erase_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
	} else if (strcmp(attr->name, "multiplane_modes") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
	} else if (strcmp(attr->name, "media_capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
	} else if (strcmp(attr->name, "max_phys_secs") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n",
				ndev->ops->max_phys_sect);
	} else {
		return scnprintf(page,
				 PAGE_SIZE,
				 "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
				 attr->name);
	}
}

#define NVM_DEV_ATTR_RO(_name)					\
	DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)

static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(vendor_opcode);
static NVM_DEV_ATTR_RO(capabilities);
static NVM_DEV_ATTR_RO(device_mode);
static NVM_DEV_ATTR_RO(ppa_format);
static NVM_DEV_ATTR_RO(media_manager);

static NVM_DEV_ATTR_RO(media_type);
static NVM_DEV_ATTR_RO(flash_media_type);
static NVM_DEV_ATTR_RO(num_channels);
static NVM_DEV_ATTR_RO(num_luns);
static NVM_DEV_ATTR_RO(num_planes);
static NVM_DEV_ATTR_RO(num_blocks);
static NVM_DEV_ATTR_RO(num_pages);
static NVM_DEV_ATTR_RO(page_size);
static NVM_DEV_ATTR_RO(hw_sector_size);
static NVM_DEV_ATTR_RO(oob_sector_size);
static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);
static NVM_DEV_ATTR_RO(prog_typ);
static NVM_DEV_ATTR_RO(prog_max);
static NVM_DEV_ATTR_RO(erase_typ);
static NVM_DEV_ATTR_RO(erase_max);
static NVM_DEV_ATTR_RO(multiplane_modes);
static NVM_DEV_ATTR_RO(media_capabilities);
static NVM_DEV_ATTR_RO(max_phys_secs);

static struct attribute *nvm_dev_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_vendor_opcode.attr,
	&dev_attr_capabilities.attr,
	&dev_attr_device_mode.attr,
	&dev_attr_media_manager.attr,

	&dev_attr_ppa_format.attr,
	&dev_attr_media_type.attr,
	&dev_attr_flash_media_type.attr,
	&dev_attr_num_channels.attr,
	&dev_attr_num_luns.attr,
	&dev_attr_num_planes.attr,
	&dev_attr_num_blocks.attr,
	&dev_attr_num_pages.attr,
	&dev_attr_page_size.attr,
	&dev_attr_hw_sector_size.attr,
	&dev_attr_oob_sector_size.attr,
	&dev_attr_read_typ.attr,
	&dev_attr_read_max.attr,
	&dev_attr_prog_typ.attr,
	&dev_attr_prog_max.attr,
	&dev_attr_erase_typ.attr,
	&dev_attr_erase_max.attr,
	&dev_attr_multiplane_modes.attr,
	&dev_attr_media_capabilities.attr,
	&dev_attr_max_phys_secs.attr,

	NULL,
};

static const struct attribute_group nvm_dev_attr_group = {
	.name		= "lightnvm",
	.attrs		= nvm_dev_attrs,
};

int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
	return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}

void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
{
	sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvm_dev_attr_group);
}

/* move to shared place when used in multiple places. */
#define PCI_VENDOR_ID_CNEX 0x1d1d
#define PCI_DEVICE_ID_CNEX_WL 0x2807
#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f
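
/*
 * Detection of LightNVM-capable controllers: known CNEX Labs PCI IDs
 * combined with a vendor-specific bit in the identify namespace data.
 */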
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	/* XXX: this is poking into PCI structures from generic code! */
	struct pci_dev *pdev = to_pci_dev(ctrl->dev);

	/* QEMU NVMe simulator - PCI ID + Vendor specific bit */
	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
				pdev->device == PCI_DEVICE_ID_CNEX_QEMU &&
							id->vs[0] == 0x1)
		return 1;

	/* CNEX Labs - PCI ID + Vendor specific bit */
	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
				pdev->device == PCI_DEVICE_ID_CNEX_WL &&
							id->vs[0] == 0x1)
		return 1;

	return 0;
}