/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>
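
/*
 * Vendor-specific admin opcodes defined by the open-channel SSD
 * (LightNVM) 1.2 interface: identify the device geometry and get/set
 * the per-LUN bad block table.
 */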
enum nvme_nvm_admin_opcode {
        nvme_nvm_admin_identity         = 0xe2,
        nvme_nvm_admin_get_bb_tbl       = 0xf2,
        nvme_nvm_admin_set_bb_tbl       = 0xf1,
};
struct nvme_nvm_ph_rw {

struct nvme_nvm_identity {

struct nvme_nvm_getbbtbl {

struct nvme_nvm_setbbtbl {

struct nvme_nvm_erase_blk {

struct nvme_nvm_command {
        union {
                struct nvme_common_command common;
                struct nvme_nvm_identity identity;
                struct nvme_nvm_ph_rw ph_rw;
                struct nvme_nvm_getbbtbl get_bb;
                struct nvme_nvm_setbbtbl set_bb;
                struct nvme_nvm_erase_blk erase;
        };
};
#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
        __u8                    pairs[NVME_NVM_LP_MLC_PAIRS];
};

struct nvme_nvm_lp_tbl {
        struct nvme_nvm_lp_mlc  mlc;
};
struct nvme_nvm_id_group {
        struct nvme_nvm_lp_tbl lptbl;

struct nvme_nvm_addr_format {

struct nvme_nvm_id {
        struct nvme_nvm_addr_format ppaf;
        struct nvme_nvm_id_group groups[4];

struct nvme_nvm_bb_tbl {
/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
        BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != NVME_IDENTIFY_DATA_SIZE);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
}
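
/*
 * init_grps() copies the single supported configuration group out of the
 * identify page into the host-native nvm_id: it byte-swaps the
 * little-endian fields and derives the chunk geometry (sectors per page,
 * sectors per plane, logical blocks per chunk) used by the lightnvm core.
 */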
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
        struct nvme_nvm_id_group *src;
        struct nvm_id_group *grp;
        int sec_per_pg, sec_per_pl, pg_per_blk;

        if (nvme_nvm_id->cgrps != 1)
                return -EINVAL;

        src = &nvme_nvm_id->groups[0];
        grp = &nvm_id->grp;

        grp->mtype = src->mtype;
        grp->fmtype = src->fmtype;

        grp->num_ch = src->num_ch;
        grp->num_lun = src->num_lun;

        grp->num_chk = le16_to_cpu(src->num_chk);
        grp->csecs = le16_to_cpu(src->csecs);
        grp->sos = le16_to_cpu(src->sos);

        pg_per_blk = le16_to_cpu(src->num_pg);
        sec_per_pg = le16_to_cpu(src->fpg_sz) / grp->csecs;
        sec_per_pl = sec_per_pg * src->num_pln;
        grp->clba = sec_per_pl * pg_per_blk;
        grp->ws_per_chk = pg_per_blk;

        grp->mpos = le32_to_cpu(src->mpos);
        grp->cpar = le16_to_cpu(src->cpar);
        grp->mccap = le32_to_cpu(src->mccap);

        grp->ws_opt = grp->ws_min = sec_per_pg;
        grp->ws_seq = NVM_IO_SNGL_ACCESS;

        if (grp->mpos & 0x020202) {
                grp->ws_seq = NVM_IO_DUAL_ACCESS;
        } else if (grp->mpos & 0x040404) {
                grp->ws_seq = NVM_IO_QUAD_ACCESS;
        }

        grp->trdt = le32_to_cpu(src->trdt);
        grp->trdm = le32_to_cpu(src->trdm);
        grp->tprt = le32_to_cpu(src->tprt);
        grp->tprm = le32_to_cpu(src->tprm);
        grp->tbet = le32_to_cpu(src->tbet);
        grp->tbem = le32_to_cpu(src->tbem);

        /* 1.2 compatibility */
        grp->num_pln = src->num_pln;
        grp->num_pg = le16_to_cpu(src->num_pg);
        grp->fpg_sz = le16_to_cpu(src->fpg_sz);

        return 0;
}
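
/*
 * nvme_nvm_identity() issues the vendor-specific identify command on the
 * admin queue and translates the returned page (version, capabilities,
 * PPA address format, group geometry) into the generic nvm_id.
 */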
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_id *nvme_nvm_id;
        struct nvme_nvm_command c = {};
        int ret;

        c.identity.opcode = nvme_nvm_admin_identity;
        c.identity.nsid = cpu_to_le32(ns->head->ns_id);
        c.identity.chnl_off = 0;

        nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
        if (!nvme_nvm_id)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                nvme_nvm_id, sizeof(struct nvme_nvm_id));
        if (ret) {
                ret = -EIO;
                goto out;
        }

        nvm_id->ver_id = nvme_nvm_id->ver_id;
        nvm_id->vmnt = nvme_nvm_id->vmnt;
        nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
        nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
        memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
                                        sizeof(struct nvm_addr_format));

        ret = init_grps(nvm_id, nvme_nvm_id);
out:
        kfree(nvme_nvm_id);
        return ret;
}
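
/*
 * nvme_nvm_get_bb_tbl() fetches the bad block table for the LUN addressed
 * by @ppa and sanity-checks the "BBLT" signature, table version and block
 * count before copying the per-block state bytes into @blks.
 */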
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
                                                                u8 *blks)
{
        struct request_queue *q = nvmdev->q;
        struct nvm_geo *geo = &nvmdev->geo;
        struct nvme_ns *ns = q->queuedata;
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_nvm_command c = {};
        struct nvme_nvm_bb_tbl *bb_tbl;
        int nr_blks = geo->nr_chks * geo->plane_mode;
        int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
        int ret = 0;

        c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
        c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
        c.get_bb.spba = cpu_to_le64(ppa.ppa);

        bb_tbl = kzalloc(tblsz, GFP_KERNEL);
        if (!bb_tbl)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
                                                                bb_tbl, tblsz);
        if (ret) {
                dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
                ret = -EIO;
                goto out;
        }

        if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
                bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
                dev_err(ctrl->device, "bbt format mismatch\n");
                ret = -EINVAL;
                goto out;
        }

        if (le16_to_cpu(bb_tbl->verid) != 1) {
                dev_err(ctrl->device, "bbt version not supported\n");
                ret = -EINVAL;
                goto out;
        }

        if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
                dev_err(ctrl->device,
                                "bbt unexpected blocks returned (%u!=%u)",
                                le32_to_cpu(bb_tbl->tblks), nr_blks);
                ret = -EINVAL;
                goto out;
        }

        memcpy(blks, bb_tbl->blk, geo->nr_chks * geo->plane_mode);
out:
        kfree(bb_tbl);
        return ret;
}
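
/*
 * nvme_nvm_set_bb_tbl() marks @nr_ppas blocks, starting at *ppas, with the
 * given bad-block state via the set bad block table admin command.
 */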
static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
                                                        int nr_ppas, int type)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_command c = {};
        int ret = 0;

        c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
        c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
        c.set_bb.spba = cpu_to_le64(ppas->ppa);
        c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
        c.set_bb.value = type;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                                                NULL, 0);
        if (ret)
                dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
                                                                        ret);
        return ret;
}
static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
                                    struct nvme_nvm_command *c)
{
        c->ph_rw.opcode = rqd->opcode;
        c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
        c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
        c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
        c->ph_rw.control = cpu_to_le16(rqd->flags);
        c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
}
static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
{
        struct nvm_rq *rqd = rq->end_io_data;

        rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
        rqd->error = nvme_req(rq)->status;
        nvm_end_io(rqd);

        kfree(nvme_req(rq)->cmd);
        blk_mq_free_request(rq);
}
static struct request *nvme_nvm_alloc_request(struct request_queue *q,
                                              struct nvm_rq *rqd,
                                              struct nvme_nvm_command *cmd)
{
        struct nvme_ns *ns = q->queuedata;
        struct request *rq;

        nvme_nvm_rqtocmd(rqd, ns, cmd);

        rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
        if (IS_ERR(rq))
                return rq;

        rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

        if (rqd->bio)
                blk_init_request_from_bio(rq, rqd->bio);
        else
                rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

        return rq;
}
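
/*
 * nvme_nvm_submit_io() is the asynchronous I/O path used by lightnvm
 * targets: it allocates a command, wraps it in a block layer request and
 * fires it with blk_execute_rq_nowait(); nvme_nvm_end_io() completes the
 * nvm_rq and frees the command.
 */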
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct nvme_nvm_command *cmd;
        struct request *rq;

        cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        rq = nvme_nvm_alloc_request(q, rqd, cmd);
        if (IS_ERR(rq)) {
                kfree(cmd);
                return PTR_ERR(rq);
        }

        rq->end_io_data = rqd;

        blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

        return 0;
}
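
/*
 * nvme_nvm_submit_io_sync() is the synchronous variant: the command lives
 * on the stack, the request is executed inline, and the PPA completion
 * status and NVMe status are handed back through rqd.
 */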
static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct request *rq;
        struct nvme_nvm_command cmd;
        int ret = 0;

        memset(&cmd, 0, sizeof(struct nvme_nvm_command));

        rq = nvme_nvm_alloc_request(q, rqd, &cmd);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* I/Os can fail and the error is signaled through rqd. Callers must
         * handle the error accordingly.
         */
        blk_execute_rq(q, NULL, rq, 0);
        if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;

        rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
        rqd->error = nvme_req(rq)->status;

        blk_mq_free_request(rq);

        return ret;
}
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;

        return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
        struct dma_pool *dma_pool = pool;

        dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
                                    gfp_t mem_flags, dma_addr_t *dma_handler)
{
        return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
                                                dma_addr_t dma_handler)
{
        dma_pool_free(pool, addr, dma_handler);
}
static struct nvm_dev_ops nvme_nvm_dev_ops = {
        .identity               = nvme_nvm_identity,

        .get_bb_tbl             = nvme_nvm_get_bb_tbl,
        .set_bb_tbl             = nvme_nvm_set_bb_tbl,

        .submit_io              = nvme_nvm_submit_io,
        .submit_io_sync         = nvme_nvm_submit_io_sync,

        .create_dma_pool        = nvme_nvm_create_dma_pool,
        .destroy_dma_pool       = nvme_nvm_destroy_dma_pool,
        .dev_dma_alloc          = nvme_nvm_dev_dma_alloc,
        .dev_dma_free           = nvme_nvm_dev_dma_free,
};
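
/*
 * nvme_nvm_submit_user_cmd() backs the vector I/O ioctls: it builds a
 * request from the user-supplied command, maps the user data buffer, and
 * stages the PPA list and out-of-band metadata through the device DMA
 * pool before executing the request synchronously.
 */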
static int nvme_nvm_submit_user_cmd(struct request_queue *q,
                                struct nvme_ns *ns,
                                struct nvme_nvm_command *vcmd,
                                void __user *ubuf, unsigned int bufflen,
                                void __user *meta_buf, unsigned int meta_len,
                                void __user *ppa_buf, unsigned int ppa_len,
                                u32 *result, u64 *status, unsigned int timeout)
{
        bool write = nvme_is_write((struct nvme_command *)vcmd);
        struct nvm_dev *dev = ns->ndev;
        struct gendisk *disk = ns->disk;
        struct request *rq;
        struct bio *bio = NULL;
        __le64 *ppa_list = NULL;
        dma_addr_t ppa_dma;
        __le64 *metadata = NULL;
        dma_addr_t metadata_dma;
        DECLARE_COMPLETION_ONSTACK(wait);
        int ret = 0;

        rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
                        NVME_QID_ANY);
        if (IS_ERR(rq))
                return -ENOMEM;

        rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;

        if (ppa_buf && ppa_len) {
                ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
                if (!ppa_list) {
                        ret = -ENOMEM;
                        goto err_rq;
                }
                if (copy_from_user(ppa_list, (void __user *)ppa_buf,
                                                sizeof(u64) * (ppa_len + 1))) {
                        ret = -EFAULT;
                        goto err_ppa;
                }
                vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
        } else {
                vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
        }

        if (ubuf && bufflen) {
                ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
                if (ret)
                        goto err_ppa;
                bio = rq->bio;

                if (meta_buf && meta_len) {
                        metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
                                                                &metadata_dma);
                        if (!metadata) {
                                ret = -ENOMEM;
                                goto err_map;
                        }

                        if (write) {
                                if (copy_from_user(metadata,
                                                (void __user *)meta_buf,
                                                meta_len)) {
                                        ret = -EFAULT;
                                        goto err_meta;
                                }
                        }
                        vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
                }

                bio->bi_disk = disk;
        }

        blk_execute_rq(q, NULL, rq, 0);

        if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;
        else if (nvme_req(rq)->status & 0x7ff)
                ret = -EIO;
        if (result)
                *result = nvme_req(rq)->status & 0x7ff;
        if (status)
                *status = le64_to_cpu(nvme_req(rq)->result.u64);

        if (metadata && !ret && !write) {
                if (copy_to_user(meta_buf, (void *)metadata, meta_len))
                        ret = -EFAULT;
        }
err_meta:
        if (meta_buf && meta_len)
                dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
        if (bio)
                blk_rq_unmap_user(bio);
err_ppa:
        if (ppa_buf && ppa_len)
                dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
        blk_mq_free_request(rq);
        return ret;
}
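
/*
 * nvme_nvm_submit_vio() handles NVME_NVM_IOCTL_SUBMIT_VIO: it translates
 * the user's nvm_user_vio into a physical read/write command and copies
 * the updated structure (result and status fields) back on error.
 */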
static int nvme_nvm_submit_vio(struct nvme_ns *ns,
                                        struct nvm_user_vio __user *uvio)
{
        struct nvm_user_vio vio;
        struct nvme_nvm_command c;
        unsigned int length;
        int ret;

        if (copy_from_user(&vio, uvio, sizeof(vio)))
                return -EFAULT;

        memset(&c, 0, sizeof(c));
        c.ph_rw.opcode = vio.opcode;
        c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
        c.ph_rw.control = cpu_to_le16(vio.control);
        c.ph_rw.length = cpu_to_le16(vio.nppas);

        length = (vio.nppas + 1) << ns->lba_shift;

        ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
                        (void __user *)(uintptr_t)vio.addr, length,
                        (void __user *)(uintptr_t)vio.metadata,
                                                        vio.metadata_len,
                        (void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
                        &vio.result, &vio.status, 0);

        if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
                return -EFAULT;

        return ret;
}
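
/*
 * nvme_nvm_user_vcmd() is the raw passthrough used by the admin and I/O
 * vector ioctls: the opcode and cdw2/3/13/14/15 are taken verbatim from
 * userspace; only opcode 0xF2 (get bad block table) is allowed without
 * CAP_SYS_ADMIN.
 */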
static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
                                        struct nvm_passthru_vio __user *uvcmd)
{
        struct nvm_passthru_vio vcmd;
        struct nvme_nvm_command c;
        struct request_queue *q;
        unsigned int timeout = 0;
        int ret;

        if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
                return -EFAULT;
        if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
                return -EACCES;

        memset(&c, 0, sizeof(c));
        c.common.opcode = vcmd.opcode;
        c.common.nsid = cpu_to_le32(ns->head->ns_id);
        c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
        c.ph_rw.length = cpu_to_le16(vcmd.nppas);
        c.ph_rw.control = cpu_to_le16(vcmd.control);
        c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13);
        c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14);
        c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15);

        if (vcmd.timeout_ms)
                timeout = msecs_to_jiffies(vcmd.timeout_ms);

        q = admin ? ns->ctrl->admin_q : ns->queue;

        ret = nvme_nvm_submit_user_cmd(q, ns,
                        (struct nvme_nvm_command *)&c,
                        (void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
                        (void __user *)(uintptr_t)vcmd.metadata,
                                                        vcmd.metadata_len,
                        (void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
                        &vcmd.result, &vcmd.status, timeout);

        if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
                return -EFAULT;

        return ret;
}
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case NVME_NVM_IOCTL_ADMIN_VIO:
                return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
        case NVME_NVM_IOCTL_IO_VIO:
                return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
        case NVME_NVM_IOCTL_SUBMIT_VIO:
                return nvme_nvm_submit_vio(ns, (void __user *)arg);
        default:
                return -ENOTTY;
        }
}
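
/*
 * Illustrative only (not part of this driver): a minimal sketch of how a
 * userspace tool might submit a vector I/O through the ioctl above. The
 * device path, opcode value and error handling are assumptions; the field
 * names follow struct nvm_user_vio from <uapi/linux/lightnvm.h> as used in
 * nvme_nvm_submit_vio() above.
 *
 *	int fd = open("/dev/nvme0n1", O_RDWR);
 *	struct nvm_user_vio vio = { 0 };
 *
 *	vio.opcode = 0x92;			// assumed: physical page read
 *	vio.nppas = 0;				// zero-based: one PPA
 *	vio.ppa_list = (__u64)(uintptr_t)&ppa;	// device-format PPA(s)
 *	vio.addr = (__u64)(uintptr_t)buf;	// data buffer, one sector
 *
 *	if (ioctl(fd, NVME_NVM_IOCTL_SUBMIT_VIO, &vio) < 0)
 *		perror("NVME_NVM_IOCTL_SUBMIT_VIO");
 */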
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
        struct request_queue *q = ns->queue;
        struct nvm_dev *dev;

        _nvme_nvm_check_size();

        dev = nvm_alloc_dev(node);
        if (!dev)
                return -ENOMEM;

        dev->q = q;
        memcpy(dev->name, disk_name, DISK_NAME_LEN);
        dev->ops = &nvme_nvm_dev_ops;
        dev->private_data = ns;
        ns->ndev = dev;

        return nvm_register(dev);
}

void nvme_nvm_unregister(struct nvme_ns *ns)
{
        nvm_unregister(ns->ndev);
}
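
/*
 * Sysfs support: nvm_dev_attr_show() backs a set of read-only attributes
 * on the namespace's disk device that expose the identify geometry
 * (channels, LUNs, planes, blocks, page and sector sizes, timings,
 * multi-plane modes).
 */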
static ssize_t nvm_dev_attr_show(struct device *dev,
                                 struct device_attribute *dattr, char *page)
{
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_id *id;
        struct nvm_id_group *grp;
        struct attribute *attr;

        if (!ndev)
                return 0;

        id = &ndev->identity;
        grp = &id->grp;
        attr = &dattr->attr;

        if (strcmp(attr->name, "version") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
        } else if (strcmp(attr->name, "vendor_opcode") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
        } else if (strcmp(attr->name, "capabilities") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
        } else if (strcmp(attr->name, "device_mode") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
        /* kept for compatibility */
        } else if (strcmp(attr->name, "media_manager") == 0) {
                return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
        } else if (strcmp(attr->name, "ppa_format") == 0) {
                return scnprintf(page, PAGE_SIZE,
                        "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
                        id->ppaf.ch_offset, id->ppaf.ch_len,
                        id->ppaf.lun_offset, id->ppaf.lun_len,
                        id->ppaf.pln_offset, id->ppaf.pln_len,
                        id->ppaf.blk_offset, id->ppaf.blk_len,
                        id->ppaf.pg_offset, id->ppaf.pg_len,
                        id->ppaf.sect_offset, id->ppaf.sect_len);
        } else if (strcmp(attr->name, "media_type") == 0) {     /* u8 */
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
        } else if (strcmp(attr->name, "flash_media_type") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
        } else if (strcmp(attr->name, "num_channels") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
        } else if (strcmp(attr->name, "num_luns") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
        } else if (strcmp(attr->name, "num_planes") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
        } else if (strcmp(attr->name, "num_blocks") == 0) {     /* u16 */
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_chk);
        } else if (strcmp(attr->name, "num_pages") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
        } else if (strcmp(attr->name, "page_size") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
        } else if (strcmp(attr->name, "hw_sector_size") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
        } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
        } else if (strcmp(attr->name, "read_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
        } else if (strcmp(attr->name, "read_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
        } else if (strcmp(attr->name, "prog_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
        } else if (strcmp(attr->name, "prog_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
        } else if (strcmp(attr->name, "erase_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
        } else if (strcmp(attr->name, "erase_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
        } else if (strcmp(attr->name, "multiplane_modes") == 0) {
                return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
        } else if (strcmp(attr->name, "media_capabilities") == 0) {
                return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
        } else if (strcmp(attr->name, "max_phys_secs") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n",
                                ndev->ops->max_phys_sect);
        } else {
                return scnprintf(page,
                                 PAGE_SIZE,
                                 "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
                                 attr->name);
        }
}
#define NVM_DEV_ATTR_RO(_name)						\
        DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)

static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(vendor_opcode);
static NVM_DEV_ATTR_RO(capabilities);
static NVM_DEV_ATTR_RO(device_mode);
static NVM_DEV_ATTR_RO(ppa_format);
static NVM_DEV_ATTR_RO(media_manager);

static NVM_DEV_ATTR_RO(media_type);
static NVM_DEV_ATTR_RO(flash_media_type);
static NVM_DEV_ATTR_RO(num_channels);
static NVM_DEV_ATTR_RO(num_luns);
static NVM_DEV_ATTR_RO(num_planes);
static NVM_DEV_ATTR_RO(num_blocks);
static NVM_DEV_ATTR_RO(num_pages);
static NVM_DEV_ATTR_RO(page_size);
static NVM_DEV_ATTR_RO(hw_sector_size);
static NVM_DEV_ATTR_RO(oob_sector_size);
static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);
static NVM_DEV_ATTR_RO(prog_typ);
static NVM_DEV_ATTR_RO(prog_max);
static NVM_DEV_ATTR_RO(erase_typ);
static NVM_DEV_ATTR_RO(erase_max);
static NVM_DEV_ATTR_RO(multiplane_modes);
static NVM_DEV_ATTR_RO(media_capabilities);
static NVM_DEV_ATTR_RO(max_phys_secs);
*nvm_dev_attrs
[] = {
864 &dev_attr_version
.attr
,
865 &dev_attr_vendor_opcode
.attr
,
866 &dev_attr_capabilities
.attr
,
867 &dev_attr_device_mode
.attr
,
868 &dev_attr_media_manager
.attr
,
870 &dev_attr_ppa_format
.attr
,
871 &dev_attr_media_type
.attr
,
872 &dev_attr_flash_media_type
.attr
,
873 &dev_attr_num_channels
.attr
,
874 &dev_attr_num_luns
.attr
,
875 &dev_attr_num_planes
.attr
,
876 &dev_attr_num_blocks
.attr
,
877 &dev_attr_num_pages
.attr
,
878 &dev_attr_page_size
.attr
,
879 &dev_attr_hw_sector_size
.attr
,
880 &dev_attr_oob_sector_size
.attr
,
881 &dev_attr_read_typ
.attr
,
882 &dev_attr_read_max
.attr
,
883 &dev_attr_prog_typ
.attr
,
884 &dev_attr_prog_max
.attr
,
885 &dev_attr_erase_typ
.attr
,
886 &dev_attr_erase_max
.attr
,
887 &dev_attr_multiplane_modes
.attr
,
888 &dev_attr_media_capabilities
.attr
,
889 &dev_attr_max_phys_secs
.attr
,
893 static const struct attribute_group nvm_dev_attr_group
= {
895 .attrs
= nvm_dev_attrs
,
898 int nvme_nvm_register_sysfs(struct nvme_ns
*ns
)
900 return sysfs_create_group(&disk_to_dev(ns
->disk
)->kobj
,
901 &nvm_dev_attr_group
);
904 void nvme_nvm_unregister_sysfs(struct nvme_ns
*ns
)
906 sysfs_remove_group(&disk_to_dev(ns
->disk
)->kobj
,
907 &nvm_dev_attr_group
);