/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */

#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>
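
/*
 * Vendor-specific admin opcodes defined by the Open-Channel SSD
 * (LightNVM) 1.2 specification; they sit in the vendor opcode range of
 * the NVMe admin command set.
 */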
enum nvme_nvm_admin_opcode {
        nvme_nvm_admin_identity         = 0xe2,
        nvme_nvm_admin_get_l2p_tbl      = 0xea,
        nvme_nvm_admin_get_bb_tbl       = 0xf2,
        nvme_nvm_admin_set_bb_tbl       = 0xf1,
};

struct nvme_nvm_hb_rw {
        /* ... fields elided ... */
};

struct nvme_nvm_ph_rw {
        /* ... fields elided ... */
};

struct nvme_nvm_identity {
        /* ... fields elided ... */
};

struct nvme_nvm_l2ptbl {
        /* ... fields elided ... */
};

struct nvme_nvm_getbbtbl {
        /* ... fields elided ... */
};

struct nvme_nvm_setbbtbl {
        /* ... fields elided ... */
};

struct nvme_nvm_erase_blk {
        /* ... fields elided ... */
};

struct nvme_nvm_command {
        union {
                struct nvme_common_command common;
                struct nvme_nvm_identity identity;
                struct nvme_nvm_hb_rw hb_rw;
                struct nvme_nvm_ph_rw ph_rw;
                struct nvme_nvm_l2ptbl l2p;
                struct nvme_nvm_getbbtbl get_bb;
                struct nvme_nvm_setbbtbl set_bb;
                struct nvme_nvm_erase_blk erase;
        };
};

#define NVME_NVM_LP_MLC_PAIRS 886

struct nvme_nvm_lp_mlc {
        __le16                  num_pairs;
        __u8                    pairs[NVME_NVM_LP_MLC_PAIRS];
};

struct nvme_nvm_lp_tbl {
        __u8                    id[8];
        struct nvme_nvm_lp_mlc  mlc;
};

struct nvme_nvm_id_group {
        /* ... geometry and timing fields elided ... */
        struct nvme_nvm_lp_tbl  lptbl;
};

struct nvme_nvm_addr_format {
        /* ... PPA field offsets and lengths elided ... */
};

struct nvme_nvm_id {
        /* ... identity header fields elided ... */
        struct nvme_nvm_addr_format ppaf;
        /* ... */
        struct nvme_nvm_id_group groups[4];
};

struct nvme_nvm_bb_tbl {
        /* ... fields elided ... */
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
        BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != NVME_IDENTIFY_DATA_SIZE);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
}
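
/*
 * Copy the single supported configuration group from the device identify
 * data into the host nvm_id_group, byte-swapping little-endian fields and,
 * for MLC media, the page pairing table.
 */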
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
        struct nvme_nvm_id_group *src;
        struct nvm_id_group *dst;

        if (nvme_nvm_id->cgrps != 1)
                return -EINVAL;

        src = &nvme_nvm_id->groups[0];
        dst = &nvm_id->grp;

        dst->mtype = src->mtype;
        dst->fmtype = src->fmtype;
        dst->num_ch = src->num_ch;
        dst->num_lun = src->num_lun;
        dst->num_pln = src->num_pln;

        dst->num_pg = le16_to_cpu(src->num_pg);
        dst->num_blk = le16_to_cpu(src->num_blk);
        dst->fpg_sz = le16_to_cpu(src->fpg_sz);
        dst->csecs = le16_to_cpu(src->csecs);
        dst->sos = le16_to_cpu(src->sos);

        dst->trdt = le32_to_cpu(src->trdt);
        dst->trdm = le32_to_cpu(src->trdm);
        dst->tprt = le32_to_cpu(src->tprt);
        dst->tprm = le32_to_cpu(src->tprm);
        dst->tbet = le32_to_cpu(src->tbet);
        dst->tbem = le32_to_cpu(src->tbem);
        dst->mpos = le32_to_cpu(src->mpos);
        dst->mccap = le32_to_cpu(src->mccap);

        dst->cpar = le16_to_cpu(src->cpar);

        if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
                memcpy(dst->lptbl.id, src->lptbl.id, 8);
                dst->lptbl.mlc.num_pairs =
                                le16_to_cpu(src->lptbl.mlc.num_pairs);

                if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
                        pr_err("nvm: number of MLC pairs not supported\n");
                        return -EINVAL;
                }

                memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
                                        dst->lptbl.mlc.num_pairs);
        }

        return 0;
}
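
/*
 * Issue the LightNVM identify admin command and translate the returned
 * 1.2 geometry into the generic nvm_id consumed by the lightnvm core.
 */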
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_id *nvme_nvm_id;
        struct nvme_nvm_command c = {};
        int ret;

        c.identity.opcode = nvme_nvm_admin_identity;
        c.identity.nsid = cpu_to_le32(ns->head->ns_id);
        c.identity.chnl_off = 0;

        nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
        if (!nvme_nvm_id)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                nvme_nvm_id, sizeof(struct nvme_nvm_id));
        if (ret) {
                ret = -EIO;
                goto out;
        }

        nvm_id->ver_id = nvme_nvm_id->ver_id;
        nvm_id->vmnt = nvme_nvm_id->vmnt;
        nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
        nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
        memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
                                        sizeof(struct nvm_addr_format));

        ret = init_grps(nvm_id, nvme_nvm_id);
out:
        kfree(nvme_nvm_id);
        return ret;
}
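
/*
 * Read the device logical-to-physical table in chunks no larger than the
 * admin queue's maximum transfer size, handing each chunk to update_l2p().
 */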
static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
                                nvm_l2p_update_fn *update_l2p, void *priv)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_command c = {};
        u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
        u32 nlb_pr_rq = len / sizeof(u64);
        u64 cmd_slba = slba;
        void *entries;
        int ret = 0;

        c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
        c.l2p.nsid = cpu_to_le32(ns->head->ns_id);
        entries = kmalloc(len, GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        while (nlb) {
                u32 cmd_nlb = min(nlb_pr_rq, nlb);
                u64 elba = slba + cmd_nlb;

                c.l2p.slba = cpu_to_le64(cmd_slba);
                c.l2p.nlb = cpu_to_le32(cmd_nlb);

                ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
                                (struct nvme_command *)&c, entries, len);
                if (ret) {
                        dev_err(ns->ctrl->device,
                                "L2P table transfer failed (%d)\n", ret);
                        ret = -EIO;
                        goto out;
                }

                if (unlikely(elba > nvmdev->total_secs)) {
                        pr_err("nvm: L2P data from device is out of bounds!\n");
                        ret = -EINVAL;
                        goto out;
                }

                /* Transform physical address to target address space */
                nvm_part_to_tgt(nvmdev, entries, cmd_nlb);

                if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
                        ret = -EINTR;
                        goto out;
                }

                cmd_slba += cmd_nlb;
                nlb -= cmd_nlb;
        }

out:
        kfree(entries);
        return ret;
}
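
/*
 * Fetch the bad block table for the LUN addressed by ppa and copy the
 * per-block state bytes into blks after validating the table header.
 */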
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
                                                                u8 *blks)
{
        struct request_queue *q = nvmdev->q;
        struct nvm_geo *geo = &nvmdev->geo;
        struct nvme_ns *ns = q->queuedata;
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_nvm_command c = {};
        struct nvme_nvm_bb_tbl *bb_tbl;
        int nr_blks = geo->blks_per_lun * geo->plane_mode;
        int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
        int ret = 0;

        c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
        c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
        c.get_bb.spba = cpu_to_le64(ppa.ppa);

        bb_tbl = kzalloc(tblsz, GFP_KERNEL);
        if (!bb_tbl)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
                                                                bb_tbl, tblsz);
        if (ret) {
                dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
                ret = -EIO;
                goto out;
        }

        if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
                bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
                dev_err(ctrl->device, "bbt format mismatch\n");
                ret = -EINVAL;
                goto out;
        }

        if (le16_to_cpu(bb_tbl->verid) != 1) {
                dev_err(ctrl->device, "bbt version not supported\n");
                ret = -EINVAL;
                goto out;
        }

        if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
                dev_err(ctrl->device,
                        "bbt unexpected blocks returned (%u!=%u)",
                        le32_to_cpu(bb_tbl->tblks), nr_blks);
                ret = -EINVAL;
                goto out;
        }

        memcpy(blks, bb_tbl->blk, geo->blks_per_lun * geo->plane_mode);
out:
        kfree(bb_tbl);
        return ret;
}
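
/*
 * Mark a list of blocks in the device bad block table with the given
 * state (type), e.g. bad or grown bad.
 */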
static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
                                                        int nr_ppas, int type)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_command c = {};
        int ret = 0;

        c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
        c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
        c.set_bb.spba = cpu_to_le64(ppas->ppa);
        c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
        c.set_bb.value = type;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                                                NULL, 0);
        if (ret)
                dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
                                                                        ret);
        return ret;
}
static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
                                    struct nvme_nvm_command *c)
{
        c->ph_rw.opcode = rqd->opcode;
        c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
        c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
        c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
        c->ph_rw.control = cpu_to_le16(rqd->flags);
        c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);

        if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
                c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
                                        rqd->bio->bi_iter.bi_sector));
}

static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
{
        struct nvm_rq *rqd = rq->end_io_data;

        rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
        rqd->error = nvme_req(rq)->status;
        nvm_end_io(rqd);

        kfree(nvme_req(rq)->cmd);
        blk_mq_free_request(rq);
}

static struct request *nvme_nvm_alloc_request(struct request_queue *q,
                                              struct nvm_rq *rqd,
                                              struct nvme_nvm_command *cmd)
{
        struct nvme_ns *ns = q->queuedata;
        struct request *rq;

        nvme_nvm_rqtocmd(rqd, ns, cmd);

        rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
        if (IS_ERR(rq))
                return rq;

        rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

        if (rqd->bio) {
                blk_init_request_from_bio(rq, rqd->bio);
        } else {
                rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
                rq->__data_len = 0;
        }

        return rq;
}
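
/*
 * Asynchronous submission path: the heap-allocated command and the
 * request are released from nvme_nvm_end_io() on completion.
 */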
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct nvme_nvm_command *cmd;
        struct request *rq;

        cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        rq = nvme_nvm_alloc_request(q, rqd, cmd);
        if (IS_ERR(rq)) {
                kfree(cmd);
                return PTR_ERR(rq);
        }

        rq->end_io_data = rqd;

        blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

        return 0;
}
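
/*
 * Synchronous submission path: the command lives on the stack and the
 * caller inspects rqd->error and rqd->ppa_status on return.
 */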
static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct request *rq;
        struct nvme_nvm_command cmd;
        int ret = 0;

        memset(&cmd, 0, sizeof(struct nvme_nvm_command));

        rq = nvme_nvm_alloc_request(q, rqd, &cmd);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* I/Os can fail and the error is signaled through rqd. Callers must
         * handle the error accordingly.
         */
        blk_execute_rq(q, NULL, rq, 0);
        if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;

        rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
        rqd->error = nvme_req(rq)->status;

        blk_mq_free_request(rq);

        return ret;
}
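
/*
 * DMA pool helpers used by the lightnvm core, e.g. for PPA lists and
 * out-of-band metadata buffers.
 */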
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;

        return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
        struct dma_pool *dma_pool = pool;

        dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
                                    gfp_t mem_flags, dma_addr_t *dma_handler)
{
        return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
                                                        dma_addr_t dma_handler)
{
        dma_pool_free(pool, addr, dma_handler);
}

static struct nvm_dev_ops nvme_nvm_dev_ops = {
        .identity               = nvme_nvm_identity,

        .get_l2p_tbl            = nvme_nvm_get_l2p_tbl,

        .get_bb_tbl             = nvme_nvm_get_bb_tbl,
        .set_bb_tbl             = nvme_nvm_set_bb_tbl,

        .submit_io              = nvme_nvm_submit_io,
        .submit_io_sync         = nvme_nvm_submit_io_sync,

        .create_dma_pool        = nvme_nvm_create_dma_pool,
        .destroy_dma_pool       = nvme_nvm_destroy_dma_pool,
        .dev_dma_alloc          = nvme_nvm_dev_dma_alloc,
        .dev_dma_free           = nvme_nvm_dev_dma_free,

        /* ... remaining fields (e.g. max_phys_sect) elided ... */
};
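
/*
 * Common worker for the user-space vectored commands: maps the user data
 * buffer, stages the PPA list and metadata through the device DMA pool,
 * executes the command synchronously and copies status/result back.
 */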
static int nvme_nvm_submit_user_cmd(struct request_queue *q,
                                struct nvme_ns *ns,
                                struct nvme_nvm_command *vcmd,
                                void __user *ubuf, unsigned int bufflen,
                                void __user *meta_buf, unsigned int meta_len,
                                void __user *ppa_buf, unsigned int ppa_len,
                                u32 *result, u64 *status, unsigned int timeout)
{
        bool write = nvme_is_write((struct nvme_command *)vcmd);
        struct nvm_dev *dev = ns->ndev;
        struct gendisk *disk = ns->disk;
        struct request *rq;
        struct bio *bio = NULL;
        __le64 *ppa_list = NULL;
        dma_addr_t ppa_dma;
        __le64 *metadata = NULL;
        dma_addr_t metadata_dma;
        DECLARE_COMPLETION_ONSTACK(wait);
        int ret = 0;

        rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
                        NVME_QID_ANY);
        if (IS_ERR(rq)) {
                ret = -ENOMEM;
                goto err_cmd;
        }

        rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;

        if (ppa_buf && ppa_len) {
                ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
                if (!ppa_list) {
                        ret = -ENOMEM;
                        goto err_rq;
                }
                if (copy_from_user(ppa_list, (void __user *)ppa_buf,
                                                sizeof(u64) * (ppa_len + 1))) {
                        ret = -EFAULT;
                        goto err_ppa;
                }
                vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
        } else {
                vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
        }

        if (ubuf && bufflen) {
                ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
                if (ret)
                        goto err_ppa;
                bio = rq->bio;

                if (meta_buf && meta_len) {
                        metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
                                                                &metadata_dma);
                        if (!metadata) {
                                ret = -ENOMEM;
                                goto err_map;
                        }

                        if (write) {
                                if (copy_from_user(metadata,
                                                (void __user *)meta_buf,
                                                meta_len)) {
                                        ret = -EFAULT;
                                        goto err_meta;
                                }
                        }
                        vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
                }

                bio->bi_disk = disk;
        }

        blk_execute_rq(q, NULL, rq, 0);

        if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;
        else if (nvme_req(rq)->status & 0x7ff)
                ret = -EIO;
        if (result)
                *result = nvme_req(rq)->status & 0x7ff;
        if (status)
                *status = le64_to_cpu(nvme_req(rq)->result.u64);

        if (metadata && !ret && !write) {
                if (copy_to_user(meta_buf, (void *)metadata, meta_len))
                        ret = -EFAULT;
        }

err_meta:
        if (meta_buf && meta_len)
                dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
        if (bio)
                blk_rq_unmap_user(bio);
err_ppa:
        if (ppa_buf && ppa_len)
                dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
        blk_mq_free_request(rq);
err_cmd:
        return ret;
}
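
/*
 * NVME_NVM_IOCTL_SUBMIT_VIO handler: vectored I/O on the namespace queue;
 * the data length is derived from the PPA count and the namespace LBA
 * size.
 */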
static int nvme_nvm_submit_vio(struct nvme_ns *ns,
                                        struct nvm_user_vio __user *uvio)
{
        struct nvm_user_vio vio;
        struct nvme_nvm_command c;
        unsigned int length;
        int ret;

        if (copy_from_user(&vio, uvio, sizeof(vio)))
                return -EFAULT;

        memset(&c, 0, sizeof(c));
        c.ph_rw.opcode = vio.opcode;
        c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
        c.ph_rw.control = cpu_to_le16(vio.control);
        c.ph_rw.length = cpu_to_le16(vio.nppas);

        length = (vio.nppas + 1) << ns->lba_shift;

        ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
                        (void __user *)(uintptr_t)vio.addr, length,
                        (void __user *)(uintptr_t)vio.metadata,
                                                        vio.metadata_len,
                        (void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
                        &vio.result, &vio.status, 0);

        if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
                return -EFAULT;

        return ret;
}

static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
                                        struct nvm_passthru_vio __user *uvcmd)
{
        struct nvm_passthru_vio vcmd;
        struct nvme_nvm_command c;
        struct request_queue *q;
        unsigned int timeout = 0;
        int ret;

        if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
                return -EFAULT;
        if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
                return -EACCES;

        memset(&c, 0, sizeof(c));
        c.common.opcode = vcmd.opcode;
        c.common.nsid = cpu_to_le32(ns->head->ns_id);
        c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
        /* cdw11-12 */
        c.ph_rw.length = cpu_to_le16(vcmd.nppas);
        c.ph_rw.control = cpu_to_le16(vcmd.control);
        c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13);
        c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14);
        c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15);

        if (vcmd.timeout_ms)
                timeout = msecs_to_jiffies(vcmd.timeout_ms);

        q = admin ? ns->ctrl->admin_q : ns->queue;

        ret = nvme_nvm_submit_user_cmd(q, ns,
                        (struct nvme_nvm_command *)&c,
                        (void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
                        (void __user *)(uintptr_t)vcmd.metadata,
                                                        vcmd.metadata_len,
                        (void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
                        &vcmd.result, &vcmd.status, timeout);

        if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
                return -EFAULT;

        return ret;
}
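
/*
 * Vendor passthrough (admin or I/O queue). Except for the get bad block
 * table opcode (0xf2), these commands require CAP_SYS_ADMIN.
 */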
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case NVME_NVM_IOCTL_ADMIN_VIO:
                return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
        case NVME_NVM_IOCTL_IO_VIO:
                return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
        case NVME_NVM_IOCTL_SUBMIT_VIO:
                return nvme_nvm_submit_vio(ns, (void __user *)arg);
        default:
                return -ENOTTY;
        }
}
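
/*
 * Rough user-space usage sketch for the vectored I/O ioctl (illustrative
 * only; structure layout per uapi/linux/lightnvm.h, opcode constant per
 * the kernel's linux/lightnvm.h definitions):
 *
 *	struct nvm_user_vio vio = { 0 };
 *
 *	vio.opcode = NVM_OP_PREAD;			// physical page read
 *	vio.nppas = nr_ppas - 1;			// 0-based PPA count
 *	vio.ppa_list = (__u64)(uintptr_t)ppa_list;	// device-format PPAs
 *	vio.addr = (__u64)(uintptr_t)data_buf;		// data buffer
 *
 *	if (ioctl(fd, NVME_NVM_IOCTL_SUBMIT_VIO, &vio) < 0)
 *		perror("NVME_NVM_IOCTL_SUBMIT_VIO");
 */

/*
 * Called when a LightNVM-capable namespace is found: allocate an nvm_dev,
 * wire up the device operations above and register it with the lightnvm
 * core.
 */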
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
        struct request_queue *q = ns->queue;
        struct nvm_dev *dev;

        _nvme_nvm_check_size();

        dev = nvm_alloc_dev(node);
        if (!dev)
                return -ENOMEM;

        dev->q = q;
        memcpy(dev->name, disk_name, DISK_NAME_LEN);
        dev->ops = &nvme_nvm_dev_ops;
        dev->private_data = ns;
        ns->ndev = dev;

        return nvm_register(dev);
}

void nvme_nvm_unregister(struct nvme_ns *ns)
{
        nvm_unregister(ns->ndev);
}
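
/*
 * Sysfs attributes exposing the identify geometry under the namespace's
 * disk device. A single show routine dispatches on the attribute name.
 */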
static ssize_t nvm_dev_attr_show(struct device *dev,
                                 struct device_attribute *dattr, char *page)
{
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_id *id;
        struct nvm_id_group *grp;
        struct attribute *attr;

        if (!ndev)
                return 0;

        id = &ndev->identity;
        grp = &id->grp;
        attr = &dattr->attr;

        if (strcmp(attr->name, "version") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
        } else if (strcmp(attr->name, "vendor_opcode") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
        } else if (strcmp(attr->name, "capabilities") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
        } else if (strcmp(attr->name, "device_mode") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
        /* kept for compatibility */
        } else if (strcmp(attr->name, "media_manager") == 0) {
                return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
        } else if (strcmp(attr->name, "ppa_format") == 0) {
                return scnprintf(page, PAGE_SIZE,
                        "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
                        id->ppaf.ch_offset, id->ppaf.ch_len,
                        id->ppaf.lun_offset, id->ppaf.lun_len,
                        id->ppaf.pln_offset, id->ppaf.pln_len,
                        id->ppaf.blk_offset, id->ppaf.blk_len,
                        id->ppaf.pg_offset, id->ppaf.pg_len,
                        id->ppaf.sect_offset, id->ppaf.sect_len);
        } else if (strcmp(attr->name, "media_type") == 0) {    /* u8 */
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
        } else if (strcmp(attr->name, "flash_media_type") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
        } else if (strcmp(attr->name, "num_channels") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
        } else if (strcmp(attr->name, "num_luns") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
        } else if (strcmp(attr->name, "num_planes") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
        } else if (strcmp(attr->name, "num_blocks") == 0) {    /* u16 */
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
        } else if (strcmp(attr->name, "num_pages") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
        } else if (strcmp(attr->name, "page_size") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
        } else if (strcmp(attr->name, "hw_sector_size") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
        } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
        } else if (strcmp(attr->name, "read_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
        } else if (strcmp(attr->name, "read_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
        } else if (strcmp(attr->name, "prog_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
        } else if (strcmp(attr->name, "prog_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
        } else if (strcmp(attr->name, "erase_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
        } else if (strcmp(attr->name, "erase_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
        } else if (strcmp(attr->name, "multiplane_modes") == 0) {
                return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
        } else if (strcmp(attr->name, "media_capabilities") == 0) {
                return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
        } else if (strcmp(attr->name, "max_phys_secs") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n",
                                ndev->ops->max_phys_sect);
        } else {
                return scnprintf(page,
                                 PAGE_SIZE,
                                 "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
                                 attr->name);
        }
}

#define NVM_DEV_ATTR_RO(_name)						\
        DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)

static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(vendor_opcode);
static NVM_DEV_ATTR_RO(capabilities);
static NVM_DEV_ATTR_RO(device_mode);
static NVM_DEV_ATTR_RO(ppa_format);
static NVM_DEV_ATTR_RO(media_manager);

static NVM_DEV_ATTR_RO(media_type);
static NVM_DEV_ATTR_RO(flash_media_type);
static NVM_DEV_ATTR_RO(num_channels);
static NVM_DEV_ATTR_RO(num_luns);
static NVM_DEV_ATTR_RO(num_planes);
static NVM_DEV_ATTR_RO(num_blocks);
static NVM_DEV_ATTR_RO(num_pages);
static NVM_DEV_ATTR_RO(page_size);
static NVM_DEV_ATTR_RO(hw_sector_size);
static NVM_DEV_ATTR_RO(oob_sector_size);
static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);
static NVM_DEV_ATTR_RO(prog_typ);
static NVM_DEV_ATTR_RO(prog_max);
static NVM_DEV_ATTR_RO(erase_typ);
static NVM_DEV_ATTR_RO(erase_max);
static NVM_DEV_ATTR_RO(multiplane_modes);
static NVM_DEV_ATTR_RO(media_capabilities);
static NVM_DEV_ATTR_RO(max_phys_secs);

static struct attribute *nvm_dev_attrs[] = {
        &dev_attr_version.attr,
        &dev_attr_vendor_opcode.attr,
        &dev_attr_capabilities.attr,
        &dev_attr_device_mode.attr,
        &dev_attr_media_manager.attr,

        &dev_attr_ppa_format.attr,
        &dev_attr_media_type.attr,
        &dev_attr_flash_media_type.attr,
        &dev_attr_num_channels.attr,
        &dev_attr_num_luns.attr,
        &dev_attr_num_planes.attr,
        &dev_attr_num_blocks.attr,
        &dev_attr_num_pages.attr,
        &dev_attr_page_size.attr,
        &dev_attr_hw_sector_size.attr,
        &dev_attr_oob_sector_size.attr,
        &dev_attr_read_typ.attr,
        &dev_attr_read_max.attr,
        &dev_attr_prog_typ.attr,
        &dev_attr_prog_max.attr,
        &dev_attr_erase_typ.attr,
        &dev_attr_erase_max.attr,
        &dev_attr_multiplane_modes.attr,
        &dev_attr_media_capabilities.attr,
        &dev_attr_max_phys_secs.attr,

        NULL,
};

static const struct attribute_group nvm_dev_attr_group = {
        .attrs          = nvm_dev_attrs,
};

int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
        return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvm_dev_attr_group);
}

void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
{
        sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvm_dev_attr_group);
}