/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
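/*
 * Vendor-specific admin opcodes defined by the LightNVM (Open-Channel SSD)
 * interface and issued by the helpers below: device identification,
 * logical-to-physical (L2P) table retrieval and bad block table get/set.
 */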
enum nvme_nvm_admin_opcode {
        nvme_nvm_admin_identity         = 0xe2,
        nvme_nvm_admin_get_l2p_tbl      = 0xea,
        nvme_nvm_admin_get_bb_tbl       = 0xf2,
        nvme_nvm_admin_set_bb_tbl       = 0xf1,
};
struct nvme_nvm_hb_rw {
        /* 64-byte hybrid read/write command layout (fields not shown in this listing) */
};

struct nvme_nvm_ph_rw {
        /* 64-byte physical read/write command layout (fields not shown in this listing) */
};

struct nvme_nvm_identity {
        /* 64-byte identity command layout (fields not shown in this listing) */
};

struct nvme_nvm_l2ptbl {
        /* 64-byte get L2P table command layout (fields not shown in this listing) */
};

struct nvme_nvm_getbbtbl {
        /* 64-byte get bad block table command layout (fields not shown in this listing) */
};

struct nvme_nvm_setbbtbl {
        /* 64-byte set bad block table command layout (fields not shown in this listing) */
};

struct nvme_nvm_erase_blk {
        /* 64-byte erase block command layout (fields not shown in this listing) */
};
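/*
 * Each of the command layouts above is 64 bytes, the size of a regular
 * NVMe submission queue entry, so a struct nvme_nvm_command can be cast
 * to struct nvme_command when handed to the NVMe core (see the casts in
 * the submit paths below). _nvme_nvm_check_size() enforces the sizes at
 * build time.
 */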
struct nvme_nvm_command {
        union {
                struct nvme_common_command common;
                struct nvme_nvm_identity identity;
                struct nvme_nvm_hb_rw hb_rw;
                struct nvme_nvm_ph_rw ph_rw;
                struct nvme_nvm_l2ptbl l2p;
                struct nvme_nvm_getbbtbl get_bb;
                struct nvme_nvm_setbbtbl set_bb;
                struct nvme_nvm_erase_blk erase;
        };
};
#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
        __le16                  num_pairs;
        __u8                    pairs[NVME_NVM_LP_MLC_PAIRS];
};

struct nvme_nvm_lp_tbl {
        __u8                    id[8];
        struct nvme_nvm_lp_mlc  mlc;
};
struct nvme_nvm_id_group {
        /* per-group geometry and timing fields are not shown in this listing */
        struct nvme_nvm_lp_tbl lptbl;
};

struct nvme_nvm_addr_format {
        /* PPA address format descriptor (fields not shown in this listing) */
};

struct nvme_nvm_id {
        /* identity page header fields (ver_id, vmnt, cgrps, cap, dom, ...) not shown */
        struct nvme_nvm_addr_format ppaf;
        struct nvme_nvm_id_group groups[4];
};

struct nvme_nvm_bb_tbl {
        /* bad block table header (tblid, verid, tblks, ...) and blk[] entries not shown */
};
/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
        BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512);
}
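/*
 * init_grps() copies the per-group configuration reported by the device
 * (little-endian wire format) into host-endian struct nvm_id_group
 * entries used by the lightnvm core, handling at most 4 groups. For MLC
 * media it also copies the page-pairing table, rejecting devices that
 * report more pairs than NVME_NVM_LP_MLC_PAIRS.
 */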
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
        struct nvme_nvm_id_group *src;
        struct nvm_id_group *dst;
        int i, end;

        end = min_t(u32, 4, nvm_id->cgrps);

        for (i = 0; i < end; i++) {
                src = &nvme_nvm_id->groups[i];
                dst = &nvm_id->groups[i];

                dst->mtype = src->mtype;
                dst->fmtype = src->fmtype;
                dst->num_ch = src->num_ch;
                dst->num_lun = src->num_lun;
                dst->num_pln = src->num_pln;

                dst->num_pg = le16_to_cpu(src->num_pg);
                dst->num_blk = le16_to_cpu(src->num_blk);
                dst->fpg_sz = le16_to_cpu(src->fpg_sz);
                dst->csecs = le16_to_cpu(src->csecs);
                dst->sos = le16_to_cpu(src->sos);

                dst->trdt = le32_to_cpu(src->trdt);
                dst->trdm = le32_to_cpu(src->trdm);
                dst->tprt = le32_to_cpu(src->tprt);
                dst->tprm = le32_to_cpu(src->tprm);
                dst->tbet = le32_to_cpu(src->tbet);
                dst->tbem = le32_to_cpu(src->tbem);
                dst->mpos = le32_to_cpu(src->mpos);
                dst->mccap = le32_to_cpu(src->mccap);

                dst->cpar = le16_to_cpu(src->cpar);

                if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
                        memcpy(dst->lptbl.id, src->lptbl.id, 8);
                        dst->lptbl.mlc.num_pairs =
                                        le16_to_cpu(src->lptbl.mlc.num_pairs);

                        if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
                                pr_err("nvm: number of MLC pairs not supported\n");
                                return -EINVAL;
                        }

                        memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
                                                dst->lptbl.mlc.num_pairs);
                }
        }

        return 0;
}
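/*
 * nvme_nvm_identity() issues the vendor-specific identity admin command
 * into a freshly allocated 4096-byte struct nvme_nvm_id and copies the
 * version, capability, address-format and group information into the
 * generic struct nvm_id used by the lightnvm core.
 */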
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_id *nvme_nvm_id;
        struct nvme_nvm_command c = {};
        int ret;

        c.identity.opcode = nvme_nvm_admin_identity;
        c.identity.nsid = cpu_to_le32(ns->ns_id);
        c.identity.chnl_off = 0;

        nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
        if (!nvme_nvm_id)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                nvme_nvm_id, sizeof(struct nvme_nvm_id));
        if (ret) {
                ret = -EIO;
                goto out;
        }

        nvm_id->ver_id = nvme_nvm_id->ver_id;
        nvm_id->vmnt = nvme_nvm_id->vmnt;
        nvm_id->cgrps = nvme_nvm_id->cgrps;
        nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
        nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
        memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
                                        sizeof(struct nvme_nvm_addr_format));

        ret = init_grps(nvm_id, nvme_nvm_id);
out:
        kfree(nvme_nvm_id);
        return ret;
}
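/*
 * The L2P table is fetched in chunks: each request transfers at most
 * queue_max_hw_sectors() worth of data on the admin queue, and every
 * 64-bit entry maps one logical block. update_l2p() is invoked once per
 * chunk so the caller can consume the table incrementally.
 */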
static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
                                nvm_l2p_update_fn *update_l2p, void *priv)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_command c = {};
        u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
        u32 nlb_pr_rq = len / sizeof(u64);
        u64 cmd_slba = slba;
        void *entries;
        int ret = 0;

        c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
        c.l2p.nsid = cpu_to_le32(ns->ns_id);
        entries = kmalloc(len, GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        while (nlb) {
                u32 cmd_nlb = min(nlb_pr_rq, nlb);

                c.l2p.slba = cpu_to_le64(cmd_slba);
                c.l2p.nlb = cpu_to_le32(cmd_nlb);

                ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
                                (struct nvme_command *)&c, entries, len);
                if (ret) {
                        dev_err(ns->ctrl->dev, "L2P table transfer failed (%d)\n",
                                                                        ret);
                        ret = -EIO;
                        goto out;
                }

                if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
                        ret = -EINTR;
                        goto out;
                }

                cmd_slba += cmd_nlb;
                nlb -= cmd_nlb;
        }

out:
        kfree(entries);
        return ret;
}
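/*
 * The bad block table returned by the device is validated before use:
 * it must carry the "BBLT" table id, report version 1 and describe
 * exactly the number of blocks the caller asked for. Only then is the
 * per-block state array handed to update_bbtbl(), with the PPA converted
 * back to the generic address format.
 */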
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
                                int nr_blocks, nvm_bb_update_fn *update_bbtbl,
                                void *priv)
{
        struct request_queue *q = nvmdev->q;
        struct nvme_ns *ns = q->queuedata;
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_nvm_command c = {};
        struct nvme_nvm_bb_tbl *bb_tbl;
        int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
        int ret = 0;

        c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
        c.get_bb.nsid = cpu_to_le32(ns->ns_id);
        c.get_bb.spba = cpu_to_le64(ppa.ppa);

        bb_tbl = kzalloc(tblsz, GFP_KERNEL);
        if (!bb_tbl)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
                                                                bb_tbl, tblsz);
        if (ret) {
                dev_err(ctrl->dev, "get bad block table failed (%d)\n", ret);
                ret = -EIO;
                goto out;
        }

        if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
                bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
                dev_err(ctrl->dev, "bbt format mismatch\n");
                ret = -EINVAL;
                goto out;
        }

        if (le16_to_cpu(bb_tbl->verid) != 1) {
                dev_err(ctrl->dev, "bbt version not supported\n");
                ret = -EINVAL;
                goto out;
        }

        if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
                ret = -EINVAL;
                dev_err(ctrl->dev, "bbt unexpected blocks returned (%u!=%u)",
                                        le32_to_cpu(bb_tbl->tblks), nr_blocks);
                goto out;
        }

        ppa = dev_to_generic_addr(nvmdev, ppa);
        ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);

out:
        kfree(bb_tbl);
        return ret;
}
static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
                                                                int type)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_command c = {};
        int ret = 0;

        c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
        c.set_bb.nsid = cpu_to_le32(ns->ns_id);
        c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
        c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
        c.set_bb.value = type;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                                                NULL, 0);
        if (ret)
                dev_err(ns->ctrl->dev, "set bad block table failed (%d)\n", ret);
        return ret;
}
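/*
 * nvme_nvm_rqtocmd() fills the physical read/write fields common to all
 * data commands; for the hybrid opcodes (NVM_OP_HBWRITE/NVM_OP_HBREAD)
 * it additionally records the logical start block derived from the bio.
 */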
static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
                                struct nvme_ns *ns, struct nvme_nvm_command *c)
{
        c->ph_rw.opcode = rqd->opcode;
        c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
        c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
        c->ph_rw.control = cpu_to_le16(rqd->flags);
        c->ph_rw.length = cpu_to_le16(rqd->nr_pages - 1);

        if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
                c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
                                        rqd->bio->bi_iter.bi_sector));
}
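/*
 * Completion path for I/O submitted via nvme_nvm_submit_io(): signal the
 * lightnvm core through nvm_end_io() and release the request. The
 * command buffer attached to rq->cmd is freed here as well, since it was
 * allocated in the submission path.
 */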
static void nvme_nvm_end_io(struct request *rq, int error)
{
        struct nvm_rq *rqd = rq->end_io_data;

        nvm_end_io(rqd, error);

        kfree(rq->cmd);
        blk_mq_free_request(rq);
}
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct nvme_ns *ns = q->queuedata;
        struct request *rq;
        struct bio *bio = rqd->bio;
        struct nvme_nvm_command *cmd;

        rq = blk_mq_alloc_request(q, bio_rw(bio), 0);
        if (IS_ERR(rq))
                return -ENOMEM;

        cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
        if (!cmd) {
                blk_mq_free_request(rq);
                return -ENOMEM;
        }

        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->ioprio = bio_prio(bio);

        if (bio_has_data(bio))
                rq->nr_phys_segments = bio_phys_segments(q, bio);

        rq->__data_len = bio->bi_iter.bi_size;
        rq->bio = rq->biotail = bio;

        nvme_nvm_rqtocmd(rq, rqd, ns, cmd);

        rq->cmd = (unsigned char *)cmd;
        rq->cmd_len = sizeof(struct nvme_nvm_command);
        rq->special = (void *)0;

        rq->end_io_data = rqd;

        blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

        return 0;
}
static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct nvme_ns *ns = q->queuedata;
        struct nvme_nvm_command c = {};

        c.erase.opcode = NVM_OP_ERASE;
        c.erase.nsid = cpu_to_le32(ns->ns_id);
        c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
        c.erase.length = cpu_to_le16(rqd->nr_pages - 1);

        return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
}
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;

        return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
        struct dma_pool *dma_pool = pool;

        dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
                                    gfp_t mem_flags, dma_addr_t *dma_handler)
{
        return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *ppa_list,
                                  dma_addr_t dma_handler)
{
        dma_pool_free(pool, ppa_list, dma_handler);
}
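/*
 * Device operations handed to the lightnvm core via nvm_register(); the
 * core calls back into these hooks for identification, table management,
 * I/O submission and per-device DMA pool handling.
 */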
static struct nvm_dev_ops nvme_nvm_dev_ops = {
        .identity               = nvme_nvm_identity,

        .get_l2p_tbl            = nvme_nvm_get_l2p_tbl,

        .get_bb_tbl             = nvme_nvm_get_bb_tbl,
        .set_bb_tbl             = nvme_nvm_set_bb_tbl,

        .submit_io              = nvme_nvm_submit_io,
        .erase_block            = nvme_nvm_erase_block,

        .create_dma_pool        = nvme_nvm_create_dma_pool,
        .destroy_dma_pool       = nvme_nvm_destroy_dma_pool,
        .dev_dma_alloc          = nvme_nvm_dev_dma_alloc,
        .dev_dma_free           = nvme_nvm_dev_dma_free,
};
int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
        return nvm_register(q, disk_name, &nvme_nvm_dev_ops);
}

void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
{
        nvm_unregister(disk_name);
}
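/*
 * Illustrative only: a minimal sketch of how the NVMe core is expected to
 * wire these helpers up during namespace scan. The surrounding function
 * context and variable names here are assumptions, not part of this file:
 *
 *      if (nvme_nvm_ns_supported(ns, id) &&
 *          nvme_nvm_register(ns->queue, disk->disk_name) < 0)
 *              dev_warn(ctrl->dev, "LightNVM init failure\n");
 *
 * with nvme_nvm_unregister() called for the same queue and disk name when
 * the namespace is torn down.
 */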
/* move to shared place when used in multiple places. */
#define PCI_VENDOR_ID_CNEX 0x1d1d
#define PCI_DEVICE_ID_CNEX_WL 0x2807
#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f

int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
        struct nvme_ctrl *ctrl = ns->ctrl;
        /* XXX: this is poking into PCI structures from generic code! */
        struct pci_dev *pdev = to_pci_dev(ctrl->dev);

        /* QEMU NVMe simulator - PCI ID + Vendor specific bit */
        if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
                                pdev->device == PCI_DEVICE_ID_CNEX_QEMU &&
                                                        id->vs[0] == 0x1)
                return 1;

        /* CNEX Labs - PCI ID + Vendor specific bit */
        if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
                                pdev->device == PCI_DEVICE_ID_CNEX_WL &&
                                                        id->vs[0] == 0x1)
                return 1;

        return 0;
}