/* SPDX-License-Identifier: GPL-2.0 */
#ifndef NVM_H
#define NVM_H

#include <linux/blkdev.h>
#include <linux/types.h>
#include <uapi/linux/lightnvm.h>

#define NVM_BLK_BITS (16)
#define NVM_PG_BITS (16)
#define NVM_SEC_BITS (8)
#define NVM_PL_BITS (8)
#define NVM_LUN_BITS (8)
#define NVM_CH_BITS (7)
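/*
 * Note: the six field widths above sum to 16 + 16 + 8 + 8 + 8 + 7 = 63
 * bits, so a full generic address packs into a single u64 with one bit
 * to spare.
 */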
/* Generic structure for all addresses */
struct ppa_addr {
	union {
		struct {
			u64 blk		: NVM_BLK_BITS;
			u64 pg		: NVM_PG_BITS;
			u64 sec		: NVM_SEC_BITS;
			u64 pl		: NVM_PL_BITS;
			u64 lun		: NVM_LUN_BITS;
			u64 ch		: NVM_CH_BITS;
			u64 reserved	: 1;
		} g;

		u64 ppa;
	};
};
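/*
 * Example (illustrative sketch, not part of this header): the union
 * lets callers either set the geometry fields individually or treat
 * the address as one packed 64-bit value:
 *
 *	struct ppa_addr p;
 *
 *	p.ppa = 0;		clears all fields at once
 *	p.g.ch = 1;
 *	p.g.lun = 2;
 *	p.g.blk = 100;		p.ppa now holds the packed address
 */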
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
							dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void *, dma_addr_t);
struct nvm_dev_ops {
	nvm_id_fn		*identity;
	nvm_op_bb_tbl_fn	*get_bb_tbl;
	nvm_op_set_bb_fn	*set_bb_tbl;

	nvm_submit_io_fn	*submit_io;
	nvm_submit_io_sync_fn	*submit_io_sync;

	nvm_create_dma_pool_fn	*create_dma_pool;
	nvm_destroy_dma_pool_fn	*destroy_dma_pool;
	nvm_dev_dma_alloc_fn	*dev_dma_alloc;
	nvm_dev_dma_free_fn	*dev_dma_free;

	unsigned int		max_phys_sect;
};
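/*
 * Example (illustrative sketch; the mydrv_* callbacks are hypothetical,
 * not part of this header): a device driver exposes its hooks by
 * filling in an nvm_dev_ops and attaching it to its nvm_dev before
 * nvm_register():
 *
 *	static struct nvm_dev_ops mydrv_nvm_dev_ops = {
 *		.identity		= mydrv_nvm_identity,
 *		.get_bb_tbl		= mydrv_nvm_get_bb_tbl,
 *		.set_bb_tbl		= mydrv_nvm_set_bb_tbl,
 *		.submit_io		= mydrv_nvm_submit_io,
 *		.submit_io_sync		= mydrv_nvm_submit_io_sync,
 *		.create_dma_pool	= mydrv_nvm_create_dma_pool,
 *		.destroy_dma_pool	= mydrv_nvm_destroy_dma_pool,
 *		.dev_dma_alloc		= mydrv_nvm_dev_dma_alloc,
 *		.dev_dma_free		= mydrv_nvm_dev_dma_free,
 *		.max_phys_sect		= 64,
 *	};
 */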
#ifdef CONFIG_NVM

#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/dmapool.h>
#include <uapi/linux/lightnvm.h>
enum {
	/* HW Responsibilities */
	NVM_RSP_L2P	= 1 << 0,
	NVM_RSP_ECC	= 1 << 1,

	/* Physical Addressing Mode */
	NVM_ADDRMODE_LINEAR	= 0,
	NVM_ADDRMODE_CHANNEL	= 1,

	/* Plane programming mode for LUN */
	NVM_PLANE_SINGLE	= 1,
	NVM_PLANE_DOUBLE	= 2,
	NVM_PLANE_QUAD		= 4,

	/* Status codes */
	NVM_RSP_SUCCESS		= 0x0,
	NVM_RSP_NOT_CHANGEABLE	= 0x1,
	NVM_RSP_ERR_FAILWRITE	= 0x40ff,
	NVM_RSP_ERR_EMPTYPAGE	= 0x42ff,
	NVM_RSP_ERR_FAILECC	= 0x4281,
	NVM_RSP_ERR_FAILCRC	= 0x4004,
	NVM_RSP_WARN_HIGHECC	= 0x4700,

	/* Device opcodes */
	NVM_OP_PWRITE		= 0x91,
	NVM_OP_PREAD		= 0x92,
	NVM_OP_ERASE		= 0x90,

	/* PPA Command Flags */
	NVM_IO_SNGL_ACCESS	= 0x0,
	NVM_IO_DUAL_ACCESS	= 0x1,
	NVM_IO_QUAD_ACCESS	= 0x2,

	/* NAND Access Modes */
	NVM_IO_SUSPEND		= 0x80,
	NVM_IO_SLC_MODE		= 0x100,
	NVM_IO_SCRAMBLE_ENABLE	= 0x200,

	/* Block Types */
	NVM_BLK_T_FREE		= 0x0,
	NVM_BLK_T_BAD		= 0x1,
	NVM_BLK_T_GRWN_BAD	= 0x2,
	NVM_BLK_T_DEV		= 0x4,
	NVM_BLK_T_HOST		= 0x8,

	/* Memory capabilities */
	NVM_ID_CAP_SLC		= 0x1,
	NVM_ID_CAP_CMD_SUSPEND	= 0x2,
	NVM_ID_CAP_SCRAMBLE	= 0x4,
	NVM_ID_CAP_ENCRYPT	= 0x8,

	/* Memory formats */
	NVM_ID_FMTYPE_SLC	= 0,
	NVM_ID_FMTYPE_MLC	= 1,

	/* Device capabilities */
	NVM_ID_DCAP_BBLKMGMT	= 0x1,
	NVM_UD_DCAP_ECC		= 0x2,
};
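/*
 * Example (illustrative): a request's flags combine one PPA access mode
 * with optional NAND access modifiers, e.g. a scrambled dual-plane
 * command:
 *
 *	rqd->flags = NVM_IO_DUAL_ACCESS | NVM_IO_SCRAMBLE_ENABLE;
 */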
struct nvm_id_lp_mlc {
	u16	num_pairs;
	u8	pairs[886];
};

struct nvm_id_lp_tbl {
	__u8	id[8];
	struct nvm_id_lp_mlc	mlc;
};
struct nvm_id_group {
	u8	mtype;
	u8	fmtype;
	u8	num_ch;
	u8	num_lun;
	u16	num_chk;
	u16	clba;
	u16	csecs;
	u16	sos;

	u16	ws_min;
	u16	ws_opt;
	u16	ws_seq;
	u16	ws_per_chk;

	u32	trdt;
	u32	trdm;
	u32	tprt;
	u32	tprm;
	u32	tbet;
	u32	tbem;
	u32	mpos;
	u32	mccap;
	u16	cpar;

	/* 1.2 compatibility */
	u8	num_pln;
	u16	num_pg;
	u16	fpg_sz;
};
struct nvm_addr_format {
	u8	ch_offset;
	u8	ch_len;
	u8	lun_offset;
	u8	lun_len;
	u8	pln_offset;
	u8	pln_len;
	u8	blk_offset;
	u8	blk_len;
	u8	pg_offset;
	u8	pg_len;
	u8	sect_offset;
	u8	sect_len;
};

struct nvm_id {
	u8	ver_id;
	u8	vmnt;
	u32	cap;
	u32	dom;
	struct nvm_addr_format ppaf;
	struct nvm_id_group grp;
} __packed;
struct nvm_target {
	struct list_head list;
	struct nvm_tgt_dev *dev;
	struct nvm_tgt_type *type;
	struct gendisk *disk;
};
#define ADDR_EMPTY (~0ULL)

#define NVM_TARGET_DEFAULT_OP (11)
#define NVM_TARGET_MIN_OP (3)
#define NVM_TARGET_MAX_OP (80)
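/*
 * A target's over-provisioning percentage must lie between
 * NVM_TARGET_MIN_OP and NVM_TARGET_MAX_OP. With op = 11, for instance,
 * roughly total_secs * (100 - 11) / 100 sectors are exposed to users,
 * and the remainder is reserved for the target's own bookkeeping.
 */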
#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0
typedef void (nvm_end_io_fn)(struct nvm_rq *);
struct nvm_rq {
	struct nvm_tgt_dev *dev;

	struct bio *bio;

	union {
		struct ppa_addr ppa_addr;
		dma_addr_t dma_ppa_list;
	};

	struct ppa_addr *ppa_list;

	void *meta_list;
	dma_addr_t dma_meta_list;

	nvm_end_io_fn *end_io;

	uint8_t opcode;
	uint16_t nr_ppas;
	uint16_t flags;

	u64 ppa_status; /* ppa media status */
	int error;

	void *private;
};
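/*
 * Example (illustrative sketch; mytgt_end_io is hypothetical): a target
 * fills an nvm_rq and submits it asynchronously; end_io runs when the
 * device completes the command:
 *
 *	rqd->opcode = NVM_OP_PWRITE;
 *	rqd->nr_ppas = 1;
 *	rqd->ppa_addr = ppa;		single PPA, no ppa_list needed
 *	rqd->end_io = mytgt_end_io;
 *	ret = nvm_submit_io(tgt_dev, rqd);
 */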
static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct nvm_rq);
}
static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
{
	return rqdata + 1;
}
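/*
 * The two helpers above rely on the per-request PDU being allocated
 * immediately after the nvm_rq itself, e.g. (illustrative sketch):
 *
 *	rqd = kzalloc(sizeof(struct nvm_rq) + pdu_size, GFP_KERNEL);
 *	pdu = nvm_rq_to_pdu(rqd);	points just past the nvm_rq
 *	rqd == nvm_rq_from_pdu(pdu);	recovers the request
 */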
enum {
	NVM_BLK_ST_FREE =	0x1,	/* Free block */
	NVM_BLK_ST_TGT =	0x2,	/* Block in use by target */
	NVM_BLK_ST_BAD =	0x8,	/* Bad block */
};
/* Device generic information */
struct nvm_geo {
	/* generic geometry */
	int nr_chnls;
	int all_luns; /* across channels */
	int nr_luns; /* per channel */
	int nr_chks; /* per lun */

	int sec_size;
	int oob_size;
	int mccap;

	int sec_per_chk;
	int sec_per_lun;

	int ws_min;
	int ws_opt;
	int ws_seq;
	int ws_per_chk;

	int max_rq_size;

	int op;

	struct nvm_addr_format ppaf;

	/* Legacy 1.2 specific geometry */
	int plane_mode; /* drive device in single, double or quad mode */
	int nr_planes;
	int sec_per_pg; /* only sectors for a single page */
	int sec_per_pl; /* all sectors across planes */
};
/* sub-device structure */
struct nvm_tgt_dev {
	/* Device information */
	struct nvm_geo geo;

	/* Base ppas for target LUNs */
	struct ppa_addr *luns;

	sector_t total_secs;

	struct nvm_id identity;
	struct request_queue *q;

	struct nvm_dev *parent;
	void *map;
};
struct nvm_dev {
	struct nvm_dev_ops *ops;

	struct list_head devices;

	/* Device information */
	struct nvm_geo geo;

	unsigned long total_secs;

	unsigned long *lun_map;
	void *dma_pool;

	struct nvm_id identity;

	/* Backend device */
	struct request_queue *q;
	char name[DISK_NAME_LEN];
	void *private_data;

	void *rmap;

	struct mutex mlock;
	spinlock_t lock;

	/* target management */
	struct list_head area_list;
	struct list_head targets;
};
static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev,
						  struct ppa_addr r)
{
	struct nvm_geo *geo = &tgt_dev->geo;
	struct ppa_addr l;

	l.ppa = ((u64)r.g.blk) << geo->ppaf.blk_offset;
	l.ppa |= ((u64)r.g.pg) << geo->ppaf.pg_offset;
	l.ppa |= ((u64)r.g.sec) << geo->ppaf.sect_offset;
	l.ppa |= ((u64)r.g.pl) << geo->ppaf.pln_offset;
	l.ppa |= ((u64)r.g.lun) << geo->ppaf.lun_offset;
	l.ppa |= ((u64)r.g.ch) << geo->ppaf.ch_offset;

	return l;
}
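/*
 * Worked example (illustrative geometry): with blk_offset = 0,
 * blk_len = 16 and pg_offset = 16, a generic address with blk = 5 and
 * pg = 3 packs to l.ppa = 5 | (3 << 16) = 0x30005.
 */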
static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev,
						  struct ppa_addr r)
{
	struct nvm_geo *geo = &tgt_dev->geo;
	struct ppa_addr l;

	l.ppa = 0;
	/*
	 * (r.ppa >> X offset) & X len bitmask. X eq. blk, pg, etc.
	 */
	l.g.blk = (r.ppa >> geo->ppaf.blk_offset) &
					(((1 << geo->ppaf.blk_len) - 1));
	l.g.pg |= (r.ppa >> geo->ppaf.pg_offset) &
					(((1 << geo->ppaf.pg_len) - 1));
	l.g.sec |= (r.ppa >> geo->ppaf.sect_offset) &
					(((1 << geo->ppaf.sect_len) - 1));
	l.g.pl |= (r.ppa >> geo->ppaf.pln_offset) &
					(((1 << geo->ppaf.pln_len) - 1));
	l.g.lun |= (r.ppa >> geo->ppaf.lun_offset) &
					(((1 << geo->ppaf.lun_len) - 1));
	l.g.ch |= (r.ppa >> geo->ppaf.ch_offset) &
					(((1 << geo->ppaf.ch_len) - 1));

	return l;
}
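/*
 * dev_to_generic_addr() inverts generic_to_dev_addr(): each field is
 * shifted back down by its device-specific offset and masked with
 * (1 << len) - 1, so converting a generic address to device format and
 * back recovers the original fields.
 */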
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
				int flags);
typedef void (nvm_tgt_exit_fn)(void *);
typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
struct nvm_tgt_type {
	const char *name;
	unsigned int version[3];
	int flags;

	/* target entry points */
	nvm_tgt_make_rq_fn *make_rq;
	nvm_tgt_capacity_fn *capacity;

	/* module-specific init/teardown */
	nvm_tgt_init_fn *init;
	nvm_tgt_exit_fn *exit;

	/* sysfs */
	nvm_tgt_sysfs_init_fn *sysfs_init;
	nvm_tgt_sysfs_exit_fn *sysfs_exit;

	/* For internal use */
	struct list_head list;
	struct module *owner;
};
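/*
 * Example (illustrative sketch; the mytgt_* symbols are hypothetical):
 * a target module describes itself with an nvm_tgt_type and registers
 * it on module load:
 *
 *	static struct nvm_tgt_type tt_mytgt = {
 *		.name		= "mytgt",
 *		.version	= {1, 0, 0},
 *
 *		.make_rq	= mytgt_make_rq,
 *		.capacity	= mytgt_capacity,
 *
 *		.init		= mytgt_init,
 *		.exit		= mytgt_exit,
 *
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	return nvm_register_tgt_type(&tt_mytgt);	on module init
 *	nvm_unregister_tgt_type(&tt_mytgt);		on module exit
 */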
extern int nvm_register_tgt_type(struct nvm_tgt_type *);
extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);

extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);

extern struct nvm_dev *nvm_alloc_dev(int);
extern int nvm_register(struct nvm_dev *);
extern void nvm_unregister(struct nvm_dev *);

extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
			      int, int);
extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
extern void nvm_end_io(struct nvm_rq *);
extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
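/*
 * Example (illustrative sketch): a target typically allocates the DMA
 * PPA list of a vectored request from the parent device's pool, and
 * frees it once the request completes:
 *
 *	rqd->ppa_list = nvm_dev_dma_alloc(tgt_dev->parent, GFP_KERNEL,
 *					  &rqd->dma_ppa_list);
 *	if (!rqd->ppa_list)
 *		return -ENOMEM;
 *
 *	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list,
 *			 rqd->dma_ppa_list);
 */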
#else /* CONFIG_NVM */
struct nvm_dev_ops;

static inline struct nvm_dev *nvm_alloc_dev(int node)
{
	return ERR_PTR(-EINVAL);
}
static inline int nvm_register(struct nvm_dev *dev)
{
	return -EINVAL;
}
static inline void nvm_unregister(struct nvm_dev *dev) {}
#endif /* CONFIG_NVM */
#endif /* LIGHTNVM.H */