/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H
#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>
#include <linux/kfifo.h>
#define NVMET_DEFAULT_VS		NVME_VS(2, 1, 0)

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL	"Linux"
#define NVMET_MN_MAX_SIZE		40
#define NVMET_SN_MAX_SIZE		20
#define NVMET_FR_MAX_SIZE		8
#define NVMET_PR_LOG_QUEUE_SIZE		64
/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)
/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)
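
/*
 * Illustrative sketch (not part of this header): a Set Features handler
 * for the Async Event Configuration feature would reject any bits
 * outside the set advertised above, roughly along the lines of:
 *
 *	if (val32 & ~NVMET_AEN_CFG_ALL)
 *		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 *
 * where 'val32' is assumed to hold cdw11 of the Set Features command.
 */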
/* Helper Macros when NVMe error is NVME_SC_CONNECT_INVALID_PARAM
 * The 16 bit shift is to set IATTR bit to 1, which means offending
 * offset starts in the data section of connect()
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
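
/*
 * Illustrative sketch: a connect handler that spots a bad cntlid in the
 * connect data could report the offending offset back to the host
 * roughly as follows ('req' and 'status' are assumed local state):
 *
 *	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 */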
struct nvmet_pr_registrant {
	enum nvme_pr_type	rtype;
	struct list_head	entry;
};

struct nvmet_pr {
	unsigned long		notify_mask;
	struct nvmet_pr_registrant __rcu *holder;
	/*
	 * During the execution of the reservation command, mutual
	 * exclusion is required throughout the process. However,
	 * while waiting asynchronously for the 'per controller
	 * percpu_ref' to complete before the 'preempt and abort'
	 * command finishes, a semaphore is needed to ensure mutual
	 * exclusion instead of a mutex.
	 */
	struct semaphore	pr_sem;
	struct list_head	registrant_list;
};
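
/*
 * Illustrative sketch: a reservation handler serializes on pr_sem
 * rather than a mutex because the 'preempt and abort' path sleeps while
 * waiting for per-controller references to drain. Assuming the
 * namespace embeds this struct as 'ns->pr':
 *
 *	down(&ns->pr.pr_sem);
 *	... update reservation state, wait for pc_refs if aborting ...
 *	up(&ns->pr.pr_sem);
 */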
struct nvmet_pr_per_ctrl_ref {
	struct percpu_ref	ref;
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ns {
	struct percpu_ref	ref;
	struct file		*bdev_file;
	struct block_device	*bdev;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;

	struct pci_dev		*p2p_dev;
	struct xarray		pr_per_ctrl_refs;
};
static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}
static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}
struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
#ifdef CONFIG_NVME_TARGET_AUTH
	struct delayed_work	auth_expired_work;
#endif
	struct completion	free_done;
	struct completion	confirm_done;
};
struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}
/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	int				inline_data_size;
	const struct nvmet_fabrics_ops	*tr_ops;
};
static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}
static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}
static inline u8 nvmet_port_disc_addr_treq_secure_channel(struct nvmet_port *port)
{
	return (port->disc_addr.treq & NVME_TREQ_SECURE_CHANNEL_MASK);
}
static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port)
{
	return nvmet_port_disc_addr_treq_secure_channel(port) == NVMF_TREQ_REQUIRED;
}
struct nvmet_pr_log_mgr {
	DECLARE_KFIFO(log_queue, struct nvme_pr_log, NVMET_PR_LOG_QUEUE_SIZE);
};
struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_sq		**sqs;

	struct nvmet_port	*port;

	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;
#ifdef CONFIG_NVME_TARGET_DEBUGFS
	struct dentry		*debugfs_dir;
#endif
	spinlock_t		error_lock;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
#ifdef CONFIG_NVME_TARGET_AUTH
	struct nvme_dhchap_key	*host_key;
	struct nvme_dhchap_key	*ctrl_key;
	struct crypto_kpp	*dh_tfm;
#endif
	struct nvmet_pr_log_mgr	pr_log_mgr;
};
struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct xarray		namespaces;
	unsigned int		nr_namespaces;

	struct list_head	ctrls;

	struct list_head	hosts;

#ifdef CONFIG_NVME_TARGET_DEBUGFS
	struct dentry		*debugfs_dir;
#endif

	char			serial[NVMET_SN_MAX_SIZE];
	bool			subsys_discovered;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl	*passthru_ctrl;
	char			*passthru_ctrl_path;
	struct config_group	passthru_group;
	unsigned int		admin_timeout;
	unsigned int		io_timeout;
	unsigned int		clear_ids;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

#ifdef CONFIG_BLK_DEV_ZONED
#endif /* CONFIG_BLK_DEV_ZONED */
};
static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}
static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}
struct nvmet_host {
	struct config_group	group;
	u8			*dhchap_ctrl_secret;
	u8			dhchap_ctrl_key_hash;
	u8			dhchap_dhgroup_id;
};
static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}
static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}
struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int flags;
#define NVMF_KEYED_SGLS			(1 << 0)
#define NVMF_METADATA_SUPPORTED		(1 << 1)
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	ssize_t (*host_traddr)(struct nvmet_ctrl *ctrl,
			char *traddr, size_t traddr_len);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
	u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
};
#define NVMET_MAX_INLINE_BIOVEC		8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)
struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct scatterlist	*sg;
	struct scatterlist	*metadata_sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio	inline_bio;
		} b;
		struct {
			struct bio_vec		*bvec;
			struct work_struct	work;
		} f;
		struct {
			struct bio		inline_bio;
			struct work_struct	work;
		} p;
#ifdef CONFIG_BLK_DEV_ZONED
		struct {
			struct bio		inline_bio;
			struct work_struct	zmgmt_work;
		} z;
#endif /* CONFIG_BLK_DEV_ZONED */
		struct {
			struct work_struct	abort_work;
		} r;
	};
	int			sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;

	struct nvmet_pr_per_ctrl_ref *pc_ref;
};
#define NVMET_MAX_MPOOL_BVEC		16
extern struct kmem_cache *nvmet_bvec_cache;
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;
static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}
/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
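
/*
 * Illustrative sketch: a transport mapping a request's scatterlist for
 * DMA picks the direction from the command, so a host *write* (data
 * flowing into target memory) maps as DMA_FROM_DEVICE:
 *
 *	count = dma_map_sg(dev, req->sg, req->sg_cnt, nvmet_data_dir(req));
 */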
struct nvmet_async_event {
	struct list_head	entry;
};
static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & (1 << 15);

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}
static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}
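
/*
 * Illustrative sketch: an event producer gates sending on
 * nvmet_aen_bit_disabled(), which also masks the bit until the host
 * re-arms it by reading the log page without RAE set (see
 * nvmet_clear_aen_bit() above). For a namespace attribute change:
 *
 *	if (!nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
 *		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
 *				NVME_AER_NOTICE_NS_CHANGED,
 *				NVME_LOG_CHANGED_NS);
 */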
void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);
u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);

void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp,
		uuid_t *hostid);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
		const char *hostnqn, u16 cntlid,
		struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req);
ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl,
		char *traddr, size_t traddr_len);
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);
void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_port_del_ctrls(struct nvmet_port *port,
		struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);
bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);
#define NVMET_MIN_QUEUE_SIZE	16
#define NVMET_MAX_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD(ctrl)	(NVME_CAP_MQES(ctrl->cap) + 1)
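
/*
 * Worked example: MQES in the CAP register is 0's based, so a CAP value
 * reporting MQES == 1023 makes NVMET_MAX_CMD(ctrl) == 1024 outstanding
 * commands per queue.
 */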
/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES		1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to it
 * by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS		128
#define NVMET_DEFAULT_ANA_GRPID		1

#define NVMET_DISC_KATO_MS		120000
int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;
bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);
int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
bool nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);
bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req);
void nvmet_execute_identify_ns_zns(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
void nvmet_bdev_execute_zone_append(struct nvmet_req *req);
static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}
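
/*
 * Worked example: rw.length is 0's based, so length == 7 on a namespace
 * with 4KiB blocks (blksize_shift == 12) yields (7 + 1) << 12 == 32768
 * bytes of data to transfer.
 */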
static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return 0;
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}
static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}
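
/*
 * Worked example: dsm.nr is 0's based as well, so nr == 255 requests
 * 256 ranges; with sizeof(struct nvme_dsm_range) == 16 that is 4096
 * bytes to transfer.
 */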
static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
{
	return req->sq->ctrl->subsys;
}

static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
{
	return subsys->type != NVME_NQN_NVME;
}
#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return false;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */
static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
	return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);
/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}
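
/*
 * Worked examples: to0based() saturates at both ends, so to0based(0)
 * and to0based(1) are both 0, to0based(256) is 255, and any value of
 * 65536 or more yields 0xffff.
 */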
static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return false;
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}
static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
{
	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
}

static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}
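
/*
 * Worked example: with 4KiB logical blocks (blksize_shift == 12) and
 * 512-byte kernel sectors (SECTOR_SHIFT == 9), nvmet_lba_to_sect()
 * turns LBA 10 into sector 10 << 3 == 80, and nvmet_sect_to_lba()
 * turns sector 80 back into LBA 80 >> 3 == 10.
 */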
static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
{
	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}
static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
{
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}
#ifdef CONFIG_NVME_TARGET_AUTH
void nvmet_execute_auth_send(struct nvmet_req *req);
void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
		       bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
bool nvmet_check_auth_status(struct nvmet_req *req);
int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return ctrl->host_key != NULL;
}
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
				u8 *buf, int buf_size);
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
			    u8 *buf, int buf_size);
#else
static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
{
	return 0;
}
static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
}
static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {}
static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {}
static inline bool nvmet_check_auth_status(struct nvmet_req *req)
{
	return true;
}
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return false;
}
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
#endif /* CONFIG_NVME_TARGET_AUTH */
int nvmet_pr_init_ns(struct nvmet_ns *ns);
u16 nvmet_parse_pr_cmd(struct nvmet_req *req);
u16 nvmet_pr_check_cmd_access(struct nvmet_req *req);
int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl);
void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl);
void nvmet_pr_exit_ns(struct nvmet_ns *ns);
void nvmet_execute_get_log_page_resv(struct nvmet_req *req);
u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask);
u16 nvmet_get_feat_resv_notif_mask(struct nvmet_req *req);
u16 nvmet_pr_get_ns_pc_ref(struct nvmet_req *req);
static inline void nvmet_pr_put_ns_pc_ref(struct nvmet_pr_per_ctrl_ref *pc_ref)
{
	percpu_ref_put(&pc_ref->ref);
}
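
/*
 * Illustrative sketch: an I/O path that takes a per-controller PR
 * reference stores it in req->pc_ref and drops it again on completion:
 *
 *	status = nvmet_pr_get_ns_pc_ref(req);
 *	... execute the command ...
 *	if (req->pc_ref)
 *		nvmet_pr_put_ns_pc_ref(req->pc_ref);
 */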
#endif /* _NVMET_H */