// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics common host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-keyring.h>
static LIST_HEAD(nvmf_transports);
static DECLARE_RWSEM(nvmf_transports_rwsem);

static LIST_HEAD(nvmf_hosts);
static DEFINE_MUTEX(nvmf_hosts_mutex);

static struct nvmf_host *nvmf_default_host;
static struct nvmf_host *nvmf_host_alloc(const char *hostnqn, uuid_t *id)
{
	struct nvmf_host *host;

	host = kmalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return NULL;

	kref_init(&host->ref);
	uuid_copy(&host->id, id);
	strscpy(host->nqn, hostnqn, NVMF_NQN_SIZE);

	return host;
}
static struct nvmf_host *nvmf_host_add(const char *hostnqn, uuid_t *id)
{
	struct nvmf_host *host;

	mutex_lock(&nvmf_hosts_mutex);

	/*
	 * We have defined a host as how it is perceived by the target.
	 * Therefore, we don't allow different Host NQNs with the same Host ID.
	 * Similarly, we do not allow the usage of the same Host NQN with
	 * different Host IDs. This keeps host identification unambiguous.
	 */
	list_for_each_entry(host, &nvmf_hosts, list) {
		bool same_hostnqn = !strcmp(host->nqn, hostnqn);
		bool same_hostid = uuid_equal(&host->id, id);

		if (same_hostnqn && same_hostid) {
			kref_get(&host->ref);
			goto out_unlock;
		}
		if (same_hostnqn) {
			pr_err("found same hostnqn %s but different hostid %pUb\n",
			       hostnqn, id);
			host = ERR_PTR(-EINVAL);
			goto out_unlock;
		}
		if (same_hostid) {
			pr_err("found same hostid %pUb but different hostnqn %s\n",
			       id, hostnqn);
			host = ERR_PTR(-EINVAL);
			goto out_unlock;
		}
	}

	host = nvmf_host_alloc(hostnqn, id);
	if (!host) {
		host = ERR_PTR(-ENOMEM);
		goto out_unlock;
	}

	list_add_tail(&host->list, &nvmf_hosts);
out_unlock:
	mutex_unlock(&nvmf_hosts_mutex);
	return host;
}
static struct nvmf_host *nvmf_host_default(void)
{
	struct nvmf_host *host;
	char nqn[NVMF_NQN_SIZE];
	uuid_t id;

	uuid_gen(&id);
	snprintf(nqn, NVMF_NQN_SIZE,
		"nqn.2014-08.org.nvmexpress:uuid:%pUb", &id);

	host = nvmf_host_alloc(nqn, &id);
	if (!host)
		return NULL;

	mutex_lock(&nvmf_hosts_mutex);
	list_add_tail(&host->list, &nvmf_hosts);
	mutex_unlock(&nvmf_hosts_mutex);

	return host;
}
static void nvmf_host_destroy(struct kref *ref)
{
	struct nvmf_host *host = container_of(ref, struct nvmf_host, ref);

	mutex_lock(&nvmf_hosts_mutex);
	list_del(&host->list);
	mutex_unlock(&nvmf_hosts_mutex);

	kfree(host);
}

static void nvmf_host_put(struct nvmf_host *host)
{
	if (host)
		kref_put(&host->ref, nvmf_host_destroy);
}
/**
 * nvmf_get_address() - Get address/port
 * @ctrl:	Host NVMe controller instance from which to get the address
 * @buf:	OUTPUT parameter that will contain the address/port
 * @size:	buffer size
 */
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	int len = 0;

	if (ctrl->opts->mask & NVMF_OPT_TRADDR)
		len += scnprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
	if (ctrl->opts->mask & NVMF_OPT_TRSVCID)
		len += scnprintf(buf + len, size - len, "%strsvcid=%s",
				(len) ? "," : "", ctrl->opts->trsvcid);
	if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
		len += scnprintf(buf + len, size - len, "%shost_traddr=%s",
				(len) ? "," : "", ctrl->opts->host_traddr);
	if (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)
		len += scnprintf(buf + len, size - len, "%shost_iface=%s",
				(len) ? "," : "", ctrl->opts->host_iface);
	len += scnprintf(buf + len, size - len, "\n");

	return len;
}
EXPORT_SYMBOL_GPL(nvmf_get_address);
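/*
 * Usage sketch (illustrative, not part of the original file): transports
 * typically point their nvme_ctrl_ops->get_address callback straight at
 * nvmf_get_address(); the buffer then reads like
 * "traddr=192.168.1.10,trsvcid=4420\n" (address made up). The wrapper
 * name below is hypothetical.
 */
static int __maybe_unused example_get_address(struct nvme_ctrl *ctrl,
		char *buf, int size)
{
	/* options absent from opts->mask are simply omitted */
	return nvmf_get_address(ctrl, buf, size);
}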
/**
 * nvmf_reg_read32() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property read command to
 *		the allocated NVMe controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of
 *		the property after a successful read.
 *
 * Used by the host system to retrieve a 32-bit capsule property value
 * from an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register" concept applied to the
 *  NVMe fabrics space.)
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	struct nvme_command cmd = { };
	union nvme_result res;
	int ret;

	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
			NVME_QID_ANY, NVME_SUBMIT_RESERVED);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read32);
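/*
 * Usage sketch (illustrative): CSTS is a 32-bit fabrics property, so the
 * plain 32-bit Property Get suffices to poll controller readiness. The
 * helper name is hypothetical.
 */
static int __maybe_unused example_ctrl_is_ready(struct nvme_ctrl *ctrl)
{
	u32 csts;
	int ret;

	ret = nvmf_reg_read32(ctrl, NVME_REG_CSTS, &csts);
	if (ret)
		return ret;
	return !!(csts & NVME_CSTS_RDY);	/* 1 == controller ready */
}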
/**
 * nvmf_reg_read64() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property read command to
 *		the allocated controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of
 *		the property after a successful read.
 *
 * Used by the host system to retrieve a 64-bit capsule property value
 * from an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register" concept applied to the
 *  NVMe fabrics space.)
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	struct nvme_command cmd = { };
	union nvme_result res;
	int ret;

	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.attrib = 1;
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
			NVME_QID_ANY, NVME_SUBMIT_RESERVED);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read64);
/**
 * nvmf_reg_write32() - NVMe Fabrics "Property Write" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property write command to
 *		the allocated NVMe controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	Input parameter that contains the value to be
 *		written to the property.
 *
 * Used by the NVMe host system to write a 32-bit capsule property value
 * to an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register" concept applied to the
 *  NVMe fabrics space.)
 *
 * Return:
 *	0: successful write
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	struct nvme_command cmd = { };
	int ret;

	cmd.prop_set.opcode = nvme_fabrics_command;
	cmd.prop_set.fctype = nvme_fabrics_type_property_set;
	cmd.prop_set.attrib = 0;
	cmd.prop_set.offset = cpu_to_le32(off);
	cmd.prop_set.value = cpu_to_le64(val);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
			NVME_QID_ANY, NVME_SUBMIT_RESERVED);
	if (unlikely(ret))
		dev_err(ctrl->device,
			"Property Set error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_write32);
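/*
 * Usage sketch (illustrative): a read-modify-write of the CC property is
 * how a fabrics controller gets enabled; the core does this with far more
 * state checking, so treat this hypothetical helper as a minimal sketch
 * only.
 */
static int __maybe_unused example_set_cc_enable(struct nvme_ctrl *ctrl)
{
	u32 cc;
	int ret;

	ret = nvmf_reg_read32(ctrl, NVME_REG_CC, &cc);
	if (ret)
		return ret;
	return nvmf_reg_write32(ctrl, NVME_REG_CC, cc | NVME_CC_ENABLE);
}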
int nvmf_subsystem_reset(struct nvme_ctrl *ctrl)
{
	int ret;

	if (!nvme_wait_reset(ctrl))
		return -EBUSY;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, NVME_SUBSYS_RESET);
	if (ret)
		return ret;

	return nvme_try_sched_reset(ctrl);
}
EXPORT_SYMBOL_GPL(nvmf_subsystem_reset);
/**
 * nvmf_log_connect_error() - Error-parsing-diagnostic print out function for
 *				connect() errors.
 * @ctrl:	The specific /dev/nvmeX device that had the error.
 * @errval:	Error code to be decoded in a more human-friendly
 *		printout.
 * @offset:	For use with the NVMe error code
 *		NVME_SC_CONNECT_INVALID_PARAM.
 * @cmd:	This is the SQE portion of a submission capsule.
 * @data:	This is the "Data" portion of a submission capsule.
 */
static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
		int errval, int offset, struct nvme_command *cmd,
		struct nvmf_connect_data *data)
{
	int err_sctype = errval & ~NVME_STATUS_DNR;

	if (errval < 0) {
		dev_err(ctrl->device,
			"Connect command failed, errno: %d\n", errval);
		return;
	}

	switch (err_sctype) {
	case NVME_SC_CONNECT_INVALID_PARAM:
		if (offset >> 16) {
			char *inv_data = "Connect Invalid Data Parameter";

			switch (offset & 0xffff) {
			case (offsetof(struct nvmf_connect_data, cntlid)):
				dev_err(ctrl->device,
					"%s, cntlid: %d\n",
					inv_data, data->cntlid);
				break;
			case (offsetof(struct nvmf_connect_data, hostnqn)):
				dev_err(ctrl->device,
					"%s, hostnqn \"%s\"\n",
					inv_data, data->hostnqn);
				break;
			case (offsetof(struct nvmf_connect_data, subsysnqn)):
				dev_err(ctrl->device,
					"%s, subsysnqn \"%s\"\n",
					inv_data, data->subsysnqn);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_data, offset & 0xffff);
				break;
			}
		} else {
			char *inv_sqe = "Connect Invalid SQE Parameter";

			switch (offset) {
			case (offsetof(struct nvmf_connect_command, qid)):
				dev_err(ctrl->device,
					"%s, qid %d\n",
					inv_sqe, cmd->connect.qid);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_sqe, offset);
			}
		}
		break;
	case NVME_SC_CONNECT_INVALID_HOST:
		dev_err(ctrl->device,
			"Connect for subsystem %s is not allowed, hostnqn: %s\n",
			data->subsysnqn, data->hostnqn);
		break;
	case NVME_SC_CONNECT_CTRL_BUSY:
		dev_err(ctrl->device,
			"Connect command failed: controller is busy or not available\n");
		break;
	case NVME_SC_CONNECT_FORMAT:
		dev_err(ctrl->device,
			"Connect incompatible format: %d",
			cmd->connect.recfmt);
		break;
	case NVME_SC_HOST_PATH_ERROR:
		dev_err(ctrl->device,
			"Connect command failed: host path error\n");
		break;
	case NVME_SC_AUTH_REQUIRED:
		dev_err(ctrl->device,
			"Connect command failed: authentication required\n");
		break;
	default:
		dev_err(ctrl->device,
			"Connect command failed, error wo/DNR bit: %d\n",
			err_sctype);
		break;
	}
}
static struct nvmf_connect_data *nvmf_connect_data_prep(struct nvme_ctrl *ctrl,
		u16 cntlid)
{
	struct nvmf_connect_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	uuid_copy(&data->hostid, &ctrl->opts->host->id);
	data->cntlid = cpu_to_le16(cntlid);
	strscpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
	strscpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);

	return data;
}
static void nvmf_connect_cmd_prep(struct nvme_ctrl *ctrl, u16 qid,
		struct nvme_command *cmd)
{
	cmd->connect.opcode = nvme_fabrics_command;
	cmd->connect.fctype = nvme_fabrics_type_connect;
	cmd->connect.qid = cpu_to_le16(qid);

	if (qid) {
		cmd->connect.sqsize = cpu_to_le16(ctrl->sqsize);
	} else {
		cmd->connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);

		/*
		 * The Connect command carries the keep-alive timeout in
		 * milliseconds, while ctrl->kato is kept in seconds.
		 */
		cmd->connect.kato = cpu_to_le32(ctrl->kato * 1000);
	}

	if (ctrl->opts->disable_sqflow)
		cmd->connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
}
/**
 * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect"
 *				API function.
 * @ctrl:	Host nvme controller instance used to request
 *		a new NVMe controller allocation on the target
 *		system and establish an NVMe Admin connection to
 *		that controller.
 *
 * This function enables an NVMe host device to request a new allocation of
 * an NVMe controller resource on a target system as well as establish a
 * fabrics-protocol connection of the NVMe Admin queue between the
 * host system device and the allocated NVMe controller on the
 * target system via a NVMe Fabrics "Connect" command.
 */
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
{
	struct nvme_command cmd = { };
	union nvme_result res;
	struct nvmf_connect_data *data;
	int ret;
	u32 result;

	nvmf_connect_cmd_prep(ctrl, 0, &cmd);

	data = nvmf_connect_data_prep(ctrl, 0xffff);
	if (!data)
		return -ENOMEM;

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
			data, sizeof(*data), NVME_QID_ANY,
			NVME_SUBMIT_AT_HEAD |
			NVME_SUBMIT_NOWAIT |
			NVME_SUBMIT_RESERVED);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
		goto out_free_data;
	}

	result = le32_to_cpu(res.u32);
	ctrl->cntlid = result & 0xFFFF;
	if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
		/* Secure concatenation is not implemented */
		if (result & NVME_CONNECT_AUTHREQ_ASCR) {
			dev_warn(ctrl->device,
				 "qid 0: secure concatenation is not supported\n");
			ret = -EOPNOTSUPP;
			goto out_free_data;
		}
		/* Authentication required */
		ret = nvme_auth_negotiate(ctrl, 0);
		if (ret) {
			dev_warn(ctrl->device,
				 "qid 0: authentication setup failed\n");
			goto out_free_data;
		}
		ret = nvme_auth_wait(ctrl, 0);
		if (ret) {
			dev_warn(ctrl->device,
				 "qid 0: authentication failed, error %d\n",
				 ret);
		} else
			dev_info(ctrl->device,
				 "qid 0: authenticated\n");
	}

out_free_data:
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
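/*
 * Usage sketch (illustrative): a transport's controller setup path issues
 * the admin Connect once the admin queue and ctrl->fabrics_q are live,
 * then enables the controller. Hypothetical helper, reduced error
 * handling.
 */
static int __maybe_unused example_setup_admin_queue(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvmf_connect_admin_queue(ctrl);
	if (ret) {
		dev_err(ctrl->device, "admin Connect failed: %d\n", ret);
		return ret;
	}
	return nvme_enable_ctrl(ctrl);
}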
/**
 * nvmf_connect_io_queue() - NVMe Fabrics I/O Queue "Connect"
 *			     API function.
 * @ctrl:	Host nvme controller instance used to establish an
 *		NVMe I/O queue connection to the already allocated NVMe
 *		controller on the target system.
 * @qid:	NVMe I/O queue number for the new I/O connection between
 *		host and target (note qid == 0 is illegal as this is
 *		the Admin queue, per NVMe standard).
 *
 * This function issues a fabrics-protocol connection
 * of a NVMe I/O queue (via NVMe Fabrics "Connect" command)
 * between the host system device and the allocated NVMe controller
 * on the target system.
 *
 * Return:
 *	0: success
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
{
	struct nvme_command cmd = { };
	struct nvmf_connect_data *data;
	union nvme_result res;
	int ret;
	u32 result;

	nvmf_connect_cmd_prep(ctrl, qid, &cmd);

	data = nvmf_connect_data_prep(ctrl, ctrl->cntlid);
	if (!data)
		return -ENOMEM;

	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
			data, sizeof(*data), qid,
			NVME_SUBMIT_AT_HEAD |
			NVME_SUBMIT_RESERVED |
			NVME_SUBMIT_NOWAIT);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
		goto out_free_data;
	}

	result = le32_to_cpu(res.u32);
	if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
		/* Secure concatenation is not implemented */
		if (result & NVME_CONNECT_AUTHREQ_ASCR) {
			dev_warn(ctrl->device,
				 "qid %d: secure concatenation is not supported\n",
				 qid);
			ret = -EOPNOTSUPP;
			goto out_free_data;
		}
		/* Authentication required */
		ret = nvme_auth_negotiate(ctrl, qid);
		if (ret) {
			dev_warn(ctrl->device,
				 "qid %d: authentication setup failed\n", qid);
			goto out_free_data;
		}
		ret = nvme_auth_wait(ctrl, qid);
		if (ret)
			dev_warn(ctrl->device,
				 "qid %u: authentication failed, error %d\n",
				 qid, ret);
	}

out_free_data:
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
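/*
 * Usage sketch (illustrative): I/O queues are connected with qid 1..N
 * after the admin queue is up; qid 0 always designates the admin queue.
 * The helper name is hypothetical.
 */
static int __maybe_unused example_connect_io_queues(struct nvme_ctrl *ctrl,
		u16 nr_io_queues)
{
	u16 qid;
	int ret;

	for (qid = 1; qid <= nr_io_queues; qid++) {
		ret = nvmf_connect_io_queue(ctrl, qid);
		if (ret)
			return ret;
	}
	return 0;
}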
/*
 * Evaluate the status information returned by the transport in order to
 * decide if a reconnect attempt should be scheduled.
 *
 * Do not retry when:
 *
 * - the DNR bit is set and the specification states no further connect
 *   attempts with the same set of parameters should be attempted.
 *
 * - when the authentication attempt fails, because the key was invalid.
 *   This error code is set on the host side.
 */
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status)
{
	if (status > 0 && (status & NVME_STATUS_DNR))
		return false;

	if (status == -EKEYREJECTED)
		return false;

	if (ctrl->opts->max_reconnects == -1 ||
	    ctrl->nr_reconnects < ctrl->opts->max_reconnects)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
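/*
 * Usage sketch (illustrative): error-recovery paths use this predicate to
 * choose between scheduling another reconnect attempt and tearing the
 * controller down. The helper below is hypothetical; real transports
 * queue delayed work instead of just logging.
 */
static void __maybe_unused example_reconnect_or_remove(struct nvme_ctrl *ctrl,
		int status)
{
	if (nvmf_should_reconnect(ctrl, status))
		dev_info(ctrl->device, "reconnecting in %d seconds\n",
			 ctrl->opts->reconnect_delay);
	else
		nvme_delete_ctrl(ctrl);
}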
/**
 * nvmf_register_transport() - NVMe Fabrics Library registration function.
 * @ops:	Transport ops instance to be registered to the
 *		common fabrics library.
 *
 * API function that registers the type of specific transport fabric
 * being implemented to the common NVMe fabrics library. Part of
 * the overall init sequence of starting up a fabrics driver.
 */
int nvmf_register_transport(struct nvmf_transport_ops *ops)
{
	if (!ops->create_ctrl)
		return -EINVAL;

	down_write(&nvmf_transports_rwsem);
	list_add_tail(&ops->entry, &nvmf_transports);
	up_write(&nvmf_transports_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmf_register_transport);
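/*
 * Registration sketch (illustrative, compiled out): a minimal transport
 * provides at least .name, .module and .create_ctrl. Everything named
 * "example_*" here is hypothetical.
 */
#if 0
static struct nvme_ctrl *example_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static struct nvmf_transport_ops example_transport_ops = {
	.name		= "example",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR,
	.create_ctrl	= example_create_ctrl,
};

static int __init example_transport_init(void)
{
	return nvmf_register_transport(&example_transport_ops);
}
#endif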
/**
 * nvmf_unregister_transport() - NVMe Fabrics Library unregistration function.
 * @ops:	Transport ops instance to be unregistered from the
 *		common fabrics library.
 *
 * Fabrics API function that unregisters the type of specific transport
 * fabric being implemented from the common NVMe fabrics library.
 * Part of the overall exit sequence of unloading the implemented driver.
 */
void nvmf_unregister_transport(struct nvmf_transport_ops *ops)
{
	down_write(&nvmf_transports_rwsem);
	list_del(&ops->entry);
	up_write(&nvmf_transports_rwsem);
}
EXPORT_SYMBOL_GPL(nvmf_unregister_transport);
static struct nvmf_transport_ops *nvmf_lookup_transport(
		struct nvmf_ctrl_options *opts)
{
	struct nvmf_transport_ops *ops;

	lockdep_assert_held(&nvmf_transports_rwsem);

	list_for_each_entry(ops, &nvmf_transports, entry) {
		if (strcmp(ops->name, opts->transport) == 0)
			return ops;
	}

	return NULL;
}
static struct key *nvmf_parse_key(int key_id)
{
	struct key *key;

	if (!IS_ENABLED(CONFIG_NVME_TCP_TLS)) {
		pr_err("TLS is not supported\n");
		return ERR_PTR(-EINVAL);
	}

	key = nvme_tls_key_lookup(key_id);
	if (IS_ERR(key))
		pr_err("key id %08x not found\n", key_id);
	else
		pr_debug("Using key id %08x\n", key_id);
	return key;
}
static const match_table_t opt_tokens = {
	{ NVMF_OPT_TRANSPORT,		"transport=%s" },
	{ NVMF_OPT_TRADDR,		"traddr=%s" },
	{ NVMF_OPT_TRSVCID,		"trsvcid=%s" },
	{ NVMF_OPT_NQN,			"nqn=%s" },
	{ NVMF_OPT_QUEUE_SIZE,		"queue_size=%d" },
	{ NVMF_OPT_NR_IO_QUEUES,	"nr_io_queues=%d" },
	{ NVMF_OPT_RECONNECT_DELAY,	"reconnect_delay=%d" },
	{ NVMF_OPT_CTRL_LOSS_TMO,	"ctrl_loss_tmo=%d" },
	{ NVMF_OPT_KATO,		"keep_alive_tmo=%d" },
	{ NVMF_OPT_HOSTNQN,		"hostnqn=%s" },
	{ NVMF_OPT_HOST_TRADDR,		"host_traddr=%s" },
	{ NVMF_OPT_HOST_IFACE,		"host_iface=%s" },
	{ NVMF_OPT_HOST_ID,		"hostid=%s" },
	{ NVMF_OPT_DUP_CONNECT,		"duplicate_connect" },
	{ NVMF_OPT_DISABLE_SQFLOW,	"disable_sqflow" },
	{ NVMF_OPT_HDR_DIGEST,		"hdr_digest" },
	{ NVMF_OPT_DATA_DIGEST,		"data_digest" },
	{ NVMF_OPT_NR_WRITE_QUEUES,	"nr_write_queues=%d" },
	{ NVMF_OPT_NR_POLL_QUEUES,	"nr_poll_queues=%d" },
	{ NVMF_OPT_TOS,			"tos=%d" },
#ifdef CONFIG_NVME_TCP_TLS
	{ NVMF_OPT_KEYRING,		"keyring=%d" },
	{ NVMF_OPT_TLS_KEY,		"tls_key=%d" },
#endif
	{ NVMF_OPT_FAIL_FAST_TMO,	"fast_io_fail_tmo=%d" },
	{ NVMF_OPT_DISCOVERY,		"discovery" },
#ifdef CONFIG_NVME_HOST_AUTH
	{ NVMF_OPT_DHCHAP_SECRET,	"dhchap_secret=%s" },
	{ NVMF_OPT_DHCHAP_CTRL_SECRET,	"dhchap_ctrl_secret=%s" },
#endif
#ifdef CONFIG_NVME_TCP_TLS
	{ NVMF_OPT_TLS,			"tls" },
#endif
	{ NVMF_OPT_ERR,			NULL }
};
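/*
 * Example (illustrative): a connect request written to /dev/nvme-fabrics
 * is a comma-separated list of the tokens above on a single line, e.g.:
 *
 *   transport=tcp,traddr=192.168.1.10,trsvcid=4420,nqn=nqn.2014-08.org.nvmexpress:example,nr_io_queues=8
 *
 * The address and subsystem NQN here are made up for illustration.
 */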
static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	size_t nqnlen = 0;
	int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO, key_id;
	uuid_t hostid;
	char hostnqn[NVMF_NQN_SIZE];
	struct key *key;
	/* Set defaults */
	opts->queue_size = NVMF_DEF_QUEUE_SIZE;
	opts->nr_io_queues = num_online_cpus();
	opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
	opts->kato = 0;
	opts->duplicate_connect = false;
	opts->fast_io_fail_tmo = NVMF_DEF_FAIL_FAST_TMO;
	opts->hdr_digest = false;
	opts->data_digest = false;
	opts->tos = -1; /* < 0 == use transport default */
	opts->tls = false;
	opts->tls_key = NULL;
	opts->keyring = NULL;
	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	/* use default host if not given by user space */
	uuid_copy(&hostid, &nvmf_default_host->id);
	strscpy(hostnqn, nvmf_default_host->nqn, NVMF_NQN_SIZE);

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_TRANSPORT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->transport);
			opts->transport = p;
			break;
		case NVMF_OPT_NQN:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->subsysnqn);
			opts->subsysnqn = p;
			nqnlen = strlen(opts->subsysnqn);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					opts->subsysnqn, NVMF_NQN_SIZE);
				ret = -EINVAL;
				goto out;
			}
			opts->discovery_nqn =
				!(strcmp(opts->subsysnqn,
					 NVME_DISC_SUBSYS_NAME));
			break;
:
783 p
= match_strdup(args
);
791 case NVMF_OPT_TRSVCID
:
792 p
= match_strdup(args
);
797 kfree(opts
->trsvcid
);
800 case NVMF_OPT_QUEUE_SIZE
:
801 if (match_int(args
, &token
)) {
805 if (token
< NVMF_MIN_QUEUE_SIZE
||
806 token
> NVMF_MAX_QUEUE_SIZE
) {
807 pr_err("Invalid queue_size %d\n", token
);
811 opts
->queue_size
= token
;
		case NVMF_OPT_NR_IO_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid number of IOQs %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			if (opts->discovery_nqn) {
				pr_debug("Ignoring nr_io_queues value for discovery controller\n");
				break;
			}
			opts->nr_io_queues = min_t(unsigned int,
					num_online_cpus(), token);
			break;
		case NVMF_OPT_KATO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < 0) {
				pr_err("Invalid keep_alive_tmo %d\n", token);
				ret = -EINVAL;
				goto out;
			} else if (token == 0 && !opts->discovery_nqn) {
				/* Allowed for debug */
				pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
			}
			opts->kato = token;
			break;
		case NVMF_OPT_CTRL_LOSS_TMO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < 0)
				pr_warn("ctrl_loss_tmo < 0 will reconnect forever\n");
			ctrl_loss_tmo = token;
			break;
		case NVMF_OPT_FAIL_FAST_TMO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token >= 0)
				pr_warn("I/O fail on reconnect controller after %d sec\n",
					token);
			else
				token = -1;
			opts->fast_io_fail_tmo = token;
			break;
		case NVMF_OPT_HOSTNQN:
			if (opts->host) {
				pr_err("hostnqn already user-assigned: %s\n",
				       opts->host->nqn);
				ret = -EADDRINUSE;
				goto out;
			}
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			nqnlen = strlen(p);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					p, NVMF_NQN_SIZE);
				kfree(p);
				ret = -EINVAL;
				goto out;
			}
			strscpy(hostnqn, p, NVMF_NQN_SIZE);
			kfree(p);
			break;
		case NVMF_OPT_RECONNECT_DELAY:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid reconnect_delay %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->reconnect_delay = token;
			break;
		case NVMF_OPT_HOST_TRADDR:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->host_traddr);
			opts->host_traddr = p;
			break;
		case NVMF_OPT_HOST_IFACE:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->host_iface);
			opts->host_iface = p;
			break;
		case NVMF_OPT_HOST_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = uuid_parse(p, &hostid);
			if (ret) {
				pr_err("Invalid hostid %s\n", p);
				ret = -EINVAL;
				kfree(p);
				goto out;
			}
			kfree(p);
			break;
		case NVMF_OPT_DUP_CONNECT:
			opts->duplicate_connect = true;
			break;
		case NVMF_OPT_DISABLE_SQFLOW:
			opts->disable_sqflow = true;
			break;
		case NVMF_OPT_HDR_DIGEST:
			opts->hdr_digest = true;
			break;
		case NVMF_OPT_DATA_DIGEST:
			opts->data_digest = true;
			break;
		case NVMF_OPT_NR_WRITE_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid nr_write_queues %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->nr_write_queues = token;
			break;
		case NVMF_OPT_NR_POLL_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid nr_poll_queues %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->nr_poll_queues = token;
			break;
		case NVMF_OPT_TOS:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < 0) {
				pr_err("Invalid type of service %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			if (token > 255) {
				pr_warn("Clamping type of service to 255\n");
				token = 255;
			}
			opts->tos = token;
			break;
		case NVMF_OPT_KEYRING:
			if (match_int(args, &key_id) || key_id <= 0) {
				ret = -EINVAL;
				goto out;
			}
			key = nvmf_parse_key(key_id);
			if (IS_ERR(key)) {
				ret = PTR_ERR(key);
				goto out;
			}
			key_put(opts->keyring);
			opts->keyring = key;
			break;
		case NVMF_OPT_TLS_KEY:
			if (match_int(args, &key_id) || key_id <= 0) {
				ret = -EINVAL;
				goto out;
			}
			key = nvmf_parse_key(key_id);
			if (IS_ERR(key)) {
				ret = PTR_ERR(key);
				goto out;
			}
			key_put(opts->tls_key);
			opts->tls_key = key;
			break;
		case NVMF_OPT_DISCOVERY:
			opts->discovery_nqn = true;
			break;
		case NVMF_OPT_DHCHAP_SECRET:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
				pr_err("Invalid DH-CHAP secret %s\n", p);
				ret = -EINVAL;
				goto out;
			}
			kfree(opts->dhchap_secret);
			opts->dhchap_secret = p;
			break;
		case NVMF_OPT_DHCHAP_CTRL_SECRET:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
				pr_err("Invalid DH-CHAP controller secret %s\n", p);
				ret = -EINVAL;
				goto out;
			}
			kfree(opts->dhchap_ctrl_secret);
			opts->dhchap_ctrl_secret = p;
			break;
		case NVMF_OPT_TLS:
			if (!IS_ENABLED(CONFIG_NVME_TCP_TLS)) {
				pr_err("TLS is not supported\n");
				ret = -EINVAL;
				goto out;
			}
			opts->tls = true;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
				p);
			ret = -EINVAL;
			goto out;
		}
	}
	if (opts->discovery_nqn) {
		opts->nr_io_queues = 0;
		opts->nr_write_queues = 0;
		opts->nr_poll_queues = 0;
		opts->duplicate_connect = true;
	} else {
		if (!opts->kato)
			opts->kato = NVME_DEFAULT_KATO;
	}
	if (ctrl_loss_tmo < 0) {
		opts->max_reconnects = -1;
	} else {
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);
		if (ctrl_loss_tmo < opts->fast_io_fail_tmo)
			pr_warn("failfast tmo (%d) larger than controller loss tmo (%d)\n",
				opts->fast_io_fail_tmo, ctrl_loss_tmo);
	}
	opts->host = nvmf_host_add(hostnqn, &hostid);
	if (IS_ERR(opts->host)) {
		ret = PTR_ERR(opts->host);
		opts->host = NULL;
		goto out;
	}

out:
	kfree(options);
	return ret;
}
void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues,
			u32 io_queues[HCTX_MAX_TYPES])
{
	if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
		/*
		 * separate read/write queues
		 * hand out dedicated default queues only after we have
		 * sufficient read queues.
		 */
		io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
		nr_io_queues -= io_queues[HCTX_TYPE_READ];
		io_queues[HCTX_TYPE_DEFAULT] =
			min(opts->nr_write_queues, nr_io_queues);
		nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/*
		 * shared read/write queues
		 * either no write queues were requested, or we don't have
		 * sufficient queue count to have dedicated default queues.
		 */
		io_queues[HCTX_TYPE_DEFAULT] =
			min(opts->nr_io_queues, nr_io_queues);
		nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT];
	}

	if (opts->nr_poll_queues && nr_io_queues) {
		/* map dedicated poll queues only if we have queues left */
		io_queues[HCTX_TYPE_POLL] =
			min(opts->nr_poll_queues, nr_io_queues);
	}
}
EXPORT_SYMBOL_GPL(nvmf_set_io_queues);
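/*
 * Worked example (illustrative, numbers made up): with nr_io_queues=4,
 * nr_write_queues=2 and nr_poll_queues=2 requested, and 8 queues granted
 * by the transport, the split above yields READ=4, DEFAULT(write)=2 and
 * POLL=2. With only 5 queues granted it degrades gracefully to READ=4,
 * DEFAULT=1 and POLL=0, since poll queues are handed out last.
 */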
void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl,
		     u32 io_queues[HCTX_MAX_TYPES])
{
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (opts->nr_write_queues && io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}

	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
	if (opts->nr_poll_queues && io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		set->map[HCTX_TYPE_POLL].nr_queues = io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			io_queues[HCTX_TYPE_DEFAULT] +
			io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}

	dev_info(ctrl->device,
		"mapped %d/%d/%d default/read/poll queues.\n",
		io_queues[HCTX_TYPE_DEFAULT],
		io_queues[HCTX_TYPE_READ],
		io_queues[HCTX_TYPE_POLL]);
}
EXPORT_SYMBOL_GPL(nvmf_map_queues);
static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
		unsigned int required_opts)
{
	if ((opts->mask & required_opts) != required_opts) {
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & required_opts) &&
			    !(opt_tokens[i].token & opts->mask)) {
				pr_warn("missing parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}
bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
		struct nvmf_ctrl_options *opts)
{
	if (!nvmf_ctlr_matches_baseopts(ctrl, opts) ||
	    strcmp(opts->traddr, ctrl->opts->traddr) ||
	    strcmp(opts->trsvcid, ctrl->opts->trsvcid))
		return false;

	/*
	 * Checking the local address or host interfaces is rough.
	 *
	 * In most cases, none is specified and the host port or
	 * host interface is selected by the stack.
	 *
	 * Assume no match if:
	 * - local address or host interface is specified and address
	 *   or host interface is not the same
	 * - local address or host interface is not specified but
	 *   remote is, or vice versa (admin using specific
	 *   host_traddr/host_iface when it matters).
	 */
	if ((opts->mask & NVMF_OPT_HOST_TRADDR) &&
	    (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
		if (strcmp(opts->host_traddr, ctrl->opts->host_traddr))
			return false;
	} else if ((opts->mask & NVMF_OPT_HOST_TRADDR) ||
		   (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
		return false;
	}

	if ((opts->mask & NVMF_OPT_HOST_IFACE) &&
	    (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)) {
		if (strcmp(opts->host_iface, ctrl->opts->host_iface))
			return false;
	} else if ((opts->mask & NVMF_OPT_HOST_IFACE) ||
		   (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)) {
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(nvmf_ip_options_match);
static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
		unsigned int allowed_opts)
{
	if (opts->mask & ~allowed_opts) {
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & opts->mask) &&
			    (opt_tokens[i].token & ~allowed_opts)) {
				pr_warn("invalid parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}
void nvmf_free_options(struct nvmf_ctrl_options *opts)
{
	nvmf_host_put(opts->host);
	key_put(opts->keyring);
	key_put(opts->tls_key);
	kfree(opts->transport);
	kfree(opts->traddr);
	kfree(opts->trsvcid);
	kfree(opts->subsysnqn);
	kfree(opts->host_traddr);
	kfree(opts->host_iface);
	kfree(opts->dhchap_secret);
	kfree(opts->dhchap_ctrl_secret);
	kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);
#define NVMF_REQUIRED_OPTS	(NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
#define NVMF_ALLOWED_OPTS	(NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
				 NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
				 NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
				 NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\
				 NVMF_OPT_FAIL_FAST_TMO | NVMF_OPT_DHCHAP_SECRET |\
				 NVMF_OPT_DHCHAP_CTRL_SECRET)
static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf)
{
	struct nvmf_ctrl_options *opts;
	struct nvmf_transport_ops *ops;
	struct nvme_ctrl *ctrl;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);

	ret = nvmf_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	request_module("nvme-%s", opts->transport);

	/*
	 * Check the generic options first as we need a valid transport for
	 * the lookup below. Then clear the generic flags so that transport
	 * drivers don't have to care about them.
	 */
	ret = nvmf_check_required_opts(opts, NVMF_REQUIRED_OPTS);
	if (ret)
		goto out_free_opts;
	opts->mask &= ~NVMF_REQUIRED_OPTS;

	down_read(&nvmf_transports_rwsem);
	ops = nvmf_lookup_transport(opts);
	if (!ops) {
		pr_info("no handler found for transport %s.\n",
			opts->transport);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!try_module_get(ops->module)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	up_read(&nvmf_transports_rwsem);

	ret = nvmf_check_required_opts(opts, ops->required_opts);
	if (ret)
		goto out_module_put;
	ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
				ops->allowed_opts | ops->required_opts);
	if (ret)
		goto out_module_put;

	ctrl = ops->create_ctrl(dev, opts);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_module_put;
	}

	module_put(ops->module);
	return ctrl;

out_module_put:
	module_put(ops->module);
	goto out_free_opts;
out_unlock:
	up_read(&nvmf_transports_rwsem);
out_free_opts:
	nvmf_free_options(opts);
	return ERR_PTR(ret);
}
static const struct class nvmf_class = {
	.name = "nvme-fabrics",
};

static struct device *nvmf_device;
static DEFINE_MUTEX(nvmf_dev_mutex);
static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *pos)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl;
	const char *buf;
	int ret = 0;

	if (count > PAGE_SIZE)
		return -ENOMEM;

	buf = memdup_user_nul(ubuf, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&nvmf_dev_mutex);
	if (seq_file->private) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ctrl = nvmf_create_ctrl(nvmf_device, buf);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_unlock;
	}

	seq_file->private = ctrl;

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	kfree(buf);
	return ret ? ret : count;
}
static void __nvmf_concat_opt_tokens(struct seq_file *seq_file)
{
	const struct match_token *tok;
	int idx;

	/*
	 * Add dummy entries for instance and cntlid to
	 * signal an invalid/non-existing controller
	 */
	seq_puts(seq_file, "instance=-1,cntlid=-1");
	for (idx = 0; idx < ARRAY_SIZE(opt_tokens); idx++) {
		tok = &opt_tokens[idx];
		if (tok->token == NVMF_OPT_ERR)
			continue;
		seq_putc(seq_file, ',');
		seq_puts(seq_file, tok->pattern);
	}
	seq_putc(seq_file, '\n');
}
static int nvmf_dev_show(struct seq_file *seq_file, void *private)
{
	struct nvme_ctrl *ctrl;

	mutex_lock(&nvmf_dev_mutex);
	ctrl = seq_file->private;
	if (!ctrl) {
		__nvmf_concat_opt_tokens(seq_file);
		goto out_unlock;
	}

	seq_printf(seq_file, "instance=%d,cntlid=%d\n",
			ctrl->instance, ctrl->cntlid);

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	return 0;
}
static int nvmf_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The miscdevice code initializes file->private_data, but doesn't
	 * make use of it later.
	 */
	file->private_data = NULL;
	return single_open(file, nvmf_dev_show, NULL);
}

static int nvmf_dev_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl = seq_file->private;

	if (ctrl)
		nvme_put_ctrl(ctrl);
	return single_release(inode, file);
}
static const struct file_operations nvmf_dev_fops = {
	.owner		= THIS_MODULE,
	.write		= nvmf_dev_write,
	.read		= seq_read,
	.open		= nvmf_dev_open,
	.release	= nvmf_dev_release,
};

static struct miscdevice nvmf_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "nvme-fabrics",
	.fops		= &nvmf_dev_fops,
};
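/*
 * Usage sketch (illustrative): from userspace, controller creation goes
 * through this misc device, e.g.
 *
 *   echo "transport=loop,nqn=testnqn" > /dev/nvme-fabrics
 *
 * and reading back the still-open file yields "instance=%d,cntlid=%d"
 * for the newly created controller (see nvmf_dev_show() above).
 */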
static int __init nvmf_init(void)
{
	int ret;

	nvmf_default_host = nvmf_host_default();
	if (!nvmf_default_host)
		return -ENOMEM;

	ret = class_register(&nvmf_class);
	if (ret) {
		pr_err("couldn't register class nvme-fabrics\n");
		goto out_free_host;
	}

	nvmf_device =
		device_create(&nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
	if (IS_ERR(nvmf_device)) {
		pr_err("couldn't create nvme-fabrics device!\n");
		ret = PTR_ERR(nvmf_device);
		goto out_destroy_class;
	}

	ret = misc_register(&nvmf_misc);
	if (ret) {
		pr_err("couldn't register misc device: %d\n", ret);
		goto out_destroy_device;
	}

	return 0;

out_destroy_device:
	device_destroy(&nvmf_class, MKDEV(0, 0));
out_destroy_class:
	class_unregister(&nvmf_class);
out_free_host:
	nvmf_host_put(nvmf_default_host);
	return ret;
}
static void __exit nvmf_exit(void)
{
	misc_deregister(&nvmf_misc);
	device_destroy(&nvmf_class, MKDEV(0, 0));
	class_unregister(&nvmf_class);
	nvmf_host_put(nvmf_default_host);

	BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_send_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_receive_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_negotiate_data) != 8);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_challenge_data) != 16);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_reply_data) != 16);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success1_data) != 16);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success2_data) != 16);
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NVMe host fabrics library");

module_init(nvmf_init);
module_exit(nvmf_exit);