// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics common host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-keyring.h>

static LIST_HEAD(nvmf_transports);
static DECLARE_RWSEM(nvmf_transports_rwsem);

static LIST_HEAD(nvmf_hosts);
static DEFINE_MUTEX(nvmf_hosts_mutex);

static struct nvmf_host *nvmf_default_host;

static struct nvmf_host *nvmf_host_alloc(const char *hostnqn, uuid_t *id)
{
	struct nvmf_host *host;

	host = kmalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return NULL;

	kref_init(&host->ref);
	uuid_copy(&host->id, id);
	strscpy(host->nqn, hostnqn, NVMF_NQN_SIZE);

	return host;
}

static struct nvmf_host *nvmf_host_add(const char *hostnqn, uuid_t *id)
{
	struct nvmf_host *host;

	mutex_lock(&nvmf_hosts_mutex);

	/*
	 * We have defined a host as how it is perceived by the target.
	 * Therefore, we don't allow different Host NQNs with the same Host ID.
	 * Similarly, we do not allow the usage of the same Host NQN with
	 * different Host IDs. This'll maintain unambiguous host identification.
	 */
	list_for_each_entry(host, &nvmf_hosts, list) {
		bool same_hostnqn = !strcmp(host->nqn, hostnqn);
		bool same_hostid = uuid_equal(&host->id, id);

		if (same_hostnqn && same_hostid) {
			kref_get(&host->ref);
			goto out_unlock;
		}
		if (same_hostnqn) {
			pr_err("found same hostnqn %s but different hostid %pUb\n",
			       hostnqn, id);
			host = ERR_PTR(-EINVAL);
			goto out_unlock;
		}
		if (same_hostid) {
			pr_err("found same hostid %pUb but different hostnqn %s\n",
			       id, hostnqn);
			host = ERR_PTR(-EINVAL);
			goto out_unlock;
		}
	}

	host = nvmf_host_alloc(hostnqn, id);
	if (!host) {
		host = ERR_PTR(-ENOMEM);
		goto out_unlock;
	}

	list_add_tail(&host->list, &nvmf_hosts);
out_unlock:
	mutex_unlock(&nvmf_hosts_mutex);
	return host;
}

static struct nvmf_host *nvmf_host_default(void)
{
	struct nvmf_host *host;
	char nqn[NVMF_NQN_SIZE];
	uuid_t id;

	uuid_gen(&id);
	snprintf(nqn, NVMF_NQN_SIZE,
		"nqn.2014-08.org.nvmexpress:uuid:%pUb", &id);

	host = nvmf_host_alloc(nqn, &id);
	if (!host)
		return NULL;

	mutex_lock(&nvmf_hosts_mutex);
	list_add_tail(&host->list, &nvmf_hosts);
	mutex_unlock(&nvmf_hosts_mutex);

	return host;
}

static void nvmf_host_destroy(struct kref *ref)
{
	struct nvmf_host *host = container_of(ref, struct nvmf_host, ref);

	mutex_lock(&nvmf_hosts_mutex);
	list_del(&host->list);
	mutex_unlock(&nvmf_hosts_mutex);

	kfree(host);
}

static void nvmf_host_put(struct nvmf_host *host)
{
	if (host)
		kref_put(&host->ref, nvmf_host_destroy);
}

/**
 * nvmf_get_address() - Get address/port
 * @ctrl:	Host NVMe controller instance which we got the address
 * @buf:	OUTPUT parameter that will contain the address/port
 * @size:	buffer size
 */
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	int len = 0;

	if (ctrl->opts->mask & NVMF_OPT_TRADDR)
		len += scnprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
	if (ctrl->opts->mask & NVMF_OPT_TRSVCID)
		len += scnprintf(buf + len, size - len, "%strsvcid=%s",
				(len) ? "," : "", ctrl->opts->trsvcid);
	if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
		len += scnprintf(buf + len, size - len, "%shost_traddr=%s",
				(len) ? "," : "", ctrl->opts->host_traddr);
	if (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)
		len += scnprintf(buf + len, size - len, "%shost_iface=%s",
				(len) ? "," : "", ctrl->opts->host_iface);
	len += scnprintf(buf + len, size - len, "\n");

	return len;
}
EXPORT_SYMBOL_GPL(nvmf_get_address);
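
/*
 * Usage sketch (illustrative; the transport wiring is an assumption, not
 * shown in this file): transports typically point their ->get_address()
 * callback at nvmf_get_address() to back the sysfs "address" attribute.
 * For a controller created with "traddr=192.168.1.10,trsvcid=4420" the
 * buffer would contain:
 *
 *	traddr=192.168.1.10,trsvcid=4420
 *
 * scnprintf() never writes more than the remaining buffer size, so a small
 * buffer yields truncated output rather than an overflow.
 */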

/**
 * nvmf_reg_read32() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property read command to
 *		the allocated NVMe controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of
 *		the property after a successful read.
 *
 * Used by the host system to retrieve a 32-bit capsule property value
 * from an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register concept" applied to the
 *  NVMe fabrics space.)
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	struct nvme_command cmd = { };
	union nvme_result res;
	int ret;

	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
			NVME_QID_ANY, NVME_SUBMIT_RESERVED);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read32);
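
/*
 * Caller sketch (illustrative; assumes the usual transport wiring where
 * ctrl->ops->reg_read32 points at this helper): common code reads 32-bit
 * properties such as the controller version with it, e.g.:
 *
 *	u32 vs;
 *	int ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &vs);
 *
 * Note that only the low 32 bits of the 64-bit result survive the
 * assignment above.
 */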

/**
 * nvmf_reg_read64() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property read command to
 *		the allocated controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of
 *		the property after a successful read.
 *
 * Used by the host system to retrieve a 64-bit capsule property value
 * from an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register concept" applied to the
 *  NVMe fabrics space.)
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	struct nvme_command cmd = { };
	union nvme_result res;
	int ret;

	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.attrib = 1;
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
			NVME_QID_ANY, NVME_SUBMIT_RESERVED);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read64);

/**
 * nvmf_reg_write32() - NVMe Fabrics "Property Write" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property write command to
 *		the allocated NVMe controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	Input parameter that contains the value to be
 *		written to the property.
 *
 * Used by the NVMe host system to write a 32-bit capsule property value
 * to an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register concept" applied to the
 *  NVMe fabrics space.)
 *
 * Return:
 *	0: successful write
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	struct nvme_command cmd = { };
	int ret;

	cmd.prop_set.opcode = nvme_fabrics_command;
	cmd.prop_set.fctype = nvme_fabrics_type_property_set;
	cmd.prop_set.attrib = 0;
	cmd.prop_set.offset = cpu_to_le32(off);
	cmd.prop_set.value = cpu_to_le64(val);

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
			NVME_QID_ANY, NVME_SUBMIT_RESERVED);
	if (unlikely(ret))
		dev_err(ctrl->device,
			"Property Set error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_write32);
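
/*
 * Caller sketch (illustrative; assumes ctrl->ops->reg_write32 points at
 * this helper): controller bring-up updates the configuration property
 * through it, e.g.:
 *
 *	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
 *
 * Property Set carries no result payload, hence the NULL result pointer
 * passed to __nvme_submit_sync_cmd() above.
 */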

int nvmf_subsystem_reset(struct nvme_ctrl *ctrl)
{
	int ret;

	if (!nvme_wait_reset(ctrl))
		return -EBUSY;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, NVME_SUBSYS_RESET);
	if (ret)
		return ret;

	return nvme_try_sched_reset(ctrl);
}
EXPORT_SYMBOL_GPL(nvmf_subsystem_reset);

/**
 * nvmf_log_connect_error() - Error-parsing-diagnostic print out function for
 *				connect() errors.
 * @ctrl:	The specific /dev/nvmeX device that had the error.
 * @errval:	Error code to be decoded in a more human-friendly
 *		printout.
 * @offset:	For use with the NVMe error code
 *		NVME_SC_CONNECT_INVALID_PARAM.
 * @cmd:	This is the SQE portion of a submission capsule.
 * @data:	This is the "Data" portion of a submission capsule.
 */
static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
		int errval, int offset, struct nvme_command *cmd,
		struct nvmf_connect_data *data)
{
	int err_sctype = errval & ~NVME_STATUS_DNR;

	if (errval < 0) {
		dev_err(ctrl->device,
			"Connect command failed, errno: %d\n", errval);
		return;
	}

	switch (err_sctype) {
	case NVME_SC_CONNECT_INVALID_PARAM:
		if (offset >> 16) {
			char *inv_data = "Connect Invalid Data Parameter";

			switch (offset & 0xffff) {
			case (offsetof(struct nvmf_connect_data, cntlid)):
				dev_err(ctrl->device,
					"%s, cntlid: %d\n",
					inv_data, data->cntlid);
				break;
			case (offsetof(struct nvmf_connect_data, hostnqn)):
				dev_err(ctrl->device,
					"%s, hostnqn \"%s\"\n",
					inv_data, data->hostnqn);
				break;
			case (offsetof(struct nvmf_connect_data, subsysnqn)):
				dev_err(ctrl->device,
					"%s, subsysnqn \"%s\"\n",
					inv_data, data->subsysnqn);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_data, offset & 0xffff);
				break;
			}
		} else {
			char *inv_sqe = "Connect Invalid SQE Parameter";

			switch (offset) {
			case (offsetof(struct nvmf_connect_command, qid)):
				dev_err(ctrl->device,
					"%s, qid %d\n",
					inv_sqe, cmd->connect.qid);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_sqe, offset);
			}
		}
		break;
	case NVME_SC_CONNECT_INVALID_HOST:
		dev_err(ctrl->device,
			"Connect for subsystem %s is not allowed, hostnqn: %s\n",
			data->subsysnqn, data->hostnqn);
		break;
	case NVME_SC_CONNECT_CTRL_BUSY:
		dev_err(ctrl->device,
			"Connect command failed: controller is busy or not available\n");
		break;
	case NVME_SC_CONNECT_FORMAT:
		dev_err(ctrl->device,
			"Connect incompatible format: %d",
			cmd->connect.recfmt);
		break;
	case NVME_SC_HOST_PATH_ERROR:
		dev_err(ctrl->device,
			"Connect command failed: host path error\n");
		break;
	case NVME_SC_AUTH_REQUIRED:
		dev_err(ctrl->device,
			"Connect command failed: authentication required\n");
		break;
	default:
		dev_err(ctrl->device,
			"Connect command failed, error wo/DNR bit: %d\n",
			err_sctype);
		break;
	}
}

static struct nvmf_connect_data *nvmf_connect_data_prep(struct nvme_ctrl *ctrl,
		u16 cntlid)
{
	struct nvmf_connect_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	uuid_copy(&data->hostid, &ctrl->opts->host->id);
	data->cntlid = cpu_to_le16(cntlid);
	strscpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
	strscpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);

	return data;
}

static void nvmf_connect_cmd_prep(struct nvme_ctrl *ctrl, u16 qid,
		struct nvme_command *cmd)
{
	cmd->connect.opcode = nvme_fabrics_command;
	cmd->connect.fctype = nvme_fabrics_type_connect;
	cmd->connect.qid = cpu_to_le16(qid);

	if (qid) {
		cmd->connect.sqsize = cpu_to_le16(ctrl->sqsize);
	} else {
		cmd->connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);

		/*
		 * Set the keep-alive timeout: ctrl->kato is kept in seconds,
		 * while the Connect command's KATO field is in milliseconds.
		 */
		cmd->connect.kato = cpu_to_le32(ctrl->kato * 1000);
	}

	if (ctrl->opts->disable_sqflow)
		cmd->connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
}
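
/*
 * Worked example (illustrative): with the default keep-alive of 5 seconds
 * (NVME_DEFAULT_KATO), the admin queue Connect capsule carries kato = 5000,
 * since the wire format is milliseconds while ctrl->kato is in seconds.
 * I/O queue Connect capsules (qid != 0) leave kato at zero.
 */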

/**
 * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect"
 *				API function.
 * @ctrl:	Host nvme controller instance used to request
 *		a new NVMe controller allocation on the target
 *		system and establish an NVMe Admin connection to
 *		that controller.
 *
 * This function enables an NVMe host device to request a new allocation of
 * an NVMe controller resource on a target system as well as establish a
 * fabrics-protocol connection of the NVMe Admin queue between the
 * host system device and the allocated NVMe controller on the
 * target system via a NVMe Fabrics "Connect" command.
 */
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
{
	struct nvme_command cmd = { };
	union nvme_result res;
	struct nvmf_connect_data *data;
	int ret;
	u32 result;

	nvmf_connect_cmd_prep(ctrl, 0, &cmd);

	data = nvmf_connect_data_prep(ctrl, 0xffff);
	if (!data)
		return -ENOMEM;

	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
			data, sizeof(*data), NVME_QID_ANY,
			NVME_SUBMIT_AT_HEAD |
			NVME_SUBMIT_NOWAIT |
			NVME_SUBMIT_RESERVED);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
		goto out_free_data;
	}

	result = le32_to_cpu(res.u32);
	ctrl->cntlid = result & 0xFFFF;
	if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
		/* Secure concatenation is not implemented */
		if (result & NVME_CONNECT_AUTHREQ_ASCR) {
			dev_warn(ctrl->device,
				 "qid 0: secure concatenation is not supported\n");
			ret = -EOPNOTSUPP;
			goto out_free_data;
		}
		/* Authentication required */
		ret = nvme_auth_negotiate(ctrl, 0);
		if (ret) {
			dev_warn(ctrl->device,
				 "qid 0: authentication setup failed\n");
			goto out_free_data;
		}
		ret = nvme_auth_wait(ctrl, 0);
		if (ret) {
			dev_warn(ctrl->device,
				 "qid 0: authentication failed, error %d\n",
				 ret);
		} else
			dev_info(ctrl->device,
				 "qid 0: authenticated\n");
	}

out_free_data:
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);

/**
 * nvmf_connect_io_queue() - NVMe Fabrics I/O Queue "Connect"
 *			     API function.
 * @ctrl:	Host nvme controller instance used to establish an
 *		NVMe I/O queue connection to the already allocated NVMe
 *		controller on the target system.
 * @qid:	NVMe I/O queue number for the new I/O connection between
 *		host and target (note qid == 0 is illegal as this is
 *		the Admin queue, per NVMe standard).
 *
 * This function issues a fabrics-protocol connection
 * of a NVMe I/O queue (via NVMe Fabrics "Connect" command)
 * between the host system device and the allocated NVMe controller
 * on the target system.
 *
 * Return:
 *	0: success
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
{
	struct nvme_command cmd = { };
	struct nvmf_connect_data *data;
	union nvme_result res;
	int ret;
	u32 result;

	nvmf_connect_cmd_prep(ctrl, qid, &cmd);

	data = nvmf_connect_data_prep(ctrl, ctrl->cntlid);
	if (!data)
		return -ENOMEM;

	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
			data, sizeof(*data), qid,
			NVME_SUBMIT_AT_HEAD |
			NVME_SUBMIT_RESERVED |
			NVME_SUBMIT_NOWAIT);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
		goto out_free_data;
	}

	result = le32_to_cpu(res.u32);
	if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
		/* Secure concatenation is not implemented */
		if (result & NVME_CONNECT_AUTHREQ_ASCR) {
			dev_warn(ctrl->device,
				 "qid %d: secure concatenation is not supported\n",
				 qid);
			ret = -EOPNOTSUPP;
			goto out_free_data;
		}
		/* Authentication required */
		ret = nvme_auth_negotiate(ctrl, qid);
		if (ret) {
			dev_warn(ctrl->device,
				 "qid %d: authentication setup failed\n", qid);
			goto out_free_data;
		}
		ret = nvme_auth_wait(ctrl, qid);
		if (ret) {
			dev_warn(ctrl->device,
				 "qid %u: authentication failed, error %d\n",
				 qid, ret);
		}
	}

out_free_data:
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
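
/*
 * Ordering sketch (illustrative; the loop shape is an assumption about the
 * calling transport, not code from this file): the admin queue must be
 * connected first, then each I/O queue, roughly:
 *
 *	ret = nvmf_connect_admin_queue(ctrl);
 *	for (i = 1; i < ctrl->queue_count; i++)
 *		ret = nvmf_connect_io_queue(ctrl, i);
 *
 * qid 0 is reserved for the admin queue, so I/O qids start at 1.
 */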

/*
 * Evaluate the status information returned by the transport in order to decide
 * if a reconnect attempt should be scheduled.
 *
 * Do not retry when:
 *
 * - the DNR bit is set and the specification states no further connect
 *   attempts with the same set of parameters should be attempted.
 *
 * - when the authentication attempt fails, because the key was invalid.
 *   This error code is set on the host side.
 */
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status)
{
	if (status > 0 && (status & NVME_STATUS_DNR))
		return false;

	if (status == -EKEYREJECTED)
		return false;

	if (ctrl->opts->max_reconnects == -1 ||
	    ctrl->nr_reconnects < ctrl->opts->max_reconnects)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
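
/*
 * Worked example (illustrative, using the NVMF_DEF_* defaults from
 * fabrics.h): with ctrl_loss_tmo = 600 and reconnect_delay = 10, option
 * parsing below sets max_reconnects = DIV_ROUND_UP(600, 10) = 60, so
 * reconnects stop after 60 failed attempts.  A negative ctrl_loss_tmo
 * yields max_reconnects == -1, which this helper treats as "retry forever".
 */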

/**
 * nvmf_register_transport() - NVMe Fabrics Library registration function.
 * @ops:	Transport ops instance to be registered to the
 *		common fabrics library.
 *
 * API function that registers the type of specific transport fabric
 * being implemented to the common NVMe fabrics library. Part of
 * the overall init sequence of starting up a fabrics driver.
 */
int nvmf_register_transport(struct nvmf_transport_ops *ops)
{
	if (!ops->create_ctrl)
		return -EINVAL;

	down_write(&nvmf_transports_rwsem);
	list_add_tail(&ops->entry, &nvmf_transports);
	up_write(&nvmf_transports_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmf_register_transport);

/**
 * nvmf_unregister_transport() - NVMe Fabrics Library unregistration function.
 * @ops:	Transport ops instance to be unregistered from the
 *		common fabrics library.
 *
 * Fabrics API function that unregisters the type of specific transport
 * fabric being implemented from the common NVMe fabrics library.
 * Part of the overall exit sequence of unloading the implemented driver.
 */
void nvmf_unregister_transport(struct nvmf_transport_ops *ops)
{
	down_write(&nvmf_transports_rwsem);
	list_del(&ops->entry);
	up_write(&nvmf_transports_rwsem);
}
EXPORT_SYMBOL_GPL(nvmf_unregister_transport);

static struct nvmf_transport_ops *nvmf_lookup_transport(
		struct nvmf_ctrl_options *opts)
{
	struct nvmf_transport_ops *ops;

	lockdep_assert_held(&nvmf_transports_rwsem);

	list_for_each_entry(ops, &nvmf_transports, entry) {
		if (strcmp(ops->name, opts->transport) == 0)
			return ops;
	}

	return NULL;
}

static struct key *nvmf_parse_key(int key_id)
{
	struct key *key;

	if (!IS_ENABLED(CONFIG_NVME_TCP_TLS)) {
		pr_err("TLS is not supported\n");
		return ERR_PTR(-EINVAL);
	}

	key = nvme_tls_key_lookup(key_id);
	if (IS_ERR(key))
		pr_err("key id %08x not found\n", key_id);
	else
		pr_debug("Using key id %08x\n", key_id);
	return key;
}

static const match_table_t opt_tokens = {
	{ NVMF_OPT_TRANSPORT,		"transport=%s"		},
	{ NVMF_OPT_TRADDR,		"traddr=%s"		},
	{ NVMF_OPT_TRSVCID,		"trsvcid=%s"		},
	{ NVMF_OPT_NQN,			"nqn=%s"		},
	{ NVMF_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ NVMF_OPT_NR_IO_QUEUES,	"nr_io_queues=%d"	},
	{ NVMF_OPT_RECONNECT_DELAY,	"reconnect_delay=%d"	},
	{ NVMF_OPT_CTRL_LOSS_TMO,	"ctrl_loss_tmo=%d"	},
	{ NVMF_OPT_KATO,		"keep_alive_tmo=%d"	},
	{ NVMF_OPT_HOSTNQN,		"hostnqn=%s"		},
	{ NVMF_OPT_HOST_TRADDR,		"host_traddr=%s"	},
	{ NVMF_OPT_HOST_IFACE,		"host_iface=%s"		},
	{ NVMF_OPT_HOST_ID,		"hostid=%s"		},
	{ NVMF_OPT_DUP_CONNECT,		"duplicate_connect"	},
	{ NVMF_OPT_DISABLE_SQFLOW,	"disable_sqflow"	},
	{ NVMF_OPT_HDR_DIGEST,		"hdr_digest"		},
	{ NVMF_OPT_DATA_DIGEST,		"data_digest"		},
	{ NVMF_OPT_NR_WRITE_QUEUES,	"nr_write_queues=%d"	},
	{ NVMF_OPT_NR_POLL_QUEUES,	"nr_poll_queues=%d"	},
	{ NVMF_OPT_TOS,			"tos=%d"		},
#ifdef CONFIG_NVME_TCP_TLS
	{ NVMF_OPT_KEYRING,		"keyring=%d"		},
	{ NVMF_OPT_TLS_KEY,		"tls_key=%d"		},
#endif
	{ NVMF_OPT_FAIL_FAST_TMO,	"fast_io_fail_tmo=%d"	},
	{ NVMF_OPT_DISCOVERY,		"discovery"		},
#ifdef CONFIG_NVME_HOST_AUTH
	{ NVMF_OPT_DHCHAP_SECRET,	"dhchap_secret=%s"	},
	{ NVMF_OPT_DHCHAP_CTRL_SECRET,	"dhchap_ctrl_secret=%s"	},
#endif
#ifdef CONFIG_NVME_TCP_TLS
	{ NVMF_OPT_TLS,			"tls"			},
#endif
	{ NVMF_OPT_ERR,			NULL			}
};
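
/*
 * Example option string (illustrative): the table above matches the comma
 * separated key=value tokens written to /dev/nvme-fabrics, for instance:
 *
 *	transport=tcp,traddr=192.168.1.10,trsvcid=4420,
 *	nqn=nqn.2014-08.org.nvmexpress.discovery,keep_alive_tmo=30
 *
 * (wrapped here for readability; the written string is a single line).
 */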

static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	size_t nqnlen = 0;
	int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO, key_id;
	uuid_t hostid;
	char hostnqn[NVMF_NQN_SIZE];
	struct key *key;

	/* Set defaults */
	opts->queue_size = NVMF_DEF_QUEUE_SIZE;
	opts->nr_io_queues = num_online_cpus();
	opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
	opts->kato = 0;
	opts->duplicate_connect = false;
	opts->fast_io_fail_tmo = NVMF_DEF_FAIL_FAST_TMO;
	opts->hdr_digest = false;
	opts->data_digest = false;
	opts->tos = -1; /* < 0 == use transport default */
	opts->tls = false;
	opts->tls_key = NULL;
	opts->keyring = NULL;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	/* use default host if not given by user space */
	uuid_copy(&hostid, &nvmf_default_host->id);
	strscpy(hostnqn, nvmf_default_host->nqn, NVMF_NQN_SIZE);

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_TRANSPORT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->transport);
			opts->transport = p;
			break;
		case NVMF_OPT_NQN:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->subsysnqn);
			opts->subsysnqn = p;
			nqnlen = strlen(opts->subsysnqn);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					opts->subsysnqn, NVMF_NQN_SIZE);
				ret = -EINVAL;
				goto out;
			}
			opts->discovery_nqn =
				!(strcmp(opts->subsysnqn,
					 NVME_DISC_SUBSYS_NAME));
			break;
		case NVMF_OPT_TRADDR:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->traddr);
			opts->traddr = p;
			break;
		case NVMF_OPT_TRSVCID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->trsvcid);
			opts->trsvcid = p;
			break;
		case NVMF_OPT_QUEUE_SIZE:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < NVMF_MIN_QUEUE_SIZE ||
			    token > NVMF_MAX_QUEUE_SIZE) {
				pr_err("Invalid queue_size %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->queue_size = token;
			break;
		case NVMF_OPT_NR_IO_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid number of IOQs %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			if (opts->discovery_nqn) {
				pr_debug("Ignoring nr_io_queues value for discovery controller\n");
				break;
			}

			opts->nr_io_queues = min_t(unsigned int,
					num_online_cpus(), token);
			break;
		case NVMF_OPT_KATO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token < 0) {
				pr_err("Invalid keep_alive_tmo %d\n", token);
				ret = -EINVAL;
				goto out;
			} else if (token == 0 && !opts->discovery_nqn) {
				/* Allowed for debug */
				pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
			}
			opts->kato = token;
			break;
		case NVMF_OPT_CTRL_LOSS_TMO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token < 0)
				pr_warn("ctrl_loss_tmo < 0 will reconnect forever\n");
			ctrl_loss_tmo = token;
			break;
		case NVMF_OPT_FAIL_FAST_TMO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}

			if (token >= 0)
				pr_warn("I/O fail on reconnect controller after %d sec\n",
					token);
			else
				token = -1;

			opts->fast_io_fail_tmo = token;
			break;
		case NVMF_OPT_HOSTNQN:
			if (opts->host) {
				pr_err("hostnqn already user-assigned: %s\n",
				       opts->host->nqn);
				ret = -EADDRINUSE;
				goto out;
			}
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			nqnlen = strlen(p);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					p, NVMF_NQN_SIZE);
				kfree(p);
				ret = -EINVAL;
				goto out;
			}
			strscpy(hostnqn, p, NVMF_NQN_SIZE);
			kfree(p);
			break;
		case NVMF_OPT_RECONNECT_DELAY:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid reconnect_delay %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->reconnect_delay = token;
			break;
		case NVMF_OPT_HOST_TRADDR:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->host_traddr);
			opts->host_traddr = p;
			break;
		case NVMF_OPT_HOST_IFACE:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			kfree(opts->host_iface);
			opts->host_iface = p;
			break;
		case NVMF_OPT_HOST_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = uuid_parse(p, &hostid);
			if (ret) {
				pr_err("Invalid hostid %s\n", p);
				ret = -EINVAL;
				kfree(p);
				goto out;
			}
			kfree(p);
			break;
		case NVMF_OPT_DUP_CONNECT:
			opts->duplicate_connect = true;
			break;
		case NVMF_OPT_DISABLE_SQFLOW:
			opts->disable_sqflow = true;
			break;
		case NVMF_OPT_HDR_DIGEST:
			opts->hdr_digest = true;
			break;
		case NVMF_OPT_DATA_DIGEST:
			opts->data_digest = true;
			break;
		case NVMF_OPT_NR_WRITE_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid nr_write_queues %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->nr_write_queues = token;
			break;
		case NVMF_OPT_NR_POLL_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid nr_poll_queues %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->nr_poll_queues = token;
			break;
		case NVMF_OPT_TOS:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < 0) {
				pr_err("Invalid type of service %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			if (token > 255) {
				pr_warn("Clamping type of service to 255\n");
				token = 255;
			}
			opts->tos = token;
			break;
		case NVMF_OPT_KEYRING:
			if (match_int(args, &key_id) || key_id <= 0) {
				ret = -EINVAL;
				goto out;
			}
			key = nvmf_parse_key(key_id);
			if (IS_ERR(key)) {
				ret = PTR_ERR(key);
				goto out;
			}
			key_put(opts->keyring);
			opts->keyring = key;
			break;
		case NVMF_OPT_TLS_KEY:
			if (match_int(args, &key_id) || key_id <= 0) {
				ret = -EINVAL;
				goto out;
			}
			key = nvmf_parse_key(key_id);
			if (IS_ERR(key)) {
				ret = PTR_ERR(key);
				goto out;
			}
			key_put(opts->tls_key);
			opts->tls_key = key;
			break;
		case NVMF_OPT_DISCOVERY:
			opts->discovery_nqn = true;
			break;
		case NVMF_OPT_DHCHAP_SECRET:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
				pr_err("Invalid DH-CHAP secret %s\n", p);
				ret = -EINVAL;
				goto out;
			}
			kfree(opts->dhchap_secret);
			opts->dhchap_secret = p;
			break;
		case NVMF_OPT_DHCHAP_CTRL_SECRET:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
				pr_err("Invalid DH-CHAP secret %s\n", p);
				ret = -EINVAL;
				goto out;
			}
			kfree(opts->dhchap_ctrl_secret);
			opts->dhchap_ctrl_secret = p;
			break;
		case NVMF_OPT_TLS:
			if (!IS_ENABLED(CONFIG_NVME_TCP_TLS)) {
				pr_err("TLS is not supported\n");
				ret = -EINVAL;
				goto out;
			}
			opts->tls = true;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
				p);
			ret = -EINVAL;
			goto out;
		}
	}

	if (opts->discovery_nqn) {
		opts->nr_io_queues = 0;
		opts->nr_write_queues = 0;
		opts->nr_poll_queues = 0;
		opts->duplicate_connect = true;
	} else {
		if (!opts->kato)
			opts->kato = NVME_DEFAULT_KATO;
	}
	if (ctrl_loss_tmo < 0) {
		opts->max_reconnects = -1;
	} else {
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);
		if (ctrl_loss_tmo < opts->fast_io_fail_tmo)
			pr_warn("failfast tmo (%d) larger than controller loss tmo (%d)\n",
				opts->fast_io_fail_tmo, ctrl_loss_tmo);
	}

	opts->host = nvmf_host_add(hostnqn, &hostid);
	if (IS_ERR(opts->host)) {
		ret = PTR_ERR(opts->host);
		opts->host = NULL;
		goto out;
	}

out:
	kfree(options);
	return ret;
}

void nvmf_set_io_queues(struct nvmf_ctrl_options *opts, u32 nr_io_queues,
			u32 io_queues[HCTX_MAX_TYPES])
{
	if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
		/*
		 * separate read/write queues
		 * hand out dedicated default queues only after we have
		 * sufficient read queues.
		 */
		io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
		nr_io_queues -= io_queues[HCTX_TYPE_READ];
		io_queues[HCTX_TYPE_DEFAULT] =
			min(opts->nr_write_queues, nr_io_queues);
		nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/*
		 * shared read/write queues
		 * either no write queues were requested, or we don't have
		 * sufficient queue count to have dedicated default queues.
		 */
		io_queues[HCTX_TYPE_DEFAULT] =
			min(opts->nr_io_queues, nr_io_queues);
		nr_io_queues -= io_queues[HCTX_TYPE_DEFAULT];
	}

	if (opts->nr_poll_queues && nr_io_queues) {
		/* map dedicated poll queues only if we have queues left */
		io_queues[HCTX_TYPE_POLL] =
			min(opts->nr_poll_queues, nr_io_queues);
	}
}
EXPORT_SYMBOL_GPL(nvmf_set_io_queues);
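
/*
 * Worked example (illustrative): with opts->nr_io_queues = 2,
 * opts->nr_write_queues = 4, opts->nr_poll_queues = 2 and 8 available
 * queues, the split above is READ = 2, DEFAULT = min(4, 6) = 4 and
 * POLL = min(2, 2) = 2.  Without requested write queues, all non-poll
 * queues land in HCTX_TYPE_DEFAULT and reads share them.
 */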

void nvmf_map_queues(struct blk_mq_tag_set *set, struct nvme_ctrl *ctrl,
		     u32 io_queues[HCTX_MAX_TYPES])
{
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (opts->nr_write_queues && io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}

	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
	if (opts->nr_poll_queues && io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		set->map[HCTX_TYPE_POLL].nr_queues = io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			io_queues[HCTX_TYPE_DEFAULT] +
			io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}

	dev_info(ctrl->device,
		"mapped %d/%d/%d default/read/poll queues.\n",
		io_queues[HCTX_TYPE_DEFAULT],
		io_queues[HCTX_TYPE_READ],
		io_queues[HCTX_TYPE_POLL]);
}
EXPORT_SYMBOL_GPL(nvmf_map_queues);

static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
		unsigned int required_opts)
{
	if ((opts->mask & required_opts) != required_opts) {
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & required_opts) &&
			    !(opt_tokens[i].token & opts->mask)) {
				pr_warn("missing parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}

bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
		struct nvmf_ctrl_options *opts)
{
	if (!nvmf_ctlr_matches_baseopts(ctrl, opts) ||
	    strcmp(opts->traddr, ctrl->opts->traddr) ||
	    strcmp(opts->trsvcid, ctrl->opts->trsvcid))
		return false;

	/*
	 * Checking the local address or host interfaces is rough.
	 *
	 * In most cases, none is specified and the host port or
	 * host interface is selected by the stack.
	 *
	 * Assume no match if:
	 * - local address or host interface is specified and address
	 *   or host interface is not the same
	 * - local address or host interface is not specified but
	 *   remote is, or vice versa (admin using specific
	 *   host_traddr/host_iface when it matters).
	 */
	if ((opts->mask & NVMF_OPT_HOST_TRADDR) &&
	    (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
		if (strcmp(opts->host_traddr, ctrl->opts->host_traddr))
			return false;
	} else if ((opts->mask & NVMF_OPT_HOST_TRADDR) ||
		   (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
		return false;
	}

	if ((opts->mask & NVMF_OPT_HOST_IFACE) &&
	    (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)) {
		if (strcmp(opts->host_iface, ctrl->opts->host_iface))
			return false;
	} else if ((opts->mask & NVMF_OPT_HOST_IFACE) ||
		   (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)) {
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(nvmf_ip_options_match);

static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
		unsigned int allowed_opts)
{
	if (opts->mask & ~allowed_opts) {
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & opts->mask) &&
			    (opt_tokens[i].token & ~allowed_opts)) {
				pr_warn("invalid parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}

void nvmf_free_options(struct nvmf_ctrl_options *opts)
{
	nvmf_host_put(opts->host);
	key_put(opts->keyring);
	key_put(opts->tls_key);
	kfree(opts->transport);
	kfree(opts->traddr);
	kfree(opts->trsvcid);
	kfree(opts->subsysnqn);
	kfree(opts->host_traddr);
	kfree(opts->host_iface);
	kfree(opts->dhchap_secret);
	kfree(opts->dhchap_ctrl_secret);
	kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);

#define NVMF_REQUIRED_OPTS	(NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
#define NVMF_ALLOWED_OPTS	(NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
				 NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
				 NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
				 NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\
				 NVMF_OPT_FAIL_FAST_TMO | NVMF_OPT_DHCHAP_SECRET |\
				 NVMF_OPT_DHCHAP_CTRL_SECRET)

static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf)
{
	struct nvmf_ctrl_options *opts;
	struct nvmf_transport_ops *ops;
	struct nvme_ctrl *ctrl;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);

	ret = nvmf_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	request_module("nvme-%s", opts->transport);

	/*
	 * Check the generic options first as we need a valid transport for
	 * the lookup below.  Then clear the generic flags so that transport
	 * drivers don't have to care about them.
	 */
	ret = nvmf_check_required_opts(opts, NVMF_REQUIRED_OPTS);
	if (ret)
		goto out_free_opts;
	opts->mask &= ~NVMF_REQUIRED_OPTS;

	down_read(&nvmf_transports_rwsem);
	ops = nvmf_lookup_transport(opts);
	if (!ops) {
		pr_info("no handler found for transport %s.\n",
			opts->transport);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!try_module_get(ops->module)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	up_read(&nvmf_transports_rwsem);

	ret = nvmf_check_required_opts(opts, ops->required_opts);
	if (ret)
		goto out_module_put;
	ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
				ops->allowed_opts | ops->required_opts);
	if (ret)
		goto out_module_put;

	ctrl = ops->create_ctrl(dev, opts);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_module_put;
	}

	module_put(ops->module);
	return ctrl;

out_module_put:
	module_put(ops->module);
	goto out_free_opts;
out_unlock:
	up_read(&nvmf_transports_rwsem);
out_free_opts:
	nvmf_free_options(opts);
	return ERR_PTR(ret);
}

static const struct class nvmf_class = {
	.name = "nvme-fabrics",
};

static struct device *nvmf_device;
static DEFINE_MUTEX(nvmf_dev_mutex);

static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *pos)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl;
	const char *buf;
	int ret = 0;

	if (count > PAGE_SIZE)
		return -ENOMEM;

	buf = memdup_user_nul(ubuf, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&nvmf_dev_mutex);
	if (seq_file->private) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ctrl = nvmf_create_ctrl(nvmf_device, buf);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_unlock;
	}

	seq_file->private = ctrl;

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	kfree(buf);
	return ret ? ret : count;
}

static void __nvmf_concat_opt_tokens(struct seq_file *seq_file)
{
	const struct match_token *tok;
	int idx;

	/*
	 * Add dummy entries for instance and cntlid to
	 * signal an invalid/non-existing controller
	 */
	seq_puts(seq_file, "instance=-1,cntlid=-1");
	for (idx = 0; idx < ARRAY_SIZE(opt_tokens); idx++) {
		tok = &opt_tokens[idx];
		if (tok->token == NVMF_OPT_ERR)
			continue;
		seq_putc(seq_file, ',');
		seq_puts(seq_file, tok->pattern);
	}
	seq_putc(seq_file, '\n');
}

static int nvmf_dev_show(struct seq_file *seq_file, void *private)
{
	struct nvme_ctrl *ctrl;

	mutex_lock(&nvmf_dev_mutex);
	ctrl = seq_file->private;
	if (!ctrl) {
		__nvmf_concat_opt_tokens(seq_file);
		goto out_unlock;
	}

	seq_printf(seq_file, "instance=%d,cntlid=%d\n",
			ctrl->instance, ctrl->cntlid);

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	return 0;
}

static int nvmf_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The miscdevice code initializes file->private_data, but doesn't
	 * make use of it later.
	 */
	file->private_data = NULL;
	return single_open(file, nvmf_dev_show, NULL);
}

static int nvmf_dev_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl = seq_file->private;

	if (ctrl)
		nvme_put_ctrl(ctrl);
	return single_release(inode, file);
}

static const struct file_operations nvmf_dev_fops = {
	.owner		= THIS_MODULE,
	.write		= nvmf_dev_write,
	.read		= seq_read,
	.open		= nvmf_dev_open,
	.release	= nvmf_dev_release,
};

static struct miscdevice nvmf_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "nvme-fabrics",
	.fops		= &nvmf_dev_fops,
};
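
/*
 * Userspace sketch (illustrative; this is roughly what nvme-cli does, not
 * an interface guarantee): a controller is created by writing an option
 * string to the misc device and reading the result back on the same open
 * file descriptor:
 *
 *	exec 3<>/dev/nvme-fabrics
 *	printf 'transport=tcp,traddr=192.168.1.10,trsvcid=4420,nqn=%s' \
 *		"$SUBSYS_NQN" >&3
 *	cat <&3		# prints e.g. "instance=0,cntlid=1"
 *
 * A read on a descriptor that has not created a controller returns the
 * dummy "instance=-1,cntlid=-1" entry plus the supported option patterns.
 */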

static int __init nvmf_init(void)
{
	int ret;

	nvmf_default_host = nvmf_host_default();
	if (!nvmf_default_host)
		return -ENOMEM;

	ret = class_register(&nvmf_class);
	if (ret) {
		pr_err("couldn't register class nvme-fabrics\n");
		goto out_free_host;
	}

	nvmf_device =
		device_create(&nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
	if (IS_ERR(nvmf_device)) {
		pr_err("couldn't create nvme-fabrics device!\n");
		ret = PTR_ERR(nvmf_device);
		goto out_destroy_class;
	}

	ret = misc_register(&nvmf_misc);
	if (ret) {
		pr_err("couldn't register misc device: %d\n", ret);
		goto out_destroy_device;
	}

	return 0;

out_destroy_device:
	device_destroy(&nvmf_class, MKDEV(0, 0));
out_destroy_class:
	class_unregister(&nvmf_class);
out_free_host:
	nvmf_host_put(nvmf_default_host);
	return ret;
}

static void __exit nvmf_exit(void)
{
	misc_deregister(&nvmf_misc);
	device_destroy(&nvmf_class, MKDEV(0, 0));
	class_unregister(&nvmf_class);
	nvmf_host_put(nvmf_default_host);

	BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_send_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_receive_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_negotiate_data) != 8);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_challenge_data) != 16);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_reply_data) != 16);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success1_data) != 16);
	BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success2_data) != 16);
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NVMe host fabrics library");

module_init(nvmf_init);
module_exit(nvmf_exit);