/*
 * NVMe over Fabrics common host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include "nvme.h"
#include "fabrics.h"
static LIST_HEAD(nvmf_transports);
static DECLARE_RWSEM(nvmf_transports_rwsem);

static LIST_HEAD(nvmf_hosts);
static DEFINE_MUTEX(nvmf_hosts_mutex);

static struct nvmf_host *nvmf_default_host;
static struct nvmf_host *__nvmf_host_find(const char *hostnqn)
{
	struct nvmf_host *host;

	list_for_each_entry(host, &nvmf_hosts, list) {
		if (!strcmp(host->nqn, hostnqn))
			return host;
	}

	return NULL;
}
static struct nvmf_host *nvmf_host_add(const char *hostnqn)
{
	struct nvmf_host *host;

	mutex_lock(&nvmf_hosts_mutex);
	host = __nvmf_host_find(hostnqn);
	if (host) {
		kref_get(&host->ref);
		goto out_unlock;
	}

	host = kmalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		goto out_unlock;

	kref_init(&host->ref);
	memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);

	list_add_tail(&host->list, &nvmf_hosts);
out_unlock:
	mutex_unlock(&nvmf_hosts_mutex);
	return host;
}
static struct nvmf_host *nvmf_host_default(void)
{
	struct nvmf_host *host;

	host = kmalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return NULL;

	kref_init(&host->ref);
	uuid_gen(&host->id);
	snprintf(host->nqn, NVMF_NQN_SIZE,
		"nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);

	mutex_lock(&nvmf_hosts_mutex);
	list_add_tail(&host->list, &nvmf_hosts);
	mutex_unlock(&nvmf_hosts_mutex);

	return host;
}
static void nvmf_host_destroy(struct kref *ref)
{
	struct nvmf_host *host = container_of(ref, struct nvmf_host, ref);

	mutex_lock(&nvmf_hosts_mutex);
	list_del(&host->list);
	mutex_unlock(&nvmf_hosts_mutex);

	kfree(host);
}
static void nvmf_host_put(struct nvmf_host *host)
{
	if (host)
		kref_put(&host->ref, nvmf_host_destroy);
}
/**
 * nvmf_get_address() - Get address/port
 * @ctrl:	Host NVMe controller instance from which we got the address
 * @buf:	OUTPUT parameter that will contain the address/port
 * @size:	buffer size
 */
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	int len = 0;

	if (ctrl->opts->mask & NVMF_OPT_TRADDR)
		len += snprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
	if (ctrl->opts->mask & NVMF_OPT_TRSVCID)
		len += snprintf(buf + len, size - len, "%strsvcid=%s",
				(len) ? "," : "", ctrl->opts->trsvcid);
	if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
		len += snprintf(buf + len, size - len, "%shost_traddr=%s",
				(len) ? "," : "", ctrl->opts->host_traddr);
	len += snprintf(buf + len, size - len, "\n");

	return len;
}
EXPORT_SYMBOL_GPL(nvmf_get_address);
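/*
 * Illustrative sketch, not part of this file: the NVMe core typically exposes
 * the string built by nvmf_get_address() through a sysfs "address" attribute.
 * The helper name below is an assumption for illustration only.
 *
 *	static ssize_t nvme_sysfs_show_address(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
 *
 *		return nvmf_get_address(ctrl, buf, PAGE_SIZE);
 *	}
 */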
/**
 * nvmf_reg_read32() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property read command to
 *		the allocated NVMe controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of
 *		the property after a successful read.
 *
 * Used by the host system to retrieve a 32-bit capsule property value
 * from an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register" concept applied to the
 * NVMe fabrics space.)
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	struct nvme_command cmd;
	union nvme_result res;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
			NVME_QID_ANY, 0, 0);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read32);
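/*
 * Illustrative sketch, not part of this file: a fabrics transport driver can
 * poll the 32-bit Controller Status property during controller setup. The
 * surrounding context (ctrl, error handling) is assumed.
 *
 *	u32 csts;
 *	int ret;
 *
 *	ret = nvmf_reg_read32(ctrl, NVME_REG_CSTS, &csts);
 *	if (ret)
 *		return ret;
 *	if (csts & NVME_CSTS_RDY)
 *		dev_dbg(ctrl->device, "controller is ready\n");
 */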
/**
 * nvmf_reg_read64() - NVMe Fabrics "Property Get" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property read command to
 *		the allocated controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	OUTPUT parameter that will contain the value of
 *		the property after a successful read.
 *
 * Used by the host system to retrieve a 64-bit capsule property value
 * from an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register" concept applied to the
 * NVMe fabrics space.)
 *
 * Return:
 *	0: successful read
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	struct nvme_command cmd;
	union nvme_result res;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.attrib = 1;
	cmd.prop_get.offset = cpu_to_le32(off);

	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
			NVME_QID_ANY, 0, 0);

	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	if (unlikely(ret != 0))
		dev_err(ctrl->device,
			"Property Get error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_read64);
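/*
 * Illustrative sketch, not part of this file: transports commonly read the
 * 64-bit Controller Capabilities property right after connecting the admin
 * queue. The surrounding context is assumed.
 *
 *	ret = nvmf_reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
 *	if (ret) {
 *		dev_err(ctrl->device, "reading CAP failed\n");
 *		return ret;
 *	}
 */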
/**
 * nvmf_reg_write32() - NVMe Fabrics "Property Write" API function.
 * @ctrl:	Host NVMe controller instance maintaining the admin
 *		queue used to submit the property write command to
 *		the allocated NVMe controller resource on the target system.
 * @off:	Starting offset value of the targeted property
 *		register (see the fabrics section of the NVMe standard).
 * @val:	Input parameter that contains the value to be
 *		written to the property.
 *
 * Used by the NVMe host system to write a 32-bit capsule property value
 * to an NVMe controller on the target system.
 *
 * ("Capsule property" is a "PCIe register" concept applied to the
 * NVMe fabrics space.)
 *
 * Return:
 *	0: successful write
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	struct nvme_command cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.prop_set.opcode = nvme_fabrics_command;
	cmd.prop_set.fctype = nvme_fabrics_type_property_set;
	cmd.prop_set.attrib = 0;
	cmd.prop_set.offset = cpu_to_le32(off);
	cmd.prop_set.value = cpu_to_le64(val);

	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, NULL, 0, 0,
			NVME_QID_ANY, 0, 0);
	if (ret)
		dev_err(ctrl->device,
			"Property Set error: %d, offset %#x\n",
			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_reg_write32);
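/*
 * Illustrative sketch, not part of this file: enabling a fabrics controller
 * ultimately comes down to writing the cached Controller Configuration value
 * to the CC property. Fabrics drivers wire their ->reg_write32 controller op
 * to this helper; the direct call below is for illustration only.
 *
 *	ctrl->ctrl_config |= NVME_CC_ENABLE;
 *	ret = nvmf_reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
 */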
/**
 * nvmf_log_connect_error() - Error-parsing-diagnostic print
 * out function for connect() errors.
 *
 * @ctrl: the specific /dev/nvmeX device that had the error.
 *
 * @errval: Error code to be decoded in a more human-friendly
 *	    printout.
 *
 * @offset: For use with the NVMe error code NVME_SC_CONNECT_INVALID_PARAM.
 *
 * @cmd: This is the SQE portion of a submission capsule.
 *
 * @data: This is the "Data" portion of a submission capsule.
 */
static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
		int errval, int offset, struct nvme_command *cmd,
		struct nvmf_connect_data *data)
{
	int err_sctype = errval & (~NVME_SC_DNR);

	switch (err_sctype) {

	case (NVME_SC_CONNECT_INVALID_PARAM):
		if (offset >> 16) {
			char *inv_data = "Connect Invalid Data Parameter";

			switch (offset & 0xffff) {
			case (offsetof(struct nvmf_connect_data, cntlid)):
				dev_err(ctrl->device,
					"%s, cntlid: %d\n",
					inv_data, data->cntlid);
				break;
			case (offsetof(struct nvmf_connect_data, hostnqn)):
				dev_err(ctrl->device,
					"%s, hostnqn \"%s\"\n",
					inv_data, data->hostnqn);
				break;
			case (offsetof(struct nvmf_connect_data, subsysnqn)):
				dev_err(ctrl->device,
					"%s, subsysnqn \"%s\"\n",
					inv_data, data->subsysnqn);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_data, offset & 0xffff);
				break;
			}
		} else {
			char *inv_sqe = "Connect Invalid SQE Parameter";

			switch (offset) {
			case (offsetof(struct nvmf_connect_command, qid)):
				dev_err(ctrl->device,
					"%s, qid %d\n",
					inv_sqe, cmd->connect.qid);
				break;
			default:
				dev_err(ctrl->device,
					"%s, starting byte offset: %d\n",
					inv_sqe, offset);
			}
		}
		break;
	case NVME_SC_CONNECT_INVALID_HOST:
		dev_err(ctrl->device,
			"Connect for subsystem %s is not allowed, hostnqn: %s\n",
			data->subsysnqn, data->hostnqn);
		break;
	case NVME_SC_CONNECT_CTRL_BUSY:
		dev_err(ctrl->device,
			"Connect command failed: controller is busy or not available\n");
		break;
	case NVME_SC_CONNECT_FORMAT:
		dev_err(ctrl->device,
			"Connect incompatible format: %d",
			cmd->connect.recfmt);
		break;
	default:
		dev_err(ctrl->device,
			"Connect command failed, error wo/DNR bit: %d\n",
			err_sctype);
		break;
	} /* switch (err_sctype) */
}
/**
 * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect"
 *				API function.
 * @ctrl:	Host nvme controller instance used to request
 *		a new NVMe controller allocation on the target
 *		system and establish an NVMe Admin connection to
 *		that controller.
 *
 * This function enables an NVMe host device to request a new allocation of
 * an NVMe controller resource on a target system as well as establish a
 * fabrics-protocol connection of the NVMe Admin queue between the
 * host system device and the allocated NVMe controller on the
 * target system via an NVMe Fabrics "Connect" command.
 *
 * Return:
 *	0: success
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
{
	struct nvme_command cmd;
	union nvme_result res;
	struct nvmf_connect_data *data;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect.opcode = nvme_fabrics_command;
	cmd.connect.fctype = nvme_fabrics_type_connect;
	cmd.connect.qid = 0;
	cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);

	/*
	 * Set keep-alive timeout in seconds granularity (ms * 1000)
	 * and add a grace period for controller kato enforcement
	 */
	cmd.connect.kato = ctrl->opts->discovery_nqn ? 0 :
		cpu_to_le32((ctrl->kato + NVME_KATO_GRACE) * 1000);

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	uuid_copy(&data->hostid, &ctrl->opts->host->id);
	data->cntlid = cpu_to_le16(0xffff);
	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);

	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res,
			data, sizeof(*data), 0, NVME_QID_ANY, 1,
			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
		goto out_free_data;
	}

	ctrl->cntlid = le16_to_cpu(res.u16);

out_free_data:
	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
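/*
 * Illustrative sketch, not part of this file: a transport's admin queue setup
 * path usually looks roughly like this once the transport-specific queue has
 * been created. The exact error handling and helper names are assumptions.
 *
 *	error = nvmf_connect_admin_queue(ctrl);
 *	if (error)
 *		return error;
 *
 *	error = nvmf_reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
 *	if (error)
 *		return error;
 *
 *	error = nvme_enable_ctrl(ctrl, ctrl->cap);
 */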
/**
 * nvmf_connect_io_queue() - NVMe Fabrics I/O Queue "Connect"
 *			     API function.
 * @ctrl:	Host nvme controller instance used to establish an
 *		NVMe I/O queue connection to the already allocated NVMe
 *		controller on the target system.
 * @qid:	NVMe I/O queue number for the new I/O connection between
 *		host and target (note qid == 0 is illegal as this is
 *		the Admin queue, per NVMe standard).
 *
 * This function issues a fabrics-protocol connection
 * of an NVMe I/O queue (via NVMe Fabrics "Connect" command)
 * between the host system device and the allocated NVMe controller
 * on the target system.
 *
 * Return:
 *	0: success
 *	> 0: NVMe error status code
 *	< 0: Linux errno error code
 */
int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
{
	struct nvme_command cmd;
	struct nvmf_connect_data *data;
	union nvme_result res;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.connect.opcode = nvme_fabrics_command;
	cmd.connect.fctype = nvme_fabrics_type_connect;
	cmd.connect.qid = cpu_to_le16(qid);
	cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	uuid_copy(&data->hostid, &ctrl->opts->host->id);
	data->cntlid = cpu_to_le16(ctrl->cntlid);
	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);

	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
			data, sizeof(*data), 0, qid, 1,
			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (ret) {
		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
				       &cmd, data);
	}

	kfree(data);
	return ret;
}
EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
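/*
 * Illustrative sketch, not part of this file: after creating its I/O queues,
 * a transport connects each of them in turn (queue 0 is the admin queue, so
 * numbering starts at 1). The queue count variable below is an assumption.
 *
 *	for (i = 1; i < ctrl->queue_count; i++) {
 *		ret = nvmf_connect_io_queue(ctrl, i);
 *		if (ret)
 *			return ret;
 *	}
 */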
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
{
	if (ctrl->opts->max_reconnects == -1 ||
	    ctrl->nr_reconnects < ctrl->opts->max_reconnects)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
/**
 * nvmf_register_transport() - NVMe Fabrics Library registration function.
 * @ops:	Transport ops instance to be registered to the
 *		common fabrics library.
 *
 * API function that registers the type of specific transport fabric
 * being implemented to the common NVMe fabrics library. Part of
 * the overall init sequence of starting up a fabrics driver.
 */
int nvmf_register_transport(struct nvmf_transport_ops *ops)
{
	if (!ops->create_ctrl)
		return -EINVAL;

	down_write(&nvmf_transports_rwsem);
	list_add_tail(&ops->entry, &nvmf_transports);
	up_write(&nvmf_transports_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmf_register_transport);
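/*
 * Illustrative sketch, not part of this file: a transport driver registers
 * its ops from its module init. All names below are hypothetical.
 *
 *	static struct nvmf_transport_ops nvme_foo_transport = {
 *		.name		= "foo",
 *		.module		= THIS_MODULE,
 *		.required_opts	= NVMF_OPT_TRADDR,
 *		.allowed_opts	= NVMF_OPT_TRSVCID,
 *		.create_ctrl	= nvme_foo_create_ctrl,
 *	};
 *
 *	static int __init nvme_foo_init_module(void)
 *	{
 *		return nvmf_register_transport(&nvme_foo_transport);
 *	}
 */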
/**
 * nvmf_unregister_transport() - NVMe Fabrics Library unregistration function.
 * @ops:	Transport ops instance to be unregistered from the
 *		common fabrics library.
 *
 * Fabrics API function that unregisters the type of specific transport
 * fabric being implemented from the common NVMe fabrics library.
 * Part of the overall exit sequence of unloading the implemented driver.
 */
void nvmf_unregister_transport(struct nvmf_transport_ops *ops)
{
	down_write(&nvmf_transports_rwsem);
	list_del(&ops->entry);
	up_write(&nvmf_transports_rwsem);
}
EXPORT_SYMBOL_GPL(nvmf_unregister_transport);
static struct nvmf_transport_ops *nvmf_lookup_transport(
		struct nvmf_ctrl_options *opts)
{
	struct nvmf_transport_ops *ops;

	lockdep_assert_held(&nvmf_transports_rwsem);

	list_for_each_entry(ops, &nvmf_transports, entry) {
		if (strcmp(ops->name, opts->transport) == 0)
			return ops;
	}

	return NULL;
}
= {
540 { NVMF_OPT_TRANSPORT
, "transport=%s" },
541 { NVMF_OPT_TRADDR
, "traddr=%s" },
542 { NVMF_OPT_TRSVCID
, "trsvcid=%s" },
543 { NVMF_OPT_NQN
, "nqn=%s" },
544 { NVMF_OPT_QUEUE_SIZE
, "queue_size=%d" },
545 { NVMF_OPT_NR_IO_QUEUES
, "nr_io_queues=%d" },
546 { NVMF_OPT_RECONNECT_DELAY
, "reconnect_delay=%d" },
547 { NVMF_OPT_CTRL_LOSS_TMO
, "ctrl_loss_tmo=%d" },
548 { NVMF_OPT_KATO
, "keep_alive_tmo=%d" },
549 { NVMF_OPT_HOSTNQN
, "hostnqn=%s" },
550 { NVMF_OPT_HOST_TRADDR
, "host_traddr=%s" },
551 { NVMF_OPT_HOST_ID
, "hostid=%s" },
552 { NVMF_OPT_DUP_CONNECT
, "duplicate_connect" },
553 { NVMF_OPT_ERR
, NULL
}
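/*
 * Example (illustrative only) of an option string these tokens match; it is
 * the kind of string nvme-cli style tooling writes to /dev/nvme-fabrics.
 * Addresses and NQNs below are placeholders.
 *
 *	transport=rdma,traddr=192.168.1.100,trsvcid=4420,
 *	nqn=nqn.2016-06.io.example:subsystem1,keep_alive_tmo=5
 */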
static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	size_t nqnlen = 0;
	int ctrl_loss_tmo = NVMF_DEF_CTRL_LOSS_TMO;
	uuid_t hostid;

	/* Set defaults */
	opts->queue_size = NVMF_DEF_QUEUE_SIZE;
	opts->nr_io_queues = num_online_cpus();
	opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
	opts->kato = NVME_DEFAULT_KATO;
	opts->duplicate_connect = false;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	uuid_gen(&hostid);

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_TRANSPORT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			opts->transport = p;
			break;
		case NVMF_OPT_NQN:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			opts->subsysnqn = p;
			nqnlen = strlen(opts->subsysnqn);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					opts->subsysnqn, NVMF_NQN_SIZE);
				ret = -EINVAL;
				goto out;
			}
			opts->discovery_nqn =
				!(strcmp(opts->subsysnqn,
					 NVME_DISC_SUBSYS_NAME));
			if (opts->discovery_nqn)
				opts->nr_io_queues = 0;
			break;
		case NVMF_OPT_TRADDR:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			opts->traddr = p;
			break;
		case NVMF_OPT_TRSVCID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			opts->trsvcid = p;
			break;
		case NVMF_OPT_QUEUE_SIZE:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < NVMF_MIN_QUEUE_SIZE ||
			    token > NVMF_MAX_QUEUE_SIZE) {
				pr_err("Invalid queue_size %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->queue_size = token;
			break;
		case NVMF_OPT_NR_IO_QUEUES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid number of IOQs %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			if (opts->discovery_nqn) {
				pr_debug("Ignoring nr_io_queues value for discovery controller\n");
				break;
			}
			opts->nr_io_queues = min_t(unsigned int,
					num_online_cpus(), token);
			break;
		case NVMF_OPT_KATO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < 0) {
				pr_err("Invalid keep_alive_tmo %d\n", token);
				ret = -EINVAL;
				goto out;
			} else if (token == 0 && !opts->discovery_nqn) {
				/* Allowed for debug */
				pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
			}
			opts->kato = token;

			if (opts->discovery_nqn && opts->kato) {
				pr_err("Discovery controllers cannot accept KATO != 0\n");
				ret = -EINVAL;
				goto out;
			}
			break;
		case NVMF_OPT_CTRL_LOSS_TMO:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token < 0)
				pr_warn("ctrl_loss_tmo < 0 will reconnect forever\n");
			ctrl_loss_tmo = token;
			break;
		case NVMF_OPT_HOSTNQN:
			if (opts->host) {
				pr_err("hostnqn already user-assigned: %s\n",
				       opts->host->nqn);
				ret = -EADDRINUSE;
				goto out;
			}
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			nqnlen = strlen(p);
			if (nqnlen >= NVMF_NQN_SIZE) {
				pr_err("%s needs to be < %d bytes\n",
					p, NVMF_NQN_SIZE);
				kfree(p);
				ret = -EINVAL;
				goto out;
			}
			opts->host = nvmf_host_add(p);
			kfree(p);
			if (!opts->host) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case NVMF_OPT_RECONNECT_DELAY:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out;
			}
			if (token <= 0) {
				pr_err("Invalid reconnect_delay %d\n", token);
				ret = -EINVAL;
				goto out;
			}
			opts->reconnect_delay = token;
			break;
		case NVMF_OPT_HOST_TRADDR:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			opts->host_traddr = p;
			break;
		case NVMF_OPT_HOST_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = uuid_parse(p, &hostid);
			if (ret) {
				pr_err("Invalid hostid %s\n", p);
				ret = -EINVAL;
				kfree(p);
				goto out;
			}
			kfree(p);
			break;
		case NVMF_OPT_DUP_CONNECT:
			opts->duplicate_connect = true;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
				p);
			ret = -EINVAL;
			goto out;
		}
	}

	if (ctrl_loss_tmo < 0)
		opts->max_reconnects = -1;
	else
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);

	if (!opts->host) {
		kref_get(&nvmf_default_host->ref);
		opts->host = nvmf_default_host;
	}

	uuid_copy(&opts->host->id, &hostid);

out:
	kfree(options);
	return ret;
}
static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
		unsigned int required_opts)
{
	if ((opts->mask & required_opts) != required_opts) {
		int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & required_opts) &&
			    !(opt_tokens[i].token & opts->mask)) {
				pr_warn("missing parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}
static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
		unsigned int allowed_opts)
{
	if (opts->mask & ~allowed_opts) {
		int i;

		for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
			if ((opt_tokens[i].token & opts->mask) &&
			    (opt_tokens[i].token & ~allowed_opts)) {
				pr_warn("invalid parameter '%s'\n",
					opt_tokens[i].pattern);
			}
		}

		return -EINVAL;
	}

	return 0;
}
void nvmf_free_options(struct nvmf_ctrl_options *opts)
{
	nvmf_host_put(opts->host);
	kfree(opts->transport);
	kfree(opts->traddr);
	kfree(opts->trsvcid);
	kfree(opts->subsysnqn);
	kfree(opts->host_traddr);
	kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);
#define NVMF_REQUIRED_OPTS	(NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
#define NVMF_ALLOWED_OPTS	(NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
				 NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
				 NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT)
static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
{
	struct nvmf_ctrl_options *opts;
	struct nvmf_transport_ops *ops;
	struct nvme_ctrl *ctrl;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);

	ret = nvmf_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	request_module("nvme-%s", opts->transport);

	/*
	 * Check the generic options first as we need a valid transport for
	 * the lookup below.  Then clear the generic flags so that transport
	 * drivers don't have to care about them.
	 */
	ret = nvmf_check_required_opts(opts, NVMF_REQUIRED_OPTS);
	if (ret)
		goto out_free_opts;
	opts->mask &= ~NVMF_REQUIRED_OPTS;

	down_read(&nvmf_transports_rwsem);
	ops = nvmf_lookup_transport(opts);
	if (!ops) {
		pr_info("no handler found for transport %s.\n",
			opts->transport);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!try_module_get(ops->module)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = nvmf_check_required_opts(opts, ops->required_opts);
	if (ret)
		goto out_module_put;
	ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
				ops->allowed_opts | ops->required_opts);
	if (ret)
		goto out_module_put;

	ctrl = ops->create_ctrl(dev, opts);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_module_put;
	}

	if (strcmp(ctrl->subsys->subnqn, opts->subsysnqn)) {
		dev_warn(ctrl->device,
			"controller returned incorrect NQN: \"%s\".\n",
			ctrl->subsys->subnqn);
		module_put(ops->module);
		up_read(&nvmf_transports_rwsem);
		nvme_delete_ctrl_sync(ctrl);
		return ERR_PTR(-EINVAL);
	}

	module_put(ops->module);
	up_read(&nvmf_transports_rwsem);
	return ctrl;

out_module_put:
	module_put(ops->module);
out_unlock:
	up_read(&nvmf_transports_rwsem);
out_free_opts:
	nvmf_free_options(opts);
	return ERR_PTR(ret);
}
static struct class *nvmf_class;
static struct device *nvmf_device;
static DEFINE_MUTEX(nvmf_dev_mutex);
static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *pos)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl;
	const char *buf;
	int ret = 0;

	if (count > PAGE_SIZE)
		return -ENOMEM;

	buf = memdup_user_nul(ubuf, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&nvmf_dev_mutex);
	if (seq_file->private) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ctrl = nvmf_create_ctrl(nvmf_device, buf, count);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto out_unlock;
	}

	seq_file->private = ctrl;

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	kfree(buf);
	return ret ? ret : count;
}
static int nvmf_dev_show(struct seq_file *seq_file, void *private)
{
	struct nvme_ctrl *ctrl;
	int ret = 0;

	mutex_lock(&nvmf_dev_mutex);
	ctrl = seq_file->private;
	if (!ctrl) {
		ret = -EINVAL;
		goto out_unlock;
	}

	seq_printf(seq_file, "instance=%d,cntlid=%d\n",
			ctrl->instance, ctrl->cntlid);

out_unlock:
	mutex_unlock(&nvmf_dev_mutex);
	return ret;
}
static int nvmf_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The miscdevice code initializes file->private_data, but doesn't
	 * make use of it later.
	 */
	file->private_data = NULL;
	return single_open(file, nvmf_dev_show, NULL);
}
static int nvmf_dev_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq_file = file->private_data;
	struct nvme_ctrl *ctrl = seq_file->private;

	if (ctrl)
		nvme_put_ctrl(ctrl);
	return single_release(inode, file);
}
static const struct file_operations nvmf_dev_fops = {
	.owner		= THIS_MODULE,
	.write		= nvmf_dev_write,
	.read		= seq_read,
	.open		= nvmf_dev_open,
	.release	= nvmf_dev_release,
};
static struct miscdevice nvmf_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "nvme-fabrics",
	.fops		= &nvmf_dev_fops,
};
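/*
 * Illustrative usage sketch, not part of this file: userspace (e.g. nvme-cli)
 * creates a controller by writing an option string to this misc device and
 * reads the result back on the same open file descriptor. Roughly:
 *
 *	fd = open("/dev/nvme-fabrics", O_RDWR);
 *	write(fd, "transport=loop,nqn=testnqn", ...);
 *	read(fd, buf, sizeof(buf));	// yields "instance=<n>,cntlid=<n>"
 *
 * The option string and NQN above are placeholders.
 */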
static int __init nvmf_init(void)
{
	int ret;

	nvmf_default_host = nvmf_host_default();
	if (!nvmf_default_host)
		return -ENOMEM;

	nvmf_class = class_create(THIS_MODULE, "nvme-fabrics");
	if (IS_ERR(nvmf_class)) {
		pr_err("couldn't register class nvme-fabrics\n");
		ret = PTR_ERR(nvmf_class);
		goto out_free_host;
	}

	nvmf_device =
		device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
	if (IS_ERR(nvmf_device)) {
		pr_err("couldn't create nvme-fabrics device!\n");
		ret = PTR_ERR(nvmf_device);
		goto out_destroy_class;
	}

	ret = misc_register(&nvmf_misc);
	if (ret) {
		pr_err("couldn't register misc device: %d\n", ret);
		goto out_destroy_device;
	}

	return 0;

out_destroy_device:
	device_destroy(nvmf_class, MKDEV(0, 0));
out_destroy_class:
	class_destroy(nvmf_class);
out_free_host:
	nvmf_host_put(nvmf_default_host);
	return ret;
}
static void __exit nvmf_exit(void)
{
	misc_deregister(&nvmf_misc);
	device_destroy(nvmf_class, MKDEV(0, 0));
	class_destroy(nvmf_class);
	nvmf_host_put(nvmf_default_host);

	BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
}
MODULE_LICENSE("GPL v2");

module_init(nvmf_init);
module_exit(nvmf_exit);