/*
 * Configfs interface for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/ctype.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>

#include "nvmet.h"
static const struct config_item_type nvmet_host_type;
static const struct config_item_type nvmet_subsys_type;
static const struct nvmet_transport_name {
	u8		type;
	const char	*name;
} nvmet_transport_names[] = {
	{ NVMF_TRTYPE_RDMA,	"rdma" },
	{ NVMF_TRTYPE_FC,	"fc" },
	{ NVMF_TRTYPE_LOOP,	"loop" },
};
/*
 * nvmet_port Generic ConfigFS definitions.
 * Used in any place in the ConfigFS tree that refers to an address.
 */
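/*
 * Illustrative userspace view (assuming configfs is mounted at
 * /sys/kernel/config): each attribute below shows up as a file in a port
 * or referral directory, e.g.
 *
 *	mkdir /sys/kernel/config/nvmet/ports/1
 *	echo ipv4     > /sys/kernel/config/nvmet/ports/1/addr_adrfam
 *	echo 10.0.0.5 > /sys/kernel/config/nvmet/ports/1/addr_traddr
 *	echo 4420     > /sys/kernel/config/nvmet/ports/1/addr_trsvcid
 */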
static ssize_t nvmet_addr_adrfam_show(struct config_item *item,
		char *page)
{
	switch (to_nvmet_port(item)->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		return sprintf(page, "ipv4\n");
	case NVMF_ADDR_FAMILY_IP6:
		return sprintf(page, "ipv6\n");
	case NVMF_ADDR_FAMILY_IB:
		return sprintf(page, "ib\n");
	case NVMF_ADDR_FAMILY_FC:
		return sprintf(page, "fc\n");
	default:
		return sprintf(page, "\n");
	}
}
static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	if (port->enabled) {
		pr_err("Cannot modify address while enabled\n");
		pr_err("Disable the address before modifying\n");
		return -EACCES;
	}

	if (sysfs_streq(page, "ipv4")) {
		port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP4;
	} else if (sysfs_streq(page, "ipv6")) {
		port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6;
	} else if (sysfs_streq(page, "ib")) {
		port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB;
	} else if (sysfs_streq(page, "fc")) {
		port->disc_addr.adrfam = NVMF_ADDR_FAMILY_FC;
	} else {
		pr_err("Invalid value '%s' for adrfam\n", page);
		return -EINVAL;
	}

	return count;
}

CONFIGFS_ATTR(nvmet_, addr_adrfam);
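/*
 * Note: CONFIGFS_ATTR(nvmet_, addr_adrfam) emits a struct configfs_attribute
 * named nvmet_attr_addr_adrfam that is wired to the _show/_store helpers
 * above; the other attributes in this file follow the same pattern.
 */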
static ssize_t nvmet_addr_portid_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%d\n",
			le16_to_cpu(port->disc_addr.portid));
}

static ssize_t nvmet_addr_portid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	u16 portid = 0;

	if (kstrtou16(page, 0, &portid)) {
		pr_err("Invalid value '%s' for portid\n", page);
		return -EINVAL;
	}

	if (port->enabled) {
		pr_err("Cannot modify address while enabled\n");
		pr_err("Disable the address before modifying\n");
		return -EACCES;
	}
	port->disc_addr.portid = cpu_to_le16(portid);
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_portid);
static ssize_t nvmet_addr_traddr_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%s\n",
			port->disc_addr.traddr);
}

static ssize_t nvmet_addr_traddr_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	if (count > NVMF_TRADDR_SIZE) {
		pr_err("Invalid value '%s' for traddr\n", page);
		return -EINVAL;
	}

	if (port->enabled) {
		pr_err("Cannot modify address while enabled\n");
		pr_err("Disable the address before modifying\n");
		return -EACCES;
	}

	if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
		return -EINVAL;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_traddr);
static ssize_t nvmet_addr_treq_show(struct config_item *item,
		char *page)
{
	switch (to_nvmet_port(item)->disc_addr.treq) {
	case NVMF_TREQ_NOT_SPECIFIED:
		return sprintf(page, "not specified\n");
	case NVMF_TREQ_REQUIRED:
		return sprintf(page, "required\n");
	case NVMF_TREQ_NOT_REQUIRED:
		return sprintf(page, "not required\n");
	default:
		return sprintf(page, "\n");
	}
}

static ssize_t nvmet_addr_treq_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	if (port->enabled) {
		pr_err("Cannot modify address while enabled\n");
		pr_err("Disable the address before modifying\n");
		return -EACCES;
	}

	if (sysfs_streq(page, "not specified")) {
		port->disc_addr.treq = NVMF_TREQ_NOT_SPECIFIED;
	} else if (sysfs_streq(page, "required")) {
		port->disc_addr.treq = NVMF_TREQ_REQUIRED;
	} else if (sysfs_streq(page, "not required")) {
		port->disc_addr.treq = NVMF_TREQ_NOT_REQUIRED;
	} else {
		pr_err("Invalid value '%s' for treq\n", page);
		return -EINVAL;
	}

	return count;
}

CONFIGFS_ATTR(nvmet_, addr_treq);
static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%s\n",
			port->disc_addr.trsvcid);
}

static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	if (count > NVMF_TRSVCID_SIZE) {
		pr_err("Invalid value '%s' for trsvcid\n", page);
		return -EINVAL;
	}
	if (port->enabled) {
		pr_err("Cannot modify address while enabled\n");
		pr_err("Disable the address before modifying\n");
		return -EACCES;
	}

	if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
		return -EINVAL;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_trsvcid);
static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
}

static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int ret;

	if (port->enabled) {
		pr_err("Cannot modify inline_data_size while port enabled\n");
		pr_err("Disable the port before modifying\n");
		return -EACCES;
	}
	ret = kstrtoint(page, 0, &port->inline_data_size);
	if (ret) {
		pr_err("Invalid value '%s' for inline_data_size\n", page);
		return -EINVAL;
	}
	return count;
}

CONFIGFS_ATTR(nvmet_, param_inline_data_size);
static ssize_t nvmet_addr_trtype_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) {
		if (port->disc_addr.trtype != nvmet_transport_names[i].type)
			continue;
		return sprintf(page, "%s\n", nvmet_transport_names[i].name);
	}

	return sprintf(page, "\n");
}
static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
{
	port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
	port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
	port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
}
static ssize_t nvmet_addr_trtype_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	if (port->enabled) {
		pr_err("Cannot modify address while enabled\n");
		pr_err("Disable the address before modifying\n");
		return -EACCES;
	}

	for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) {
		if (sysfs_streq(page, nvmet_transport_names[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for trtype\n", page);
	return -EINVAL;
found:
	memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
	port->disc_addr.trtype = nvmet_transport_names[i].type;
	if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
		nvmet_port_init_tsas_rdma(port);
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_trtype);
/*
 * Namespace structures & file operation functions below
 */
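/*
 * Illustrative example (paths depend on the configfs mount point and the
 * subsystem NQN chosen by the administrator):
 *
 *	mkdir /sys/kernel/config/nvmet/subsystems/<nqn>/namespaces/1
 *	echo /dev/nvme0n1 > .../namespaces/1/device_path
 *	echo 1            > .../namespaces/1/enable
 */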
static ssize_t nvmet_ns_device_path_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
}
static ssize_t nvmet_ns_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);
	ret = -EBUSY;
	if (ns->enabled)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");
	if (!len)
		goto out_unlock;

	kfree(ns->device_path);
	ret = -ENOMEM;
	ns->device_path = kstrndup(page, len, GFP_KERNEL);
	if (!ns->device_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);
	return count;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, device_path);
#ifdef CONFIG_PCI_P2PDMA
static ssize_t nvmet_ns_p2pmem_show(struct config_item *item,
		char *page)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
}

static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct pci_dev *p2p_dev = NULL;
	bool use_p2pmem;
	int ret = count;
	int error;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
	if (error) {
		ret = error;
		goto out_unlock;
	}

	ns->use_p2pmem = use_p2pmem;
	pci_dev_put(ns->p2p_dev);
	ns->p2p_dev = p2p_dev;

out_unlock:
	mutex_unlock(&ns->subsys->lock);

	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, p2pmem);
#endif /* CONFIG_PCI_P2PDMA */
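/*
 * The p2pmem attribute above selects whether peer-to-peer PCI memory is used
 * for this namespace; pci_p2pdma_enable_store() accepts a boolean or the name
 * of a specific PCI p2pdma provider. It is only built when CONFIG_PCI_P2PDMA
 * is enabled.
 */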
static ssize_t nvmet_ns_device_uuid_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
}

static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	if (uuid_parse(page, &ns->uuid))
		ret = -EINVAL;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_uuid);
static ssize_t nvmet_ns_device_nguid_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
}

static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	u8 nguid[16];
	const char *p = page;
	int i;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	for (i = 0; i < 16; i++) {
		if (p + 2 > page + count) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (!isxdigit(p[0]) || !isxdigit(p[1])) {
			ret = -EINVAL;
			goto out_unlock;
		}

		nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
		p += 2;

		if (*p == '-' || *p == ':')
			p++;
	}

	memcpy(&ns->nguid, nguid, sizeof(nguid));
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_nguid);
static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
}

static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	u32 oldgrpid, newgrpid;
	int ret;

	ret = kstrtou32(page, 0, &newgrpid);
	if (ret)
		return ret;

	if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
		return -EINVAL;

	down_write(&nvmet_ana_sem);
	oldgrpid = ns->anagrpid;
	nvmet_ana_group_enabled[newgrpid]++;
	ns->anagrpid = newgrpid;
	nvmet_ana_group_enabled[oldgrpid]--;
	up_write(&nvmet_ana_sem);

	nvmet_send_ana_event(ns->subsys, NULL);
	return count;
}

CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
}

static ssize_t nvmet_ns_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool enable;
	int ret = 0;

	if (strtobool(page, &enable))
		return -EINVAL;

	if (enable)
		ret = nvmet_ns_enable(ns);
	else
		nvmet_ns_disable(ns);

	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, enable);
static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
}

static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool val;

	if (strtobool(page, &val))
		return -EINVAL;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		pr_err("disable ns before setting buffered_io value.\n");
		mutex_unlock(&ns->subsys->lock);
		return -EINVAL;
	}

	ns->buffered_io = val;
	mutex_unlock(&ns->subsys->lock);
	return count;
}

CONFIGFS_ATTR(nvmet_ns_, buffered_io);
static struct configfs_attribute *nvmet_ns_attrs[] = {
	&nvmet_ns_attr_device_path,
	&nvmet_ns_attr_device_nguid,
	&nvmet_ns_attr_device_uuid,
	&nvmet_ns_attr_ana_grpid,
	&nvmet_ns_attr_enable,
	&nvmet_ns_attr_buffered_io,
#ifdef CONFIG_PCI_P2PDMA
	&nvmet_ns_attr_p2pmem,
#endif
	NULL,
};
static void nvmet_ns_release(struct config_item *item)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	nvmet_ns_free(ns);
}

static struct configfs_item_operations nvmet_ns_item_ops = {
	.release		= nvmet_ns_release,
};

static const struct config_item_type nvmet_ns_type = {
	.ct_item_ops		= &nvmet_ns_item_ops,
	.ct_attrs		= nvmet_ns_attrs,
	.ct_owner		= THIS_MODULE,
};
static struct config_group *nvmet_ns_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
	struct nvmet_ns *ns;
	int ret;
	u32 nsid;

	ret = kstrtou32(name, 0, &nsid);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (nsid == 0 || nsid == NVME_NSID_ALL)
		goto out;

	ret = -ENOMEM;
	ns = nvmet_ns_alloc(subsys, nsid);
	if (!ns)
		goto out;
	config_group_init_type_name(&ns->group, name, &nvmet_ns_type);

	pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);

	return &ns->group;
out:
	return ERR_PTR(ret);
}

static struct configfs_group_operations nvmet_namespaces_group_ops = {
	.make_group		= nvmet_ns_make,
};

static const struct config_item_type nvmet_namespaces_type = {
	.ct_group_ops		= &nvmet_namespaces_group_ops,
	.ct_owner		= THIS_MODULE,
};
static int nvmet_port_subsys_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys;
	struct nvmet_subsys_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_subsys_type) {
		pr_err("can only link subsystems into the subsystems dir.!\n");
		return -EINVAL;
	}
	subsys = to_subsys(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->subsys = subsys;

	down_write(&nvmet_config_sem);
	ret = -EEXIST;
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto out_free_link;
	}

	if (list_empty(&port->subsystems)) {
		ret = nvmet_enable_port(port);
		if (ret)
			goto out_free_link;
	}

	list_add_tail(&link->entry, &port->subsystems);

	up_write(&nvmet_config_sem);
	return 0;

out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}
static void nvmet_port_subsys_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys = to_subsys(target);
	struct nvmet_subsys_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	if (list_empty(&port->subsystems))
		nvmet_disable_port(port);
	up_write(&nvmet_config_sem);
	kfree(p);
}

static struct configfs_item_operations nvmet_port_subsys_item_ops = {
	.allow_link		= nvmet_port_subsys_allow_link,
	.drop_link		= nvmet_port_subsys_drop_link,
};

static const struct config_item_type nvmet_port_subsys_type = {
	.ct_item_ops		= &nvmet_port_subsys_item_ops,
	.ct_owner		= THIS_MODULE,
};
static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host;
	struct nvmet_host_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_host_type) {
		pr_err("can only link hosts into the allowed_hosts directory!\n");
		return -EINVAL;
	}

	host = to_host(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->host = host;

	down_write(&nvmet_config_sem);
	ret = -EINVAL;
	if (subsys->allow_any_host) {
		pr_err("can't add hosts when allow_any_host is set!\n");
		goto out_free_link;
	}

	ret = -EEXIST;
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto out_free_link;
	}
	list_add_tail(&link->entry, &subsys->hosts);

	up_write(&nvmet_config_sem);
	return 0;
out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}
static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host = to_host(target);
	struct nvmet_host_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	up_write(&nvmet_config_sem);
	kfree(p);
}

static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
	.allow_link		= nvmet_allowed_hosts_allow_link,
	.drop_link		= nvmet_allowed_hosts_drop_link,
};

static const struct config_item_type nvmet_allowed_hosts_type = {
	.ct_item_ops		= &nvmet_allowed_hosts_item_ops,
	.ct_owner		= THIS_MODULE,
};
static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n",
		to_subsys(item)->allow_any_host);
}

static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool allow_any_host;
	int ret = 0;

	if (strtobool(page, &allow_any_host))
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (allow_any_host && !list_empty(&subsys->hosts)) {
		pr_err("Can't set allow_any_host when explicit hosts are set!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	subsys->allow_any_host = allow_any_host;
out_unlock:
	up_write(&nvmet_config_sem);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	if (NVME_TERTIARY(subsys->ver))
		return snprintf(page, PAGE_SIZE, "%d.%d.%d\n",
				(int)NVME_MAJOR(subsys->ver),
				(int)NVME_MINOR(subsys->ver),
				(int)NVME_TERTIARY(subsys->ver));
	else
		return snprintf(page, PAGE_SIZE, "%d.%d\n",
				(int)NVME_MAJOR(subsys->ver),
				(int)NVME_MINOR(subsys->ver));
}

static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	int major, minor, tertiary = 0;
	int ret;

	ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
	if (ret != 2 && ret != 3)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	subsys->ver = NVME_VS(major, minor, tertiary);
	up_write(&nvmet_config_sem);

	return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_version);
static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial);
}

static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	down_write(&nvmet_config_sem);
	sscanf(page, "%llx\n", &subsys->serial);
	up_write(&nvmet_config_sem);

	return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
static struct configfs_attribute *nvmet_subsys_attrs[] = {
	&nvmet_subsys_attr_attr_allow_any_host,
	&nvmet_subsys_attr_attr_version,
	&nvmet_subsys_attr_attr_serial,
	NULL,
};
/*
 * Subsystem structures & folder operation functions below
 */
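/*
 * Removing a subsystem directory ends up in nvmet_subsys_release() below,
 * which shuts down any remaining controllers before dropping the final
 * reference on the subsystem.
 */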
static void nvmet_subsys_release(struct config_item *item)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	nvmet_subsys_del_ctrls(subsys);
	nvmet_subsys_put(subsys);
}

static struct configfs_item_operations nvmet_subsys_item_ops = {
	.release		= nvmet_subsys_release,
};

static const struct config_item_type nvmet_subsys_type = {
	.ct_item_ops		= &nvmet_subsys_item_ops,
	.ct_attrs		= nvmet_subsys_attrs,
	.ct_owner		= THIS_MODULE,
};
static struct config_group *nvmet_subsys_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys;

	if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
		pr_err("can't create discovery subsystem through configfs\n");
		return ERR_PTR(-EINVAL);
	}

	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
	if (!subsys)
		return ERR_PTR(-ENOMEM);

	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);

	config_group_init_type_name(&subsys->namespaces_group,
			"namespaces", &nvmet_namespaces_type);
	configfs_add_default_group(&subsys->namespaces_group, &subsys->group);

	config_group_init_type_name(&subsys->allowed_hosts_group,
			"allowed_hosts", &nvmet_allowed_hosts_type);
	configfs_add_default_group(&subsys->allowed_hosts_group,
			&subsys->group);

	return &subsys->group;
}

static struct configfs_group_operations nvmet_subsystems_group_ops = {
	.make_group		= nvmet_subsys_make,
};

static const struct config_item_type nvmet_subsystems_type = {
	.ct_group_ops		= &nvmet_subsystems_group_ops,
	.ct_owner		= THIS_MODULE,
};
static ssize_t nvmet_referral_enable_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
}

static ssize_t nvmet_referral_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
	struct nvmet_port *port = to_nvmet_port(item);
	bool enable;

	if (strtobool(page, &enable))
		goto inval;

	if (enable)
		nvmet_referral_enable(parent, port);
	else
		nvmet_referral_disable(port);

	return count;
inval:
	pr_err("Invalid value '%s' for enable\n", page);
	return -EINVAL;
}

CONFIGFS_ATTR(nvmet_referral_, enable);
/*
 * Discovery Service subsystem definitions
 */
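/*
 * Illustrative example: a referral directory re-uses the generic address
 * attributes above to point the discovery service at another port, e.g.
 *
 *	mkdir /sys/kernel/config/nvmet/ports/1/referrals/peer
 *	echo <traddr of the referred-to port> > .../referrals/peer/addr_traddr
 *	echo 1 > .../referrals/peer/enable
 */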
static struct configfs_attribute *nvmet_referral_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_portid,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_referral_attr_enable,
	NULL,
};

static void nvmet_referral_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	nvmet_referral_disable(port);
	kfree(port);
}

static struct configfs_item_operations nvmet_referral_item_ops = {
	.release	= nvmet_referral_release,
};

static const struct config_item_type nvmet_referral_type = {
	.ct_owner	= THIS_MODULE,
	.ct_attrs	= nvmet_referral_attrs,
	.ct_item_ops	= &nvmet_referral_item_ops,
};
static struct config_group *nvmet_referral_make(
		struct config_group *group, const char *name)
{
	struct nvmet_port *port;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&port->entry);
	config_group_init_type_name(&port->group, name, &nvmet_referral_type);

	return &port->group;
}

static struct configfs_group_operations nvmet_referral_group_ops = {
	.make_group		= nvmet_referral_make,
};

static const struct config_item_type nvmet_referrals_type = {
	.ct_owner	= THIS_MODULE,
	.ct_group_ops	= &nvmet_referral_group_ops,
};
static struct {
	enum nvme_ana_state	state;
	const char		*name;
} nvmet_ana_state_names[] = {
	{ NVME_ANA_OPTIMIZED,		"optimized" },
	{ NVME_ANA_NONOPTIMIZED,	"non-optimized" },
	{ NVME_ANA_INACCESSIBLE,	"inaccessible" },
	{ NVME_ANA_PERSISTENT_LOSS,	"persistent-loss" },
	{ NVME_ANA_CHANGE,		"change" },
};
static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
		char *page)
{
	struct nvmet_ana_group *grp = to_ana_group(item);
	enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
		if (state != nvmet_ana_state_names[i].state)
			continue;
		return sprintf(page, "%s\n", nvmet_ana_state_names[i].name);
	}

	return sprintf(page, "\n");
}

static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ana_group *grp = to_ana_group(item);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
		if (sysfs_streq(page, nvmet_ana_state_names[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for ana_state\n", page);
	return -EINVAL;

found:
	down_write(&nvmet_ana_sem);
	grp->port->ana_state[grp->grpid] = nvmet_ana_state_names[i].state;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);
	return count;
}

CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
static struct configfs_attribute *nvmet_ana_group_attrs[] = {
	&nvmet_ana_group_attr_ana_state,
	NULL,
};
static void nvmet_ana_group_release(struct config_item *item)
{
	struct nvmet_ana_group *grp = to_ana_group(item);

	if (grp == &grp->port->ana_default_group)
		return;

	down_write(&nvmet_ana_sem);
	grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
	nvmet_ana_group_enabled[grp->grpid]--;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);
	kfree(grp);
}

static struct configfs_item_operations nvmet_ana_group_item_ops = {
	.release		= nvmet_ana_group_release,
};

static const struct config_item_type nvmet_ana_group_type = {
	.ct_item_ops		= &nvmet_ana_group_item_ops,
	.ct_attrs		= nvmet_ana_group_attrs,
	.ct_owner		= THIS_MODULE,
};
static struct config_group *nvmet_ana_groups_make_group(
		struct config_group *group, const char *name)
{
	struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
	struct nvmet_ana_group *grp;
	u32 grpid;
	int ret;

	ret = kstrtou32(name, 0, &grpid);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
		goto out;

	ret = -ENOMEM;
	grp = kzalloc(sizeof(*grp), GFP_KERNEL);
	if (!grp)
		goto out;
	grp->port = port;
	grp->grpid = grpid;

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[grpid]++;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);

	config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
	return &grp->group;
out:
	return ERR_PTR(ret);
}

static struct configfs_group_operations nvmet_ana_groups_group_ops = {
	.make_group		= nvmet_ana_groups_make_group,
};

static const struct config_item_type nvmet_ana_groups_type = {
	.ct_group_ops		= &nvmet_ana_groups_group_ops,
	.ct_owner		= THIS_MODULE,
};
/*
 * Ports definitions.
 */
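/*
 * Ports are numbered directories created by userspace under the nvmet
 * ports/ group (illustrative path: /sys/kernel/config/nvmet/ports/<id>).
 * A port is enabled when the first subsystem is linked into its subsystems/
 * directory and disabled again when the last link is removed; see
 * nvmet_port_subsys_allow_link()/nvmet_port_subsys_drop_link() above.
 */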
static void nvmet_port_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	kfree(port->ana_state);
	kfree(port);
}

static struct configfs_attribute *nvmet_port_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_attr_param_inline_data_size,
	NULL,
};
static struct configfs_item_operations nvmet_port_item_ops = {
	.release		= nvmet_port_release,
};

static const struct config_item_type nvmet_port_type = {
	.ct_attrs		= nvmet_port_attrs,
	.ct_item_ops		= &nvmet_port_item_ops,
	.ct_owner		= THIS_MODULE,
};
static struct config_group *nvmet_ports_make(struct config_group *group,
		const char *name)
{
	struct nvmet_port *port;
	u16 portid;
	u32 i;

	if (kstrtou16(name, 0, &portid))
		return ERR_PTR(-EINVAL);

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
			sizeof(*port->ana_state), GFP_KERNEL);
	if (!port->ana_state) {
		kfree(port);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
		if (i == NVMET_DEFAULT_ANA_GRPID)
			port->ana_state[1] = NVME_ANA_OPTIMIZED;
		else
			port->ana_state[i] = NVME_ANA_INACCESSIBLE;
	}

	INIT_LIST_HEAD(&port->entry);
	INIT_LIST_HEAD(&port->subsystems);
	INIT_LIST_HEAD(&port->referrals);
	port->inline_data_size = -1;	/* < 0 == let the transport choose */

	port->disc_addr.portid = cpu_to_le16(portid);
	config_group_init_type_name(&port->group, name, &nvmet_port_type);

	config_group_init_type_name(&port->subsys_group,
			"subsystems", &nvmet_port_subsys_type);
	configfs_add_default_group(&port->subsys_group, &port->group);

	config_group_init_type_name(&port->referrals_group,
			"referrals", &nvmet_referrals_type);
	configfs_add_default_group(&port->referrals_group, &port->group);

	config_group_init_type_name(&port->ana_groups_group,
			"ana_groups", &nvmet_ana_groups_type);
	configfs_add_default_group(&port->ana_groups_group, &port->group);

	port->ana_default_group.port = port;
	port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
	config_group_init_type_name(&port->ana_default_group.group,
			__stringify(NVMET_DEFAULT_ANA_GRPID),
			&nvmet_ana_group_type);
	configfs_add_default_group(&port->ana_default_group.group,
			&port->ana_groups_group);

	return &port->group;
}
static struct configfs_group_operations nvmet_ports_group_ops = {
	.make_group		= nvmet_ports_make,
};

static const struct config_item_type nvmet_ports_type = {
	.ct_group_ops		= &nvmet_ports_group_ops,
	.ct_owner		= THIS_MODULE,
};
static struct config_group nvmet_subsystems_group;
static struct config_group nvmet_ports_group;
static void nvmet_host_release(struct config_item *item)
{
	struct nvmet_host *host = to_host(item);

	kfree(host);
}

static struct configfs_item_operations nvmet_host_item_ops = {
	.release		= nvmet_host_release,
};

static const struct config_item_type nvmet_host_type = {
	.ct_item_ops		= &nvmet_host_item_ops,
	.ct_owner		= THIS_MODULE,
};
static struct config_group *nvmet_hosts_make_group(struct config_group *group,
		const char *name)
{
	struct nvmet_host *host;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return ERR_PTR(-ENOMEM);

	config_group_init_type_name(&host->group, name, &nvmet_host_type);

	return &host->group;
}

static struct configfs_group_operations nvmet_hosts_group_ops = {
	.make_group		= nvmet_hosts_make_group,
};

static const struct config_item_type nvmet_hosts_type = {
	.ct_group_ops		= &nvmet_hosts_group_ops,
	.ct_owner		= THIS_MODULE,
};
static struct config_group nvmet_hosts_group;

static const struct config_item_type nvmet_root_type = {
	.ct_owner		= THIS_MODULE,
};

static struct configfs_subsystem nvmet_configfs_subsystem = {
	.su_group = {
		.cg_item = {
			.ci_namebuf	= "nvmet",
			.ci_type	= &nvmet_root_type,
		},
	},
};
int __init nvmet_init_configfs(void)
{
	int ret;

	config_group_init(&nvmet_configfs_subsystem.su_group);
	mutex_init(&nvmet_configfs_subsystem.su_mutex);

	config_group_init_type_name(&nvmet_subsystems_group,
			"subsystems", &nvmet_subsystems_type);
	configfs_add_default_group(&nvmet_subsystems_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_ports_group,
			"ports", &nvmet_ports_type);
	configfs_add_default_group(&nvmet_ports_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_hosts_group,
			"hosts", &nvmet_hosts_type);
	configfs_add_default_group(&nvmet_hosts_group,
			&nvmet_configfs_subsystem.su_group);

	ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
	if (ret) {
		pr_err("configfs_register_subsystem: %d\n", ret);
		return ret;
	}

	return 0;
}
void __exit nvmet_exit_configfs(void)
{
	configfs_unregister_subsystem(&nvmet_configfs_subsystem);
}