// SPDX-License-Identifier: GPL-2.0
/*
 * Configfs interface for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/ctype.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>

#include "nvmet.h"
static const struct config_item_type nvmet_host_type;
static const struct config_item_type nvmet_subsys_type;

static LIST_HEAD(nvmet_ports_list);
struct list_head *nvmet_ports = &nvmet_ports_list;
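
/*
 * All of the configuration below is exposed through configfs, which is
 * typically mounted at /sys/kernel/config.  This module registers the
 * "nvmet" subsystem there with three top-level directories created in
 * nvmet_init_configfs(): subsystems/, ports/ and hosts/.  Directories and
 * symlinks created by the administrator (often via the nvmetcli tool) map
 * onto the make_group()/allow_link() callbacks in this file.  For example
 * (illustrative only):
 *
 *   mkdir /sys/kernel/config/nvmet/subsystems/testnqn
 *   mkdir /sys/kernel/config/nvmet/ports/1
 */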
static const struct nvmet_transport_name {
	u8		type;
	const char	*name;
} nvmet_transport_names[] = {
	{ NVMF_TRTYPE_RDMA,	"rdma" },
	{ NVMF_TRTYPE_FC,	"fc" },
	{ NVMF_TRTYPE_TCP,	"tcp" },
	{ NVMF_TRTYPE_LOOP,	"loop" },
};
/*
 * nvmet_port Generic ConfigFS definitions.
 * Used in any place in the ConfigFS tree that refers to an address.
 */
static ssize_t nvmet_addr_adrfam_show(struct config_item *item,
		char *page)
{
	switch (to_nvmet_port(item)->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		return sprintf(page, "ipv4\n");
	case NVMF_ADDR_FAMILY_IP6:
		return sprintf(page, "ipv6\n");
	case NVMF_ADDR_FAMILY_IB:
		return sprintf(page, "ib\n");
	case NVMF_ADDR_FAMILY_FC:
		return sprintf(page, "fc\n");
	default:
		return sprintf(page, "\n");
	}
}
static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	if (port->enabled) {
		pr_err("Cannot modify address while enabled\n");
		pr_err("Disable the address before modifying\n");
		return -EACCES;
	}

	if (sysfs_streq(page, "ipv4")) {
		port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP4;
	} else if (sysfs_streq(page, "ipv6")) {
		port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6;
	} else if (sysfs_streq(page, "ib")) {
		port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB;
	} else if (sysfs_streq(page, "fc")) {
		port->disc_addr.adrfam = NVMF_ADDR_FAMILY_FC;
	} else {
		pr_err("Invalid value '%s' for adrfam\n", page);
		return -EINVAL;
	}

	return count;
}

CONFIGFS_ATTR(nvmet_, addr_adrfam);
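
/*
 * CONFIGFS_ATTR(nvmet_, addr_adrfam) glues the _show/_store helpers above
 * into a struct configfs_attribute named nvmet_attr_addr_adrfam; the same
 * pattern repeats for every attribute in this file, and the resulting
 * symbols are collected in the attribute tables further down.
 */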
static ssize_t nvmet_addr_portid_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%d\n",
			le16_to_cpu(port->disc_addr.portid));
}
static ssize_t nvmet_addr_portid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	u16 portid = 0;

	if (kstrtou16(page, 0, &portid)) {
		pr_err("Invalid value '%s' for portid\n", page);
		return -EINVAL;
	}

	if (port->enabled) {
		pr_err("Cannot modify address while enabled\n");
		pr_err("Disable the address before modifying\n");
		return -EACCES;
	}

	port->disc_addr.portid = cpu_to_le16(portid);
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_portid);
static ssize_t nvmet_addr_traddr_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%s\n",
			port->disc_addr.traddr);
}
static ssize_t nvmet_addr_traddr_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	if (count > NVMF_TRADDR_SIZE) {
		pr_err("Invalid value '%s' for traddr\n", page);
		return -EINVAL;
	}

	if (port->enabled) {
		pr_err("Cannot modify address while enabled\n");
		pr_err("Disable the address before modifying\n");
		return -EACCES;
	}

	if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
		return -EINVAL;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_traddr);
static ssize_t nvmet_addr_treq_show(struct config_item *item,
		char *page)
{
	switch (to_nvmet_port(item)->disc_addr.treq &
		NVME_TREQ_SECURE_CHANNEL_MASK) {
	case NVMF_TREQ_NOT_SPECIFIED:
		return sprintf(page, "not specified\n");
	case NVMF_TREQ_REQUIRED:
		return sprintf(page, "required\n");
	case NVMF_TREQ_NOT_REQUIRED:
		return sprintf(page, "not required\n");
	default:
		return sprintf(page, "\n");
	}
}
static ssize_t nvmet_addr_treq_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;

	if (port->enabled) {
		pr_err("Cannot modify address while enabled\n");
		pr_err("Disable the address before modifying\n");
		return -EACCES;
	}

	if (sysfs_streq(page, "not specified")) {
		treq |= NVMF_TREQ_NOT_SPECIFIED;
	} else if (sysfs_streq(page, "required")) {
		treq |= NVMF_TREQ_REQUIRED;
	} else if (sysfs_streq(page, "not required")) {
		treq |= NVMF_TREQ_NOT_REQUIRED;
	} else {
		pr_err("Invalid value '%s' for treq\n", page);
		return -EINVAL;
	}

	port->disc_addr.treq = treq;

	return count;
}

CONFIGFS_ATTR(nvmet_, addr_treq);
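
/*
 * Only the secure-channel bits of TREQ are exposed here; the store helper
 * above preserves the remaining flag bits (e.g. SQ flow control disable)
 * by masking with ~NVME_TREQ_SECURE_CHANNEL_MASK before or-ing in the new
 * value.
 */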
static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%s\n",
			port->disc_addr.trsvcid);
}
static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	if (count > NVMF_TRSVCID_SIZE) {
		pr_err("Invalid value '%s' for trsvcid\n", page);
		return -EINVAL;
	}

	if (port->enabled) {
		pr_err("Cannot modify address while enabled\n");
		pr_err("Disable the address before modifying\n");
		return -EACCES;
	}

	if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
		return -EINVAL;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_trsvcid);
static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
}
static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int ret;

	if (port->enabled) {
		pr_err("Cannot modify inline_data_size while port enabled\n");
		pr_err("Disable the port before modifying\n");
		return -EACCES;
	}

	ret = kstrtoint(page, 0, &port->inline_data_size);
	if (ret) {
		pr_err("Invalid value '%s' for inline_data_size\n", page);
		return -EINVAL;
	}
	return count;
}

CONFIGFS_ATTR(nvmet_, param_inline_data_size);
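
/*
 * param_inline_data_size is initialized to -1 in nvmet_ports_make(), which
 * lets the transport pick its own default; a non-negative value written
 * here overrides that, but only while the port is disabled.
 */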
static ssize_t nvmet_addr_trtype_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) {
		if (port->disc_addr.trtype != nvmet_transport_names[i].type)
			continue;
		return sprintf(page, "%s\n", nvmet_transport_names[i].name);
	}

	return sprintf(page, "\n");
}
static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
{
	port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
	port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
	port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
}
static ssize_t nvmet_addr_trtype_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	if (port->enabled) {
		pr_err("Cannot modify address while enabled\n");
		pr_err("Disable the address before modifying\n");
		return -EACCES;
	}

	for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) {
		if (sysfs_streq(page, nvmet_transport_names[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for trtype\n", page);
	return -EINVAL;

found:
	memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
	port->disc_addr.trtype = nvmet_transport_names[i].type;
	if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
		nvmet_port_init_tsas_rdma(port);
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_trtype);
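
/*
 * Together the addr_* attributes describe the discovery log entry for a
 * port.  A typical (illustrative) NVMe/TCP setup writes them before
 * enabling the port:
 *
 *   cd /sys/kernel/config/nvmet/ports/1
 *   echo ipv4     > addr_adrfam
 *   echo tcp      > addr_trtype
 *   echo 10.0.0.1 > addr_traddr
 *   echo 4420     > addr_trsvcid
 *
 * The port itself is only enabled once the first subsystem is linked into
 * its subsystems/ directory (see nvmet_port_subsys_allow_link() below).
 */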
/*
 * Namespace structures & file operation functions below
 */
static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
{
	return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
}
static ssize_t nvmet_ns_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);
	ret = -EBUSY;
	if (ns->enabled)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");
	if (!len)
		goto out_unlock;

	kfree(ns->device_path);
	ret = -ENOMEM;
	ns->device_path = kstrndup(page, len, GFP_KERNEL);
	if (!ns->device_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);
	return count;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, device_path);
#ifdef CONFIG_PCI_P2PDMA
static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
}

static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct pci_dev *p2p_dev = NULL;
	bool use_p2pmem;
	int ret = count;
	int error;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
	if (error) {
		ret = error;
		goto out_unlock;
	}

	ns->use_p2pmem = use_p2pmem;
	pci_dev_put(ns->p2p_dev);
	ns->p2p_dev = p2p_dev;

out_unlock:
	mutex_unlock(&ns->subsys->lock);

	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, p2pmem);
#endif /* CONFIG_PCI_P2PDMA */
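
/*
 * The p2pmem attribute is only compiled in when CONFIG_PCI_P2PDMA is set.
 * Parsing and formatting of its value (a boolean, or the name of a
 * specific PCI peer-to-peer memory provider) is delegated to
 * pci_p2pdma_enable_store()/pci_p2pdma_enable_show() from the PCI core,
 * and the namespace must be disabled while the value changes.
 */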
static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
}
static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	if (uuid_parse(page, &ns->uuid))
		ret = -EINVAL;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_uuid);
static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
}
static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	u8 nguid[16];
	const char *p = page;
	int i;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	for (i = 0; i < 16; i++) {
		if (p + 2 > page + count) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (!isxdigit(p[0]) || !isxdigit(p[1])) {
			ret = -EINVAL;
			goto out_unlock;
		}

		nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
		p += 2;

		if (*p == '-' || *p == ':')
			p++;
	}

	memcpy(&ns->nguid, nguid, sizeof(nguid));
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_nguid);
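
/*
 * The NGUID is parsed as 16 hexadecimal byte pairs; a '-' or ':' separator
 * after a pair is accepted and skipped, so both plain and dashed
 * (UUID-style) representations yield the same 16-byte value.
 */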
static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
}
static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	u32 oldgrpid, newgrpid;
	int ret;

	ret = kstrtou32(page, 0, &newgrpid);
	if (ret)
		return ret;

	if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
		return -EINVAL;

	down_write(&nvmet_ana_sem);
	oldgrpid = ns->anagrpid;
	nvmet_ana_group_enabled[newgrpid]++;
	ns->anagrpid = newgrpid;
	nvmet_ana_group_enabled[oldgrpid]--;
	up_write(&nvmet_ana_sem);

	nvmet_send_ana_event(ns->subsys, NULL);
	return count;
}

CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
}
static ssize_t nvmet_ns_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool enable;
	int ret = 0;

	if (strtobool(page, &enable))
		return -EINVAL;

	if (enable)
		ret = nvmet_ns_enable(ns);
	else
		nvmet_ns_disable(ns);

	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, enable);
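
/*
 * Writing a boolean here toggles the namespace: a true value calls
 * nvmet_ns_enable(), which attaches the backing device named in
 * device_path, and a false value calls nvmet_ns_disable().  Most of the
 * other namespace attributes can only be changed while the namespace is
 * disabled.
 */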
static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
}
static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool val;

	if (strtobool(page, &val))
		return -EINVAL;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		pr_err("disable ns before setting buffered_io value.\n");
		mutex_unlock(&ns->subsys->lock);
		return -EINVAL;
	}

	ns->buffered_io = val;
	mutex_unlock(&ns->subsys->lock);
	return count;
}

CONFIGFS_ATTR(nvmet_ns_, buffered_io);
static struct configfs_attribute *nvmet_ns_attrs[] = {
	&nvmet_ns_attr_device_path,
	&nvmet_ns_attr_device_nguid,
	&nvmet_ns_attr_device_uuid,
	&nvmet_ns_attr_ana_grpid,
	&nvmet_ns_attr_enable,
	&nvmet_ns_attr_buffered_io,
#ifdef CONFIG_PCI_P2PDMA
	&nvmet_ns_attr_p2pmem,
#endif
	NULL,
};
static void nvmet_ns_release(struct config_item *item)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	nvmet_ns_free(ns);
}

static struct configfs_item_operations nvmet_ns_item_ops = {
	.release		= nvmet_ns_release,
};

static const struct config_item_type nvmet_ns_type = {
	.ct_item_ops		= &nvmet_ns_item_ops,
	.ct_attrs		= nvmet_ns_attrs,
	.ct_owner		= THIS_MODULE,
};
static struct config_group *nvmet_ns_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
	struct nvmet_ns *ns;
	int ret;
	u32 nsid;

	ret = kstrtou32(name, 0, &nsid);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (nsid == 0 || nsid == NVME_NSID_ALL) {
		pr_err("invalid nsid %#x", nsid);
		goto out;
	}

	ret = -ENOMEM;
	ns = nvmet_ns_alloc(subsys, nsid);
	if (!ns)
		goto out;
	config_group_init_type_name(&ns->group, name, &nvmet_ns_type);

	pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);

	return &ns->group;
out:
	return ERR_PTR(ret);
}
static struct configfs_group_operations nvmet_namespaces_group_ops = {
	.make_group		= nvmet_ns_make,
};

static const struct config_item_type nvmet_namespaces_type = {
	.ct_group_ops		= &nvmet_namespaces_group_ops,
	.ct_owner		= THIS_MODULE,
};
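
/*
 * Each subsystem gets a namespaces/ default group; creating a numbered
 * directory inside it goes through nvmet_ns_make(), which validates the
 * NSID (0 and NVME_NSID_ALL are rejected) and allocates the namespace.
 */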
static int nvmet_port_subsys_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys;
	struct nvmet_subsys_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_subsys_type) {
		pr_err("can only link subsystems into the subsystems dir.!\n");
		return -EINVAL;
	}
	subsys = to_subsys(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->subsys = subsys;

	down_write(&nvmet_config_sem);
	ret = -EEXIST;
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto out_free_link;
	}

	if (list_empty(&port->subsystems)) {
		ret = nvmet_enable_port(port);
		if (ret)
			goto out_free_link;
	}

	list_add_tail(&link->entry, &port->subsystems);
	nvmet_port_disc_changed(port, subsys);

	up_write(&nvmet_config_sem);
	return 0;

out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}
static void nvmet_port_subsys_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys = to_subsys(target);
	struct nvmet_subsys_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_port_del_ctrls(port, subsys);
	nvmet_port_disc_changed(port, subsys);

	if (list_empty(&port->subsystems))
		nvmet_disable_port(port);
	up_write(&nvmet_config_sem);
	kfree(p);
}

static struct configfs_item_operations nvmet_port_subsys_item_ops = {
	.allow_link		= nvmet_port_subsys_allow_link,
	.drop_link		= nvmet_port_subsys_drop_link,
};

static const struct config_item_type nvmet_port_subsys_type = {
	.ct_item_ops		= &nvmet_port_subsys_item_ops,
	.ct_owner		= THIS_MODULE,
};
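
/*
 * A subsystem is exported on a port by symlinking it into the port's
 * subsystems/ directory.  The first link enables the port via
 * nvmet_enable_port(); removing the last link disables it again, so a
 * port with no linked subsystems does not listen on the fabric.
 */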
static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host;
	struct nvmet_host_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_host_type) {
		pr_err("can only link hosts into the allowed_hosts directory!\n");
		return -EINVAL;
	}

	host = to_host(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->host = host;

	down_write(&nvmet_config_sem);
	ret = -EINVAL;
	if (subsys->allow_any_host) {
		pr_err("can't add hosts when allow_any_host is set!\n");
		goto out_free_link;
	}

	ret = -EEXIST;
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto out_free_link;
	}
	list_add_tail(&link->entry, &subsys->hosts);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	return 0;

out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}
static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host = to_host(target);
	struct nvmet_host_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	kfree(p);
}

static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
	.allow_link		= nvmet_allowed_hosts_allow_link,
	.drop_link		= nvmet_allowed_hosts_drop_link,
};

static const struct config_item_type nvmet_allowed_hosts_type = {
	.ct_item_ops		= &nvmet_allowed_hosts_item_ops,
	.ct_owner		= THIS_MODULE,
};
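
/*
 * Host access control: hosts are symlinked from the top-level hosts/
 * directory into a subsystem's allowed_hosts/ directory.  Explicit host
 * links and attr_allow_any_host are mutually exclusive; both the link
 * creation above and the allow_any_host store below enforce that.
 */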
static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n",
		to_subsys(item)->allow_any_host);
}

static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool allow_any_host;
	int ret = 0;

	if (strtobool(page, &allow_any_host))
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (allow_any_host && !list_empty(&subsys->hosts)) {
		pr_err("Can't set allow_any_host when explicit hosts are set!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	if (subsys->allow_any_host != allow_any_host) {
		subsys->allow_any_host = allow_any_host;
		nvmet_subsys_disc_changed(subsys, NULL);
	}

out_unlock:
	up_write(&nvmet_config_sem);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	if (NVME_TERTIARY(subsys->ver))
		return snprintf(page, PAGE_SIZE, "%d.%d.%d\n",
				(int)NVME_MAJOR(subsys->ver),
				(int)NVME_MINOR(subsys->ver),
				(int)NVME_TERTIARY(subsys->ver));

	return snprintf(page, PAGE_SIZE, "%d.%d\n",
			(int)NVME_MAJOR(subsys->ver),
			(int)NVME_MINOR(subsys->ver));
}
static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	int major, minor, tertiary = 0;
	int ret;

	ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
	if (ret != 2 && ret != 3)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	subsys->ver = NVME_VS(major, minor, tertiary);
	up_write(&nvmet_config_sem);

	return count;
}

CONFIGFS_ATTR(nvmet_subsys_, attr_version);
static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial);
}

static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
		const char *page, size_t count)
{
	u64 serial;

	if (sscanf(page, "%llx\n", &serial) != 1)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	to_subsys(item)->serial = serial;
	up_write(&nvmet_config_sem);

	return count;
}

CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
}

static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
		const char *page, size_t cnt)
{
	u16 cntlid_min;

	if (sscanf(page, "%hu\n", &cntlid_min) != 1)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (cntlid_min >= to_subsys(item)->cntlid_max)
		goto out_unlock;
	to_subsys(item)->cntlid_min = cntlid_min;
	up_write(&nvmet_config_sem);
	return cnt;

out_unlock:
	up_write(&nvmet_config_sem);
	return -EINVAL;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);
static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
}

static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
		const char *page, size_t cnt)
{
	u16 cntlid_max;

	if (sscanf(page, "%hu\n", &cntlid_max) != 1)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (cntlid_max <= to_subsys(item)->cntlid_min)
		goto out_unlock;
	to_subsys(item)->cntlid_max = cntlid_max;
	up_write(&nvmet_config_sem);
	return cnt;

out_unlock:
	up_write(&nvmet_config_sem);
	return -EINVAL;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	struct nvmet_subsys_model *subsys_model;
	char *model = NVMET_DEFAULT_CTRL_MODEL;
	int ret;

	rcu_read_lock();
	subsys_model = rcu_dereference(subsys->model);
	if (subsys_model)
		model = subsys_model->number;
	ret = snprintf(page, PAGE_SIZE, "%s\n", model);
	rcu_read_unlock();

	return ret;
}
/* See Section 1.5 of NVMe 1.4 */
static bool nvmet_is_ascii(const char c)
{
	return c >= 0x20 && c <= 0x7e;
}
static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	struct nvmet_subsys_model *new_model;
	char *new_model_number;
	int pos = 0, len;

	len = strcspn(page, "\n");
	if (!len)
		return -EINVAL;

	for (pos = 0; pos < len; pos++) {
		if (!nvmet_is_ascii(page[pos]))
			return -EINVAL;
	}

	new_model_number = kstrndup(page, len, GFP_KERNEL);
	if (!new_model_number)
		return -ENOMEM;

	new_model = kzalloc(sizeof(*new_model) + len + 1, GFP_KERNEL);
	if (!new_model) {
		kfree(new_model_number);
		return -ENOMEM;
	}
	memcpy(new_model->number, new_model_number, len);
	kfree(new_model_number);

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	new_model = rcu_replace_pointer(subsys->model, new_model,
					mutex_is_locked(&subsys->lock));
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	kfree_rcu(new_model, rcuhead);

	return count;
}

CONFIGFS_ATTR(nvmet_subsys_, attr_model);
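
/*
 * The model number is published to readers through an RCU-protected
 * pointer: the store helper above allocates a new nvmet_subsys_model,
 * swaps it in with rcu_replace_pointer() under subsys->lock, and frees
 * the old copy with kfree_rcu() once readers are done.  nvmet_is_ascii()
 * restricts the string to the printable ASCII range required by the spec.
 */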
static struct configfs_attribute *nvmet_subsys_attrs[] = {
	&nvmet_subsys_attr_attr_allow_any_host,
	&nvmet_subsys_attr_attr_version,
	&nvmet_subsys_attr_attr_serial,
	&nvmet_subsys_attr_attr_cntlid_min,
	&nvmet_subsys_attr_attr_cntlid_max,
	&nvmet_subsys_attr_attr_model,
	NULL,
};
/*
 * Subsystem structures & folder operation functions below
 */
static void nvmet_subsys_release(struct config_item *item)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	nvmet_subsys_del_ctrls(subsys);
	nvmet_subsys_put(subsys);
}

static struct configfs_item_operations nvmet_subsys_item_ops = {
	.release		= nvmet_subsys_release,
};

static const struct config_item_type nvmet_subsys_type = {
	.ct_item_ops		= &nvmet_subsys_item_ops,
	.ct_attrs		= nvmet_subsys_attrs,
	.ct_owner		= THIS_MODULE,
};
static struct config_group *nvmet_subsys_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys;

	if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
		pr_err("can't create discovery subsystem through configfs\n");
		return ERR_PTR(-EINVAL);
	}

	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
	if (IS_ERR(subsys))
		return ERR_CAST(subsys);

	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);

	config_group_init_type_name(&subsys->namespaces_group,
			"namespaces", &nvmet_namespaces_type);
	configfs_add_default_group(&subsys->namespaces_group, &subsys->group);

	config_group_init_type_name(&subsys->allowed_hosts_group,
			"allowed_hosts", &nvmet_allowed_hosts_type);
	configfs_add_default_group(&subsys->allowed_hosts_group,
			&subsys->group);

	return &subsys->group;
}
static struct configfs_group_operations nvmet_subsystems_group_ops = {
	.make_group		= nvmet_subsys_make,
};

static const struct config_item_type nvmet_subsystems_type = {
	.ct_group_ops		= &nvmet_subsystems_group_ops,
	.ct_owner		= THIS_MODULE,
};
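
/*
 * mkdir under subsystems/ creates an NVM subsystem with the directory
 * name as its NQN; the discovery subsystem name is reserved and rejected.
 * Each new subsystem automatically gets namespaces/ and allowed_hosts/
 * default groups.
 */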
static ssize_t nvmet_referral_enable_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
}

static ssize_t nvmet_referral_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
	struct nvmet_port *port = to_nvmet_port(item);
	bool enable;

	if (strtobool(page, &enable))
		goto inval;

	if (enable)
		nvmet_referral_enable(parent, port);
	else
		nvmet_referral_disable(parent, port);

	return count;
inval:
	pr_err("Invalid value '%s' for enable\n", page);
	return -EINVAL;
}

CONFIGFS_ATTR(nvmet_referral_, enable);
/*
 * Discovery Service subsystem definitions
 */
static struct configfs_attribute *nvmet_referral_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_portid,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_referral_attr_enable,
	NULL,
};
static void nvmet_referral_notify(struct config_group *group,
		struct config_item *item)
{
	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
	struct nvmet_port *port = to_nvmet_port(item);

	nvmet_referral_disable(parent, port);
}

static void nvmet_referral_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	kfree(port);
}

static struct configfs_item_operations nvmet_referral_item_ops = {
	.release	= nvmet_referral_release,
};

static const struct config_item_type nvmet_referral_type = {
	.ct_owner	= THIS_MODULE,
	.ct_attrs	= nvmet_referral_attrs,
	.ct_item_ops	= &nvmet_referral_item_ops,
};
static struct config_group *nvmet_referral_make(
		struct config_group *group, const char *name)
{
	struct nvmet_port *port;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&port->entry);
	config_group_init_type_name(&port->group, name, &nvmet_referral_type);

	return &port->group;
}
static struct configfs_group_operations nvmet_referral_group_ops = {
	.make_group		= nvmet_referral_make,
	.disconnect_notify	= nvmet_referral_notify,
};

static const struct config_item_type nvmet_referrals_type = {
	.ct_owner	= THIS_MODULE,
	.ct_group_ops	= &nvmet_referral_group_ops,
};
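
/*
 * Entries under a port's referrals/ directory are themselves nvmet_port
 * structures, but only their address attributes are used: once their
 * enable attribute is set they describe additional discovery entries to
 * be returned in the parent port's discovery log page.
 */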
static struct {
	enum nvme_ana_state	state;
	const char		*name;
} nvmet_ana_state_names[] = {
	{ NVME_ANA_OPTIMIZED,		"optimized" },
	{ NVME_ANA_NONOPTIMIZED,	"non-optimized" },
	{ NVME_ANA_INACCESSIBLE,	"inaccessible" },
	{ NVME_ANA_PERSISTENT_LOSS,	"persistent-loss" },
	{ NVME_ANA_CHANGE,		"change" },
};
static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
		char *page)
{
	struct nvmet_ana_group *grp = to_ana_group(item);
	enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
		if (state != nvmet_ana_state_names[i].state)
			continue;
		return sprintf(page, "%s\n", nvmet_ana_state_names[i].name);
	}

	return sprintf(page, "\n");
}
static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ana_group *grp = to_ana_group(item);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
		if (sysfs_streq(page, nvmet_ana_state_names[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for ana_state\n", page);
	return -EINVAL;

found:
	down_write(&nvmet_ana_sem);
	grp->port->ana_state[grp->grpid] = nvmet_ana_state_names[i].state;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);
	return count;
}

CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
static struct configfs_attribute *nvmet_ana_group_attrs[] = {
	&nvmet_ana_group_attr_ana_state,
	NULL,
};
static void nvmet_ana_group_release(struct config_item *item)
{
	struct nvmet_ana_group *grp = to_ana_group(item);

	if (grp == &grp->port->ana_default_group)
		return;

	down_write(&nvmet_ana_sem);
	grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
	nvmet_ana_group_enabled[grp->grpid]--;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);
	kfree(grp);
}

static struct configfs_item_operations nvmet_ana_group_item_ops = {
	.release		= nvmet_ana_group_release,
};

static const struct config_item_type nvmet_ana_group_type = {
	.ct_item_ops		= &nvmet_ana_group_item_ops,
	.ct_attrs		= nvmet_ana_group_attrs,
	.ct_owner		= THIS_MODULE,
};
static struct config_group *nvmet_ana_groups_make_group(
		struct config_group *group, const char *name)
{
	struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
	struct nvmet_ana_group *grp;
	u32 grpid;
	int ret;

	ret = kstrtou32(name, 0, &grpid);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
		goto out;

	ret = -ENOMEM;
	grp = kzalloc(sizeof(*grp), GFP_KERNEL);
	if (!grp)
		goto out;
	grp->port = port;
	grp->grpid = grpid;

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[grpid]++;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);

	config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
	return &grp->group;
out:
	return ERR_PTR(ret);
}
static struct configfs_group_operations nvmet_ana_groups_group_ops = {
	.make_group		= nvmet_ana_groups_make_group,
};

static const struct config_item_type nvmet_ana_groups_type = {
	.ct_group_ops		= &nvmet_ana_groups_group_ops,
	.ct_owner		= THIS_MODULE,
};
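
/*
 * ANA (Asymmetric Namespace Access) groups: every port carries an
 * ana_groups/ directory with a default group whose ID is
 * NVMET_DEFAULT_ANA_GRPID and whose initial state is "optimized".
 * Additional group IDs can be created here, and ana_state changes are
 * pushed to connected hosts via nvmet_port_send_ana_event().
 */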
/*
 * Ports definitions.
 */
static void nvmet_port_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	list_del(&port->global_entry);

	kfree(port->ana_state);
	kfree(port);
}
static struct configfs_attribute *nvmet_port_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_attr_param_inline_data_size,
	NULL,
};
static struct configfs_item_operations nvmet_port_item_ops = {
	.release		= nvmet_port_release,
};

static const struct config_item_type nvmet_port_type = {
	.ct_attrs		= nvmet_port_attrs,
	.ct_item_ops		= &nvmet_port_item_ops,
	.ct_owner		= THIS_MODULE,
};
static struct config_group *nvmet_ports_make(struct config_group *group,
		const char *name)
{
	struct nvmet_port *port;
	u16 portid;
	u32 i;

	if (kstrtou16(name, 0, &portid))
		return ERR_PTR(-EINVAL);

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
			sizeof(*port->ana_state), GFP_KERNEL);
	if (!port->ana_state) {
		kfree(port);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
		if (i == NVMET_DEFAULT_ANA_GRPID)
			port->ana_state[1] = NVME_ANA_OPTIMIZED;
		else
			port->ana_state[i] = NVME_ANA_INACCESSIBLE;
	}

	list_add(&port->global_entry, &nvmet_ports_list);

	INIT_LIST_HEAD(&port->entry);
	INIT_LIST_HEAD(&port->subsystems);
	INIT_LIST_HEAD(&port->referrals);
	port->inline_data_size = -1;	/* < 0 == let the transport choose */

	port->disc_addr.portid = cpu_to_le16(portid);
	port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
	config_group_init_type_name(&port->group, name, &nvmet_port_type);

	config_group_init_type_name(&port->subsys_group,
			"subsystems", &nvmet_port_subsys_type);
	configfs_add_default_group(&port->subsys_group, &port->group);

	config_group_init_type_name(&port->referrals_group,
			"referrals", &nvmet_referrals_type);
	configfs_add_default_group(&port->referrals_group, &port->group);

	config_group_init_type_name(&port->ana_groups_group,
			"ana_groups", &nvmet_ana_groups_type);
	configfs_add_default_group(&port->ana_groups_group, &port->group);

	port->ana_default_group.port = port;
	port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
	config_group_init_type_name(&port->ana_default_group.group,
			__stringify(NVMET_DEFAULT_ANA_GRPID),
			&nvmet_ana_group_type);
	configfs_add_default_group(&port->ana_default_group.group,
			&port->ana_groups_group);

	return &port->group;
}
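
/*
 * mkdir under ports/ takes the directory name as the port ID and builds
 * the default subsystems/, referrals/ and ana_groups/ sub-directories,
 * plus the default ANA group.  The port is not active until a subsystem
 * is linked into subsystems/.
 */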
static struct configfs_group_operations nvmet_ports_group_ops = {
	.make_group		= nvmet_ports_make,
};

static const struct config_item_type nvmet_ports_type = {
	.ct_group_ops		= &nvmet_ports_group_ops,
	.ct_owner		= THIS_MODULE,
};
static struct config_group nvmet_subsystems_group;
static struct config_group nvmet_ports_group;
static void nvmet_host_release(struct config_item *item)
{
	struct nvmet_host *host = to_host(item);

	kfree(host);
}

static struct configfs_item_operations nvmet_host_item_ops = {
	.release		= nvmet_host_release,
};

static const struct config_item_type nvmet_host_type = {
	.ct_item_ops		= &nvmet_host_item_ops,
	.ct_owner		= THIS_MODULE,
};
static struct config_group *nvmet_hosts_make_group(struct config_group *group,
		const char *name)
{
	struct nvmet_host *host;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return ERR_PTR(-ENOMEM);

	config_group_init_type_name(&host->group, name, &nvmet_host_type);

	return &host->group;
}
static struct configfs_group_operations nvmet_hosts_group_ops = {
	.make_group		= nvmet_hosts_make_group,
};

static const struct config_item_type nvmet_hosts_type = {
	.ct_group_ops		= &nvmet_hosts_group_ops,
	.ct_owner		= THIS_MODULE,
};

static struct config_group nvmet_hosts_group;
static const struct config_item_type nvmet_root_type = {
	.ct_owner		= THIS_MODULE,
};

static struct configfs_subsystem nvmet_configfs_subsystem = {
	.su_group = {
		.cg_item = {
			.ci_namebuf	= "nvmet",
			.ci_type	= &nvmet_root_type,
		},
	},
};
int __init nvmet_init_configfs(void)
{
	int ret;

	config_group_init(&nvmet_configfs_subsystem.su_group);
	mutex_init(&nvmet_configfs_subsystem.su_mutex);

	config_group_init_type_name(&nvmet_subsystems_group,
			"subsystems", &nvmet_subsystems_type);
	configfs_add_default_group(&nvmet_subsystems_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_ports_group,
			"ports", &nvmet_ports_type);
	configfs_add_default_group(&nvmet_ports_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_hosts_group,
			"hosts", &nvmet_hosts_type);
	configfs_add_default_group(&nvmet_hosts_group,
			&nvmet_configfs_subsystem.su_group);

	ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
	if (ret) {
		pr_err("configfs_register_subsystem: %d\n", ret);
		return ret;
	}

	return 0;
}
void __exit nvmet_exit_configfs(void)
{
	configfs_unregister_subsystem(&nvmet_configfs_subsystem);
}