1 // SPDX-License-Identifier: GPL-2.0
/*
 * Configfs interface for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/kstrtox.h>
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/stat.h>
12 #include <linux/ctype.h>
13 #include <linux/pci.h>
14 #include <linux/pci-p2pdma.h>
15 #ifdef CONFIG_NVME_TARGET_AUTH
16 #include <linux/nvme-auth.h>
18 #include <linux/nvme-keyring.h>
19 #include <crypto/hash.h>
20 #include <crypto/kpp.h>
21 #include <linux/nospec.h>
25 static const struct config_item_type nvmet_host_type
;
26 static const struct config_item_type nvmet_subsys_type
;
28 static LIST_HEAD(nvmet_ports_list
);
29 struct list_head
*nvmet_ports
= &nvmet_ports_list
;
31 struct nvmet_type_name_map
{
36 static struct nvmet_type_name_map nvmet_transport
[] = {
37 { NVMF_TRTYPE_RDMA
, "rdma" },
38 { NVMF_TRTYPE_FC
, "fc" },
39 { NVMF_TRTYPE_TCP
, "tcp" },
40 { NVMF_TRTYPE_LOOP
, "loop" },
43 static const struct nvmet_type_name_map nvmet_addr_family
[] = {
44 { NVMF_ADDR_FAMILY_PCI
, "pcie" },
45 { NVMF_ADDR_FAMILY_IP4
, "ipv4" },
46 { NVMF_ADDR_FAMILY_IP6
, "ipv6" },
47 { NVMF_ADDR_FAMILY_IB
, "ib" },
48 { NVMF_ADDR_FAMILY_FC
, "fc" },
49 { NVMF_ADDR_FAMILY_LOOP
, "loop" },
52 static bool nvmet_is_port_enabled(struct nvmet_port
*p
, const char *caller
)
55 pr_err("Disable port '%u' before changing attribute in %s\n",
56 le16_to_cpu(p
->disc_addr
.portid
), caller
);
61 * nvmet_port Generic ConfigFS definitions.
62 * Used in any place in the ConfigFS tree that refers to an address.
64 static ssize_t
nvmet_addr_adrfam_show(struct config_item
*item
, char *page
)
66 u8 adrfam
= to_nvmet_port(item
)->disc_addr
.adrfam
;
69 for (i
= 1; i
< ARRAY_SIZE(nvmet_addr_family
); i
++) {
70 if (nvmet_addr_family
[i
].type
== adrfam
)
71 return snprintf(page
, PAGE_SIZE
, "%s\n",
72 nvmet_addr_family
[i
].name
);
75 return snprintf(page
, PAGE_SIZE
, "\n");
78 static ssize_t
nvmet_addr_adrfam_store(struct config_item
*item
,
79 const char *page
, size_t count
)
81 struct nvmet_port
*port
= to_nvmet_port(item
);
84 if (nvmet_is_port_enabled(port
, __func__
))
87 for (i
= 1; i
< ARRAY_SIZE(nvmet_addr_family
); i
++) {
88 if (sysfs_streq(page
, nvmet_addr_family
[i
].name
))
92 pr_err("Invalid value '%s' for adrfam\n", page
);
96 port
->disc_addr
.adrfam
= nvmet_addr_family
[i
].type
;
100 CONFIGFS_ATTR(nvmet_
, addr_adrfam
);
102 static ssize_t
nvmet_addr_portid_show(struct config_item
*item
,
105 __le16 portid
= to_nvmet_port(item
)->disc_addr
.portid
;
107 return snprintf(page
, PAGE_SIZE
, "%d\n", le16_to_cpu(portid
));
110 static ssize_t
nvmet_addr_portid_store(struct config_item
*item
,
111 const char *page
, size_t count
)
113 struct nvmet_port
*port
= to_nvmet_port(item
);
116 if (kstrtou16(page
, 0, &portid
)) {
117 pr_err("Invalid value '%s' for portid\n", page
);
121 if (nvmet_is_port_enabled(port
, __func__
))
124 port
->disc_addr
.portid
= cpu_to_le16(portid
);
128 CONFIGFS_ATTR(nvmet_
, addr_portid
);
130 static ssize_t
nvmet_addr_traddr_show(struct config_item
*item
,
133 struct nvmet_port
*port
= to_nvmet_port(item
);
135 return snprintf(page
, PAGE_SIZE
, "%s\n", port
->disc_addr
.traddr
);
138 static ssize_t
nvmet_addr_traddr_store(struct config_item
*item
,
139 const char *page
, size_t count
)
141 struct nvmet_port
*port
= to_nvmet_port(item
);
143 if (count
> NVMF_TRADDR_SIZE
) {
144 pr_err("Invalid value '%s' for traddr\n", page
);
148 if (nvmet_is_port_enabled(port
, __func__
))
151 if (sscanf(page
, "%s\n", port
->disc_addr
.traddr
) != 1)
156 CONFIGFS_ATTR(nvmet_
, addr_traddr
);
158 static const struct nvmet_type_name_map nvmet_addr_treq
[] = {
159 { NVMF_TREQ_NOT_SPECIFIED
, "not specified" },
160 { NVMF_TREQ_REQUIRED
, "required" },
161 { NVMF_TREQ_NOT_REQUIRED
, "not required" },
164 static inline u8
nvmet_port_disc_addr_treq_mask(struct nvmet_port
*port
)
166 return (port
->disc_addr
.treq
& ~NVME_TREQ_SECURE_CHANNEL_MASK
);
169 static ssize_t
nvmet_addr_treq_show(struct config_item
*item
, char *page
)
171 u8 treq
= nvmet_port_disc_addr_treq_secure_channel(to_nvmet_port(item
));
174 for (i
= 0; i
< ARRAY_SIZE(nvmet_addr_treq
); i
++) {
175 if (treq
== nvmet_addr_treq
[i
].type
)
176 return snprintf(page
, PAGE_SIZE
, "%s\n",
177 nvmet_addr_treq
[i
].name
);
180 return snprintf(page
, PAGE_SIZE
, "\n");
183 static ssize_t
nvmet_addr_treq_store(struct config_item
*item
,
184 const char *page
, size_t count
)
186 struct nvmet_port
*port
= to_nvmet_port(item
);
187 u8 treq
= nvmet_port_disc_addr_treq_mask(port
);
190 if (nvmet_is_port_enabled(port
, __func__
))
193 for (i
= 0; i
< ARRAY_SIZE(nvmet_addr_treq
); i
++) {
194 if (sysfs_streq(page
, nvmet_addr_treq
[i
].name
))
198 pr_err("Invalid value '%s' for treq\n", page
);
202 if (port
->disc_addr
.trtype
== NVMF_TRTYPE_TCP
&&
203 port
->disc_addr
.tsas
.tcp
.sectype
== NVMF_TCP_SECTYPE_TLS13
) {
204 switch (nvmet_addr_treq
[i
].type
) {
205 case NVMF_TREQ_NOT_SPECIFIED
:
206 pr_debug("treq '%s' not allowed for TLS1.3\n",
207 nvmet_addr_treq
[i
].name
);
209 case NVMF_TREQ_NOT_REQUIRED
:
210 pr_warn("Allow non-TLS connections while TLS1.3 is enabled\n");
216 treq
|= nvmet_addr_treq
[i
].type
;
217 port
->disc_addr
.treq
= treq
;
221 CONFIGFS_ATTR(nvmet_
, addr_treq
);
223 static ssize_t
nvmet_addr_trsvcid_show(struct config_item
*item
,
226 struct nvmet_port
*port
= to_nvmet_port(item
);
228 return snprintf(page
, PAGE_SIZE
, "%s\n", port
->disc_addr
.trsvcid
);
231 static ssize_t
nvmet_addr_trsvcid_store(struct config_item
*item
,
232 const char *page
, size_t count
)
234 struct nvmet_port
*port
= to_nvmet_port(item
);
236 if (count
> NVMF_TRSVCID_SIZE
) {
237 pr_err("Invalid value '%s' for trsvcid\n", page
);
240 if (nvmet_is_port_enabled(port
, __func__
))
243 if (sscanf(page
, "%s\n", port
->disc_addr
.trsvcid
) != 1)
248 CONFIGFS_ATTR(nvmet_
, addr_trsvcid
);
250 static ssize_t
nvmet_param_inline_data_size_show(struct config_item
*item
,
253 struct nvmet_port
*port
= to_nvmet_port(item
);
255 return snprintf(page
, PAGE_SIZE
, "%d\n", port
->inline_data_size
);
258 static ssize_t
nvmet_param_inline_data_size_store(struct config_item
*item
,
259 const char *page
, size_t count
)
261 struct nvmet_port
*port
= to_nvmet_port(item
);
264 if (nvmet_is_port_enabled(port
, __func__
))
266 ret
= kstrtoint(page
, 0, &port
->inline_data_size
);
268 pr_err("Invalid value '%s' for inline_data_size\n", page
);
274 CONFIGFS_ATTR(nvmet_
, param_inline_data_size
);
276 static ssize_t
nvmet_param_max_queue_size_show(struct config_item
*item
,
279 struct nvmet_port
*port
= to_nvmet_port(item
);
281 return snprintf(page
, PAGE_SIZE
, "%d\n", port
->max_queue_size
);
284 static ssize_t
nvmet_param_max_queue_size_store(struct config_item
*item
,
285 const char *page
, size_t count
)
287 struct nvmet_port
*port
= to_nvmet_port(item
);
290 if (nvmet_is_port_enabled(port
, __func__
))
292 ret
= kstrtoint(page
, 0, &port
->max_queue_size
);
294 pr_err("Invalid value '%s' for max_queue_size\n", page
);
300 CONFIGFS_ATTR(nvmet_
, param_max_queue_size
);
302 #ifdef CONFIG_BLK_DEV_INTEGRITY
303 static ssize_t
nvmet_param_pi_enable_show(struct config_item
*item
,
306 struct nvmet_port
*port
= to_nvmet_port(item
);
308 return snprintf(page
, PAGE_SIZE
, "%d\n", port
->pi_enable
);
311 static ssize_t
nvmet_param_pi_enable_store(struct config_item
*item
,
312 const char *page
, size_t count
)
314 struct nvmet_port
*port
= to_nvmet_port(item
);
317 if (kstrtobool(page
, &val
))
320 if (nvmet_is_port_enabled(port
, __func__
))
323 port
->pi_enable
= val
;
327 CONFIGFS_ATTR(nvmet_
, param_pi_enable
);
330 static ssize_t
nvmet_addr_trtype_show(struct config_item
*item
,
333 struct nvmet_port
*port
= to_nvmet_port(item
);
336 for (i
= 0; i
< ARRAY_SIZE(nvmet_transport
); i
++) {
337 if (port
->disc_addr
.trtype
== nvmet_transport
[i
].type
)
338 return snprintf(page
, PAGE_SIZE
,
339 "%s\n", nvmet_transport
[i
].name
);
342 return sprintf(page
, "\n");
345 static void nvmet_port_init_tsas_rdma(struct nvmet_port
*port
)
347 port
->disc_addr
.tsas
.rdma
.qptype
= NVMF_RDMA_QPTYPE_CONNECTED
;
348 port
->disc_addr
.tsas
.rdma
.prtype
= NVMF_RDMA_PRTYPE_NOT_SPECIFIED
;
349 port
->disc_addr
.tsas
.rdma
.cms
= NVMF_RDMA_CMS_RDMA_CM
;
352 static void nvmet_port_init_tsas_tcp(struct nvmet_port
*port
, int sectype
)
354 port
->disc_addr
.tsas
.tcp
.sectype
= sectype
;
357 static ssize_t
nvmet_addr_trtype_store(struct config_item
*item
,
358 const char *page
, size_t count
)
360 struct nvmet_port
*port
= to_nvmet_port(item
);
363 if (nvmet_is_port_enabled(port
, __func__
))
366 for (i
= 0; i
< ARRAY_SIZE(nvmet_transport
); i
++) {
367 if (sysfs_streq(page
, nvmet_transport
[i
].name
))
371 pr_err("Invalid value '%s' for trtype\n", page
);
375 memset(&port
->disc_addr
.tsas
, 0, NVMF_TSAS_SIZE
);
376 port
->disc_addr
.trtype
= nvmet_transport
[i
].type
;
377 if (port
->disc_addr
.trtype
== NVMF_TRTYPE_RDMA
)
378 nvmet_port_init_tsas_rdma(port
);
379 else if (port
->disc_addr
.trtype
== NVMF_TRTYPE_TCP
)
380 nvmet_port_init_tsas_tcp(port
, NVMF_TCP_SECTYPE_NONE
);
384 CONFIGFS_ATTR(nvmet_
, addr_trtype
);
386 static const struct nvmet_type_name_map nvmet_addr_tsas_tcp
[] = {
387 { NVMF_TCP_SECTYPE_NONE
, "none" },
388 { NVMF_TCP_SECTYPE_TLS13
, "tls1.3" },
391 static const struct nvmet_type_name_map nvmet_addr_tsas_rdma
[] = {
392 { NVMF_RDMA_QPTYPE_CONNECTED
, "connected" },
393 { NVMF_RDMA_QPTYPE_DATAGRAM
, "datagram" },
396 static ssize_t
nvmet_addr_tsas_show(struct config_item
*item
,
399 struct nvmet_port
*port
= to_nvmet_port(item
);
402 if (port
->disc_addr
.trtype
== NVMF_TRTYPE_TCP
) {
403 for (i
= 0; i
< ARRAY_SIZE(nvmet_addr_tsas_tcp
); i
++) {
404 if (port
->disc_addr
.tsas
.tcp
.sectype
== nvmet_addr_tsas_tcp
[i
].type
)
405 return sprintf(page
, "%s\n", nvmet_addr_tsas_tcp
[i
].name
);
407 } else if (port
->disc_addr
.trtype
== NVMF_TRTYPE_RDMA
) {
408 for (i
= 0; i
< ARRAY_SIZE(nvmet_addr_tsas_rdma
); i
++) {
409 if (port
->disc_addr
.tsas
.rdma
.qptype
== nvmet_addr_tsas_rdma
[i
].type
)
410 return sprintf(page
, "%s\n", nvmet_addr_tsas_rdma
[i
].name
);
413 return sprintf(page
, "\n");
416 static u8
nvmet_addr_tsas_rdma_store(const char *page
)
420 for (i
= 0; i
< ARRAY_SIZE(nvmet_addr_tsas_rdma
); i
++) {
421 if (sysfs_streq(page
, nvmet_addr_tsas_rdma
[i
].name
))
422 return nvmet_addr_tsas_rdma
[i
].type
;
424 return NVMF_RDMA_QPTYPE_INVALID
;
427 static u8
nvmet_addr_tsas_tcp_store(const char *page
)
431 for (i
= 0; i
< ARRAY_SIZE(nvmet_addr_tsas_tcp
); i
++) {
432 if (sysfs_streq(page
, nvmet_addr_tsas_tcp
[i
].name
))
433 return nvmet_addr_tsas_tcp
[i
].type
;
435 return NVMF_TCP_SECTYPE_INVALID
;
438 static ssize_t
nvmet_addr_tsas_store(struct config_item
*item
,
439 const char *page
, size_t count
)
441 struct nvmet_port
*port
= to_nvmet_port(item
);
442 u8 treq
= nvmet_port_disc_addr_treq_mask(port
);
445 if (nvmet_is_port_enabled(port
, __func__
))
448 if (port
->disc_addr
.trtype
== NVMF_TRTYPE_RDMA
) {
449 qptype
= nvmet_addr_tsas_rdma_store(page
);
450 if (qptype
== port
->disc_addr
.tsas
.rdma
.qptype
)
452 } else if (port
->disc_addr
.trtype
== NVMF_TRTYPE_TCP
) {
453 sectype
= nvmet_addr_tsas_tcp_store(page
);
454 if (sectype
!= NVMF_TCP_SECTYPE_INVALID
)
458 pr_err("Invalid value '%s' for tsas\n", page
);
462 if (sectype
== NVMF_TCP_SECTYPE_TLS13
) {
463 if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS
)) {
464 pr_err("TLS is not supported\n");
467 if (!port
->keyring
) {
468 pr_err("TLS keyring not configured\n");
473 nvmet_port_init_tsas_tcp(port
, sectype
);
475 * If TLS is enabled TREQ should be set to 'required' per default
477 if (sectype
== NVMF_TCP_SECTYPE_TLS13
) {
478 u8 sc
= nvmet_port_disc_addr_treq_secure_channel(port
);
480 if (sc
== NVMF_TREQ_NOT_SPECIFIED
)
481 treq
|= NVMF_TREQ_REQUIRED
;
485 treq
|= NVMF_TREQ_NOT_SPECIFIED
;
487 port
->disc_addr
.treq
= treq
;
491 CONFIGFS_ATTR(nvmet_
, addr_tsas
);
494 * Namespace structures & file operation functions below
496 static ssize_t
nvmet_ns_device_path_show(struct config_item
*item
, char *page
)
498 return sprintf(page
, "%s\n", to_nvmet_ns(item
)->device_path
);
501 static ssize_t
nvmet_ns_device_path_store(struct config_item
*item
,
502 const char *page
, size_t count
)
504 struct nvmet_ns
*ns
= to_nvmet_ns(item
);
505 struct nvmet_subsys
*subsys
= ns
->subsys
;
509 mutex_lock(&subsys
->lock
);
515 len
= strcspn(page
, "\n");
519 kfree(ns
->device_path
);
521 ns
->device_path
= kmemdup_nul(page
, len
, GFP_KERNEL
);
522 if (!ns
->device_path
)
525 mutex_unlock(&subsys
->lock
);
529 mutex_unlock(&subsys
->lock
);
533 CONFIGFS_ATTR(nvmet_ns_
, device_path
);
535 #ifdef CONFIG_PCI_P2PDMA
536 static ssize_t
nvmet_ns_p2pmem_show(struct config_item
*item
, char *page
)
538 struct nvmet_ns
*ns
= to_nvmet_ns(item
);
540 return pci_p2pdma_enable_show(page
, ns
->p2p_dev
, ns
->use_p2pmem
);
543 static ssize_t
nvmet_ns_p2pmem_store(struct config_item
*item
,
544 const char *page
, size_t count
)
546 struct nvmet_ns
*ns
= to_nvmet_ns(item
);
547 struct pci_dev
*p2p_dev
= NULL
;
552 mutex_lock(&ns
->subsys
->lock
);
558 error
= pci_p2pdma_enable_store(page
, &p2p_dev
, &use_p2pmem
);
564 ns
->use_p2pmem
= use_p2pmem
;
565 pci_dev_put(ns
->p2p_dev
);
566 ns
->p2p_dev
= p2p_dev
;
569 mutex_unlock(&ns
->subsys
->lock
);
574 CONFIGFS_ATTR(nvmet_ns_
, p2pmem
);
575 #endif /* CONFIG_PCI_P2PDMA */
577 static ssize_t
nvmet_ns_device_uuid_show(struct config_item
*item
, char *page
)
579 return sprintf(page
, "%pUb\n", &to_nvmet_ns(item
)->uuid
);
582 static ssize_t
nvmet_ns_device_uuid_store(struct config_item
*item
,
583 const char *page
, size_t count
)
585 struct nvmet_ns
*ns
= to_nvmet_ns(item
);
586 struct nvmet_subsys
*subsys
= ns
->subsys
;
589 mutex_lock(&subsys
->lock
);
595 if (uuid_parse(page
, &ns
->uuid
))
599 mutex_unlock(&subsys
->lock
);
600 return ret
? ret
: count
;
603 CONFIGFS_ATTR(nvmet_ns_
, device_uuid
);
605 static ssize_t
nvmet_ns_device_nguid_show(struct config_item
*item
, char *page
)
607 return sprintf(page
, "%pUb\n", &to_nvmet_ns(item
)->nguid
);
610 static ssize_t
nvmet_ns_device_nguid_store(struct config_item
*item
,
611 const char *page
, size_t count
)
613 struct nvmet_ns
*ns
= to_nvmet_ns(item
);
614 struct nvmet_subsys
*subsys
= ns
->subsys
;
616 const char *p
= page
;
620 mutex_lock(&subsys
->lock
);
626 for (i
= 0; i
< 16; i
++) {
627 if (p
+ 2 > page
+ count
) {
631 if (!isxdigit(p
[0]) || !isxdigit(p
[1])) {
636 nguid
[i
] = (hex_to_bin(p
[0]) << 4) | hex_to_bin(p
[1]);
639 if (*p
== '-' || *p
== ':')
643 memcpy(&ns
->nguid
, nguid
, sizeof(nguid
));
645 mutex_unlock(&subsys
->lock
);
646 return ret
? ret
: count
;
649 CONFIGFS_ATTR(nvmet_ns_
, device_nguid
);
651 static ssize_t
nvmet_ns_ana_grpid_show(struct config_item
*item
, char *page
)
653 return sprintf(page
, "%u\n", to_nvmet_ns(item
)->anagrpid
);
656 static ssize_t
nvmet_ns_ana_grpid_store(struct config_item
*item
,
657 const char *page
, size_t count
)
659 struct nvmet_ns
*ns
= to_nvmet_ns(item
);
660 u32 oldgrpid
, newgrpid
;
663 ret
= kstrtou32(page
, 0, &newgrpid
);
667 if (newgrpid
< 1 || newgrpid
> NVMET_MAX_ANAGRPS
)
670 down_write(&nvmet_ana_sem
);
671 oldgrpid
= ns
->anagrpid
;
672 newgrpid
= array_index_nospec(newgrpid
, NVMET_MAX_ANAGRPS
);
673 nvmet_ana_group_enabled
[newgrpid
]++;
674 ns
->anagrpid
= newgrpid
;
675 nvmet_ana_group_enabled
[oldgrpid
]--;
677 up_write(&nvmet_ana_sem
);
679 nvmet_send_ana_event(ns
->subsys
, NULL
);
683 CONFIGFS_ATTR(nvmet_ns_
, ana_grpid
);
685 static ssize_t
nvmet_ns_enable_show(struct config_item
*item
, char *page
)
687 return sprintf(page
, "%d\n", to_nvmet_ns(item
)->enabled
);
690 static ssize_t
nvmet_ns_enable_store(struct config_item
*item
,
691 const char *page
, size_t count
)
693 struct nvmet_ns
*ns
= to_nvmet_ns(item
);
697 if (kstrtobool(page
, &enable
))
701 * take a global nvmet_config_sem because the disable routine has a
702 * window where it releases the subsys-lock, giving a chance to
703 * a parallel enable to concurrently execute causing the disable to
704 * have a misaccounting of the ns percpu_ref.
706 down_write(&nvmet_config_sem
);
708 ret
= nvmet_ns_enable(ns
);
710 nvmet_ns_disable(ns
);
711 up_write(&nvmet_config_sem
);
713 return ret
? ret
: count
;
716 CONFIGFS_ATTR(nvmet_ns_
, enable
);
718 static ssize_t
nvmet_ns_buffered_io_show(struct config_item
*item
, char *page
)
720 return sprintf(page
, "%d\n", to_nvmet_ns(item
)->buffered_io
);
723 static ssize_t
nvmet_ns_buffered_io_store(struct config_item
*item
,
724 const char *page
, size_t count
)
726 struct nvmet_ns
*ns
= to_nvmet_ns(item
);
729 if (kstrtobool(page
, &val
))
732 mutex_lock(&ns
->subsys
->lock
);
734 pr_err("disable ns before setting buffered_io value.\n");
735 mutex_unlock(&ns
->subsys
->lock
);
739 ns
->buffered_io
= val
;
740 mutex_unlock(&ns
->subsys
->lock
);
744 CONFIGFS_ATTR(nvmet_ns_
, buffered_io
);
746 static ssize_t
nvmet_ns_revalidate_size_store(struct config_item
*item
,
747 const char *page
, size_t count
)
749 struct nvmet_ns
*ns
= to_nvmet_ns(item
);
752 if (kstrtobool(page
, &val
))
758 mutex_lock(&ns
->subsys
->lock
);
760 pr_err("enable ns before revalidate.\n");
761 mutex_unlock(&ns
->subsys
->lock
);
764 if (nvmet_ns_revalidate(ns
))
765 nvmet_ns_changed(ns
->subsys
, ns
->nsid
);
766 mutex_unlock(&ns
->subsys
->lock
);
770 CONFIGFS_ATTR_WO(nvmet_ns_
, revalidate_size
);
772 static ssize_t
nvmet_ns_resv_enable_show(struct config_item
*item
, char *page
)
774 return sysfs_emit(page
, "%d\n", to_nvmet_ns(item
)->pr
.enable
);
777 static ssize_t
nvmet_ns_resv_enable_store(struct config_item
*item
,
778 const char *page
, size_t count
)
780 struct nvmet_ns
*ns
= to_nvmet_ns(item
);
783 if (kstrtobool(page
, &val
))
786 mutex_lock(&ns
->subsys
->lock
);
788 pr_err("the ns:%d is already enabled.\n", ns
->nsid
);
789 mutex_unlock(&ns
->subsys
->lock
);
793 mutex_unlock(&ns
->subsys
->lock
);
796 CONFIGFS_ATTR(nvmet_ns_
, resv_enable
);
798 static struct configfs_attribute
*nvmet_ns_attrs
[] = {
799 &nvmet_ns_attr_device_path
,
800 &nvmet_ns_attr_device_nguid
,
801 &nvmet_ns_attr_device_uuid
,
802 &nvmet_ns_attr_ana_grpid
,
803 &nvmet_ns_attr_enable
,
804 &nvmet_ns_attr_buffered_io
,
805 &nvmet_ns_attr_revalidate_size
,
806 &nvmet_ns_attr_resv_enable
,
807 #ifdef CONFIG_PCI_P2PDMA
808 &nvmet_ns_attr_p2pmem
,
813 bool nvmet_subsys_nsid_exists(struct nvmet_subsys
*subsys
, u32 nsid
)
815 struct config_item
*ns_item
;
818 snprintf(name
, sizeof(name
), "%u", nsid
);
819 mutex_lock(&subsys
->namespaces_group
.cg_subsys
->su_mutex
);
820 ns_item
= config_group_find_item(&subsys
->namespaces_group
, name
);
821 mutex_unlock(&subsys
->namespaces_group
.cg_subsys
->su_mutex
);
822 return ns_item
!= NULL
;
825 static void nvmet_ns_release(struct config_item
*item
)
827 struct nvmet_ns
*ns
= to_nvmet_ns(item
);
832 static struct configfs_item_operations nvmet_ns_item_ops
= {
833 .release
= nvmet_ns_release
,
836 static const struct config_item_type nvmet_ns_type
= {
837 .ct_item_ops
= &nvmet_ns_item_ops
,
838 .ct_attrs
= nvmet_ns_attrs
,
839 .ct_owner
= THIS_MODULE
,
842 static struct config_group
*nvmet_ns_make(struct config_group
*group
,
845 struct nvmet_subsys
*subsys
= namespaces_to_subsys(&group
->cg_item
);
850 ret
= kstrtou32(name
, 0, &nsid
);
855 if (nsid
== 0 || nsid
== NVME_NSID_ALL
) {
856 pr_err("invalid nsid %#x", nsid
);
861 ns
= nvmet_ns_alloc(subsys
, nsid
);
864 config_group_init_type_name(&ns
->group
, name
, &nvmet_ns_type
);
866 pr_info("adding nsid %d to subsystem %s\n", nsid
, subsys
->subsysnqn
);
873 static struct configfs_group_operations nvmet_namespaces_group_ops
= {
874 .make_group
= nvmet_ns_make
,
877 static const struct config_item_type nvmet_namespaces_type
= {
878 .ct_group_ops
= &nvmet_namespaces_group_ops
,
879 .ct_owner
= THIS_MODULE
,
882 #ifdef CONFIG_NVME_TARGET_PASSTHRU
884 static ssize_t
nvmet_passthru_device_path_show(struct config_item
*item
,
887 struct nvmet_subsys
*subsys
= to_subsys(item
->ci_parent
);
889 return snprintf(page
, PAGE_SIZE
, "%s\n", subsys
->passthru_ctrl_path
);
892 static ssize_t
nvmet_passthru_device_path_store(struct config_item
*item
,
893 const char *page
, size_t count
)
895 struct nvmet_subsys
*subsys
= to_subsys(item
->ci_parent
);
899 mutex_lock(&subsys
->lock
);
902 if (subsys
->passthru_ctrl
)
906 len
= strcspn(page
, "\n");
910 kfree(subsys
->passthru_ctrl_path
);
912 subsys
->passthru_ctrl_path
= kstrndup(page
, len
, GFP_KERNEL
);
913 if (!subsys
->passthru_ctrl_path
)
916 mutex_unlock(&subsys
->lock
);
920 mutex_unlock(&subsys
->lock
);
923 CONFIGFS_ATTR(nvmet_passthru_
, device_path
);
925 static ssize_t
nvmet_passthru_enable_show(struct config_item
*item
,
928 struct nvmet_subsys
*subsys
= to_subsys(item
->ci_parent
);
930 return sprintf(page
, "%d\n", subsys
->passthru_ctrl
? 1 : 0);
933 static ssize_t
nvmet_passthru_enable_store(struct config_item
*item
,
934 const char *page
, size_t count
)
936 struct nvmet_subsys
*subsys
= to_subsys(item
->ci_parent
);
940 if (kstrtobool(page
, &enable
))
944 ret
= nvmet_passthru_ctrl_enable(subsys
);
946 nvmet_passthru_ctrl_disable(subsys
);
948 return ret
? ret
: count
;
950 CONFIGFS_ATTR(nvmet_passthru_
, enable
);
952 static ssize_t
nvmet_passthru_admin_timeout_show(struct config_item
*item
,
955 return sprintf(page
, "%u\n", to_subsys(item
->ci_parent
)->admin_timeout
);
958 static ssize_t
nvmet_passthru_admin_timeout_store(struct config_item
*item
,
959 const char *page
, size_t count
)
961 struct nvmet_subsys
*subsys
= to_subsys(item
->ci_parent
);
962 unsigned int timeout
;
964 if (kstrtouint(page
, 0, &timeout
))
966 subsys
->admin_timeout
= timeout
;
969 CONFIGFS_ATTR(nvmet_passthru_
, admin_timeout
);
971 static ssize_t
nvmet_passthru_io_timeout_show(struct config_item
*item
,
974 return sprintf(page
, "%u\n", to_subsys(item
->ci_parent
)->io_timeout
);
977 static ssize_t
nvmet_passthru_io_timeout_store(struct config_item
*item
,
978 const char *page
, size_t count
)
980 struct nvmet_subsys
*subsys
= to_subsys(item
->ci_parent
);
981 unsigned int timeout
;
983 if (kstrtouint(page
, 0, &timeout
))
985 subsys
->io_timeout
= timeout
;
988 CONFIGFS_ATTR(nvmet_passthru_
, io_timeout
);
990 static ssize_t
nvmet_passthru_clear_ids_show(struct config_item
*item
,
993 return sprintf(page
, "%u\n", to_subsys(item
->ci_parent
)->clear_ids
);
996 static ssize_t
nvmet_passthru_clear_ids_store(struct config_item
*item
,
997 const char *page
, size_t count
)
999 struct nvmet_subsys
*subsys
= to_subsys(item
->ci_parent
);
1000 unsigned int clear_ids
;
1002 if (kstrtouint(page
, 0, &clear_ids
))
1004 subsys
->clear_ids
= clear_ids
;
1007 CONFIGFS_ATTR(nvmet_passthru_
, clear_ids
);
1009 static struct configfs_attribute
*nvmet_passthru_attrs
[] = {
1010 &nvmet_passthru_attr_device_path
,
1011 &nvmet_passthru_attr_enable
,
1012 &nvmet_passthru_attr_admin_timeout
,
1013 &nvmet_passthru_attr_io_timeout
,
1014 &nvmet_passthru_attr_clear_ids
,
1018 static const struct config_item_type nvmet_passthru_type
= {
1019 .ct_attrs
= nvmet_passthru_attrs
,
1020 .ct_owner
= THIS_MODULE
,
1023 static void nvmet_add_passthru_group(struct nvmet_subsys
*subsys
)
1025 config_group_init_type_name(&subsys
->passthru_group
,
1026 "passthru", &nvmet_passthru_type
);
1027 configfs_add_default_group(&subsys
->passthru_group
,
1031 #else /* CONFIG_NVME_TARGET_PASSTHRU */
1033 static void nvmet_add_passthru_group(struct nvmet_subsys
*subsys
)
1037 #endif /* CONFIG_NVME_TARGET_PASSTHRU */
1039 static int nvmet_port_subsys_allow_link(struct config_item
*parent
,
1040 struct config_item
*target
)
1042 struct nvmet_port
*port
= to_nvmet_port(parent
->ci_parent
);
1043 struct nvmet_subsys
*subsys
;
1044 struct nvmet_subsys_link
*link
, *p
;
1047 if (target
->ci_type
!= &nvmet_subsys_type
) {
1048 pr_err("can only link subsystems into the subsystems dir.!\n");
1051 subsys
= to_subsys(target
);
1052 link
= kmalloc(sizeof(*link
), GFP_KERNEL
);
1055 link
->subsys
= subsys
;
1057 down_write(&nvmet_config_sem
);
1059 list_for_each_entry(p
, &port
->subsystems
, entry
) {
1060 if (p
->subsys
== subsys
)
1064 if (list_empty(&port
->subsystems
)) {
1065 ret
= nvmet_enable_port(port
);
1070 list_add_tail(&link
->entry
, &port
->subsystems
);
1071 nvmet_port_disc_changed(port
, subsys
);
1073 up_write(&nvmet_config_sem
);
1077 up_write(&nvmet_config_sem
);
1082 static void nvmet_port_subsys_drop_link(struct config_item
*parent
,
1083 struct config_item
*target
)
1085 struct nvmet_port
*port
= to_nvmet_port(parent
->ci_parent
);
1086 struct nvmet_subsys
*subsys
= to_subsys(target
);
1087 struct nvmet_subsys_link
*p
;
1089 down_write(&nvmet_config_sem
);
1090 list_for_each_entry(p
, &port
->subsystems
, entry
) {
1091 if (p
->subsys
== subsys
)
1094 up_write(&nvmet_config_sem
);
1098 list_del(&p
->entry
);
1099 nvmet_port_del_ctrls(port
, subsys
);
1100 nvmet_port_disc_changed(port
, subsys
);
1102 if (list_empty(&port
->subsystems
))
1103 nvmet_disable_port(port
);
1104 up_write(&nvmet_config_sem
);
1108 static struct configfs_item_operations nvmet_port_subsys_item_ops
= {
1109 .allow_link
= nvmet_port_subsys_allow_link
,
1110 .drop_link
= nvmet_port_subsys_drop_link
,
1113 static const struct config_item_type nvmet_port_subsys_type
= {
1114 .ct_item_ops
= &nvmet_port_subsys_item_ops
,
1115 .ct_owner
= THIS_MODULE
,
1118 static int nvmet_allowed_hosts_allow_link(struct config_item
*parent
,
1119 struct config_item
*target
)
1121 struct nvmet_subsys
*subsys
= to_subsys(parent
->ci_parent
);
1122 struct nvmet_host
*host
;
1123 struct nvmet_host_link
*link
, *p
;
1126 if (target
->ci_type
!= &nvmet_host_type
) {
1127 pr_err("can only link hosts into the allowed_hosts directory!\n");
1131 host
= to_host(target
);
1132 link
= kmalloc(sizeof(*link
), GFP_KERNEL
);
1137 down_write(&nvmet_config_sem
);
1139 if (subsys
->allow_any_host
) {
1140 pr_err("can't add hosts when allow_any_host is set!\n");
1145 list_for_each_entry(p
, &subsys
->hosts
, entry
) {
1146 if (!strcmp(nvmet_host_name(p
->host
), nvmet_host_name(host
)))
1149 list_add_tail(&link
->entry
, &subsys
->hosts
);
1150 nvmet_subsys_disc_changed(subsys
, host
);
1152 up_write(&nvmet_config_sem
);
1155 up_write(&nvmet_config_sem
);
1160 static void nvmet_allowed_hosts_drop_link(struct config_item
*parent
,
1161 struct config_item
*target
)
1163 struct nvmet_subsys
*subsys
= to_subsys(parent
->ci_parent
);
1164 struct nvmet_host
*host
= to_host(target
);
1165 struct nvmet_host_link
*p
;
1167 down_write(&nvmet_config_sem
);
1168 list_for_each_entry(p
, &subsys
->hosts
, entry
) {
1169 if (!strcmp(nvmet_host_name(p
->host
), nvmet_host_name(host
)))
1172 up_write(&nvmet_config_sem
);
1176 list_del(&p
->entry
);
1177 nvmet_subsys_disc_changed(subsys
, host
);
1179 up_write(&nvmet_config_sem
);
1183 static struct configfs_item_operations nvmet_allowed_hosts_item_ops
= {
1184 .allow_link
= nvmet_allowed_hosts_allow_link
,
1185 .drop_link
= nvmet_allowed_hosts_drop_link
,
1188 static const struct config_item_type nvmet_allowed_hosts_type
= {
1189 .ct_item_ops
= &nvmet_allowed_hosts_item_ops
,
1190 .ct_owner
= THIS_MODULE
,
1193 static ssize_t
nvmet_subsys_attr_allow_any_host_show(struct config_item
*item
,
1196 return snprintf(page
, PAGE_SIZE
, "%d\n",
1197 to_subsys(item
)->allow_any_host
);
1200 static ssize_t
nvmet_subsys_attr_allow_any_host_store(struct config_item
*item
,
1201 const char *page
, size_t count
)
1203 struct nvmet_subsys
*subsys
= to_subsys(item
);
1204 bool allow_any_host
;
1207 if (kstrtobool(page
, &allow_any_host
))
1210 down_write(&nvmet_config_sem
);
1211 if (allow_any_host
&& !list_empty(&subsys
->hosts
)) {
1212 pr_err("Can't set allow_any_host when explicit hosts are set!\n");
1217 if (subsys
->allow_any_host
!= allow_any_host
) {
1218 subsys
->allow_any_host
= allow_any_host
;
1219 nvmet_subsys_disc_changed(subsys
, NULL
);
1223 up_write(&nvmet_config_sem
);
1224 return ret
? ret
: count
;
1227 CONFIGFS_ATTR(nvmet_subsys_
, attr_allow_any_host
);
1229 static ssize_t
nvmet_subsys_attr_version_show(struct config_item
*item
,
1232 struct nvmet_subsys
*subsys
= to_subsys(item
);
1234 if (NVME_TERTIARY(subsys
->ver
))
1235 return snprintf(page
, PAGE_SIZE
, "%llu.%llu.%llu\n",
1236 NVME_MAJOR(subsys
->ver
),
1237 NVME_MINOR(subsys
->ver
),
1238 NVME_TERTIARY(subsys
->ver
));
1240 return snprintf(page
, PAGE_SIZE
, "%llu.%llu\n",
1241 NVME_MAJOR(subsys
->ver
),
1242 NVME_MINOR(subsys
->ver
));
1246 nvmet_subsys_attr_version_store_locked(struct nvmet_subsys
*subsys
,
1247 const char *page
, size_t count
)
1249 int major
, minor
, tertiary
= 0;
1252 if (subsys
->subsys_discovered
) {
1253 if (NVME_TERTIARY(subsys
->ver
))
1254 pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
1255 NVME_MAJOR(subsys
->ver
),
1256 NVME_MINOR(subsys
->ver
),
1257 NVME_TERTIARY(subsys
->ver
));
1259 pr_err("Can't set version number. %llu.%llu is already assigned\n",
1260 NVME_MAJOR(subsys
->ver
),
1261 NVME_MINOR(subsys
->ver
));
1265 /* passthru subsystems use the underlying controller's version */
1266 if (nvmet_is_passthru_subsys(subsys
))
1269 ret
= sscanf(page
, "%d.%d.%d\n", &major
, &minor
, &tertiary
);
1270 if (ret
!= 2 && ret
!= 3)
1273 subsys
->ver
= NVME_VS(major
, minor
, tertiary
);
1278 static ssize_t
nvmet_subsys_attr_version_store(struct config_item
*item
,
1279 const char *page
, size_t count
)
1281 struct nvmet_subsys
*subsys
= to_subsys(item
);
1284 down_write(&nvmet_config_sem
);
1285 mutex_lock(&subsys
->lock
);
1286 ret
= nvmet_subsys_attr_version_store_locked(subsys
, page
, count
);
1287 mutex_unlock(&subsys
->lock
);
1288 up_write(&nvmet_config_sem
);
1292 CONFIGFS_ATTR(nvmet_subsys_
, attr_version
);
/*
 * See Section 1.5 of NVMe 1.4: serial/model numbers may only contain
 * printable ASCII characters (0x20 space through 0x7e tilde).
 */
static bool nvmet_is_ascii(const char c)
{
	return c >= 0x20 && c <= 0x7e;
}
1300 static ssize_t
nvmet_subsys_attr_serial_show(struct config_item
*item
,
1303 struct nvmet_subsys
*subsys
= to_subsys(item
);
1305 return snprintf(page
, PAGE_SIZE
, "%.*s\n",
1306 NVMET_SN_MAX_SIZE
, subsys
->serial
);
1310 nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys
*subsys
,
1311 const char *page
, size_t count
)
1313 int pos
, len
= strcspn(page
, "\n");
1315 if (subsys
->subsys_discovered
) {
1316 pr_err("Can't set serial number. %s is already assigned\n",
1321 if (!len
|| len
> NVMET_SN_MAX_SIZE
) {
1322 pr_err("Serial Number can not be empty or exceed %d Bytes\n",
1327 for (pos
= 0; pos
< len
; pos
++) {
1328 if (!nvmet_is_ascii(page
[pos
])) {
1329 pr_err("Serial Number must contain only ASCII strings\n");
1334 memcpy_and_pad(subsys
->serial
, NVMET_SN_MAX_SIZE
, page
, len
, ' ');
1339 static ssize_t
nvmet_subsys_attr_serial_store(struct config_item
*item
,
1340 const char *page
, size_t count
)
1342 struct nvmet_subsys
*subsys
= to_subsys(item
);
1345 down_write(&nvmet_config_sem
);
1346 mutex_lock(&subsys
->lock
);
1347 ret
= nvmet_subsys_attr_serial_store_locked(subsys
, page
, count
);
1348 mutex_unlock(&subsys
->lock
);
1349 up_write(&nvmet_config_sem
);
1353 CONFIGFS_ATTR(nvmet_subsys_
, attr_serial
);
1355 static ssize_t
nvmet_subsys_attr_cntlid_min_show(struct config_item
*item
,
1358 return snprintf(page
, PAGE_SIZE
, "%u\n", to_subsys(item
)->cntlid_min
);
1361 static ssize_t
nvmet_subsys_attr_cntlid_min_store(struct config_item
*item
,
1362 const char *page
, size_t cnt
)
1366 if (sscanf(page
, "%hu\n", &cntlid_min
) != 1)
1369 if (cntlid_min
== 0)
1372 down_write(&nvmet_config_sem
);
1373 if (cntlid_min
> to_subsys(item
)->cntlid_max
)
1375 to_subsys(item
)->cntlid_min
= cntlid_min
;
1376 up_write(&nvmet_config_sem
);
1380 up_write(&nvmet_config_sem
);
1383 CONFIGFS_ATTR(nvmet_subsys_
, attr_cntlid_min
);
1385 static ssize_t
nvmet_subsys_attr_cntlid_max_show(struct config_item
*item
,
1388 return snprintf(page
, PAGE_SIZE
, "%u\n", to_subsys(item
)->cntlid_max
);
1391 static ssize_t
nvmet_subsys_attr_cntlid_max_store(struct config_item
*item
,
1392 const char *page
, size_t cnt
)
1396 if (sscanf(page
, "%hu\n", &cntlid_max
) != 1)
1399 if (cntlid_max
== 0)
1402 down_write(&nvmet_config_sem
);
1403 if (cntlid_max
< to_subsys(item
)->cntlid_min
)
1405 to_subsys(item
)->cntlid_max
= cntlid_max
;
1406 up_write(&nvmet_config_sem
);
1410 up_write(&nvmet_config_sem
);
1413 CONFIGFS_ATTR(nvmet_subsys_
, attr_cntlid_max
);
1415 static ssize_t
nvmet_subsys_attr_model_show(struct config_item
*item
,
1418 struct nvmet_subsys
*subsys
= to_subsys(item
);
1420 return snprintf(page
, PAGE_SIZE
, "%s\n", subsys
->model_number
);
1423 static ssize_t
nvmet_subsys_attr_model_store_locked(struct nvmet_subsys
*subsys
,
1424 const char *page
, size_t count
)
1429 if (subsys
->subsys_discovered
) {
1430 pr_err("Can't set model number. %s is already assigned\n",
1431 subsys
->model_number
);
1435 len
= strcspn(page
, "\n");
1439 if (len
> NVMET_MN_MAX_SIZE
) {
1440 pr_err("Model number size can not exceed %d Bytes\n",
1445 for (pos
= 0; pos
< len
; pos
++) {
1446 if (!nvmet_is_ascii(page
[pos
]))
1450 val
= kmemdup_nul(page
, len
, GFP_KERNEL
);
1453 kfree(subsys
->model_number
);
1454 subsys
->model_number
= val
;
1458 static ssize_t
nvmet_subsys_attr_model_store(struct config_item
*item
,
1459 const char *page
, size_t count
)
1461 struct nvmet_subsys
*subsys
= to_subsys(item
);
1464 down_write(&nvmet_config_sem
);
1465 mutex_lock(&subsys
->lock
);
1466 ret
= nvmet_subsys_attr_model_store_locked(subsys
, page
, count
);
1467 mutex_unlock(&subsys
->lock
);
1468 up_write(&nvmet_config_sem
);
1472 CONFIGFS_ATTR(nvmet_subsys_
, attr_model
);
1474 static ssize_t
nvmet_subsys_attr_ieee_oui_show(struct config_item
*item
,
1477 struct nvmet_subsys
*subsys
= to_subsys(item
);
1479 return sysfs_emit(page
, "0x%06x\n", subsys
->ieee_oui
);
1482 static ssize_t
nvmet_subsys_attr_ieee_oui_store_locked(struct nvmet_subsys
*subsys
,
1483 const char *page
, size_t count
)
1488 if (subsys
->subsys_discovered
) {
1489 pr_err("Can't set IEEE OUI. 0x%06x is already assigned\n",
1494 ret
= kstrtou32(page
, 0, &val
);
1498 if (val
>= 0x1000000)
1501 subsys
->ieee_oui
= val
;
1506 static ssize_t
nvmet_subsys_attr_ieee_oui_store(struct config_item
*item
,
1507 const char *page
, size_t count
)
1509 struct nvmet_subsys
*subsys
= to_subsys(item
);
1512 down_write(&nvmet_config_sem
);
1513 mutex_lock(&subsys
->lock
);
1514 ret
= nvmet_subsys_attr_ieee_oui_store_locked(subsys
, page
, count
);
1515 mutex_unlock(&subsys
->lock
);
1516 up_write(&nvmet_config_sem
);
1520 CONFIGFS_ATTR(nvmet_subsys_
, attr_ieee_oui
);
1522 static ssize_t
nvmet_subsys_attr_firmware_show(struct config_item
*item
,
1525 struct nvmet_subsys
*subsys
= to_subsys(item
);
1527 return sysfs_emit(page
, "%s\n", subsys
->firmware_rev
);
1530 static ssize_t
nvmet_subsys_attr_firmware_store_locked(struct nvmet_subsys
*subsys
,
1531 const char *page
, size_t count
)
1536 if (subsys
->subsys_discovered
) {
1537 pr_err("Can't set firmware revision. %s is already assigned\n",
1538 subsys
->firmware_rev
);
1542 len
= strcspn(page
, "\n");
1546 if (len
> NVMET_FR_MAX_SIZE
) {
1547 pr_err("Firmware revision size can not exceed %d Bytes\n",
1552 for (pos
= 0; pos
< len
; pos
++) {
1553 if (!nvmet_is_ascii(page
[pos
]))
1557 val
= kmemdup_nul(page
, len
, GFP_KERNEL
);
1561 kfree(subsys
->firmware_rev
);
1563 subsys
->firmware_rev
= val
;
1568 static ssize_t
nvmet_subsys_attr_firmware_store(struct config_item
*item
,
1569 const char *page
, size_t count
)
1571 struct nvmet_subsys
*subsys
= to_subsys(item
);
1574 down_write(&nvmet_config_sem
);
1575 mutex_lock(&subsys
->lock
);
1576 ret
= nvmet_subsys_attr_firmware_store_locked(subsys
, page
, count
);
1577 mutex_unlock(&subsys
->lock
);
1578 up_write(&nvmet_config_sem
);
1582 CONFIGFS_ATTR(nvmet_subsys_
, attr_firmware
);
#ifdef CONFIG_BLK_DEV_INTEGRITY
/* Show whether end-to-end protection information (PI) is enabled. */
static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
						char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
}

/*
 * Store the PI enable flag. Accepts any kstrtobool-compatible value
 * ("0"/"1", "y"/"n", ...); no locking here beyond configfs itself.
 */
static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
						 const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool pi_enable;

	if (kstrtobool(page, &pi_enable))
		return -EINVAL;

	subsys->pi_support = pi_enable;
	return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
#endif
1606 static ssize_t
nvmet_subsys_attr_qid_max_show(struct config_item
*item
,
1609 return snprintf(page
, PAGE_SIZE
, "%u\n", to_subsys(item
)->max_qid
);
1612 static ssize_t
nvmet_subsys_attr_qid_max_store(struct config_item
*item
,
1613 const char *page
, size_t cnt
)
1615 struct nvmet_subsys
*subsys
= to_subsys(item
);
1616 struct nvmet_ctrl
*ctrl
;
1619 if (sscanf(page
, "%hu\n", &qid_max
) != 1)
1622 if (qid_max
< 1 || qid_max
> NVMET_NR_QUEUES
)
1625 down_write(&nvmet_config_sem
);
1626 subsys
->max_qid
= qid_max
;
1628 /* Force reconnect */
1629 list_for_each_entry(ctrl
, &subsys
->ctrls
, subsys_entry
)
1630 ctrl
->ops
->delete_ctrl(ctrl
);
1631 up_write(&nvmet_config_sem
);
1635 CONFIGFS_ATTR(nvmet_subsys_
, attr_qid_max
);
1637 static struct configfs_attribute
*nvmet_subsys_attrs
[] = {
1638 &nvmet_subsys_attr_attr_allow_any_host
,
1639 &nvmet_subsys_attr_attr_version
,
1640 &nvmet_subsys_attr_attr_serial
,
1641 &nvmet_subsys_attr_attr_cntlid_min
,
1642 &nvmet_subsys_attr_attr_cntlid_max
,
1643 &nvmet_subsys_attr_attr_model
,
1644 &nvmet_subsys_attr_attr_qid_max
,
1645 &nvmet_subsys_attr_attr_ieee_oui
,
1646 &nvmet_subsys_attr_attr_firmware
,
1647 #ifdef CONFIG_BLK_DEV_INTEGRITY
1648 &nvmet_subsys_attr_attr_pi_enable
,
1654 * Subsystem structures & folder operation functions below
1656 static void nvmet_subsys_release(struct config_item
*item
)
1658 struct nvmet_subsys
*subsys
= to_subsys(item
);
1660 nvmet_subsys_del_ctrls(subsys
);
1661 nvmet_subsys_put(subsys
);
1664 static struct configfs_item_operations nvmet_subsys_item_ops
= {
1665 .release
= nvmet_subsys_release
,
1668 static const struct config_item_type nvmet_subsys_type
= {
1669 .ct_item_ops
= &nvmet_subsys_item_ops
,
1670 .ct_attrs
= nvmet_subsys_attrs
,
1671 .ct_owner
= THIS_MODULE
,
1674 static struct config_group
*nvmet_subsys_make(struct config_group
*group
,
1677 struct nvmet_subsys
*subsys
;
1679 if (sysfs_streq(name
, NVME_DISC_SUBSYS_NAME
)) {
1680 pr_err("can't create discovery subsystem through configfs\n");
1681 return ERR_PTR(-EINVAL
);
1684 if (sysfs_streq(name
, nvmet_disc_subsys
->subsysnqn
)) {
1685 pr_err("can't create subsystem using unique discovery NQN\n");
1686 return ERR_PTR(-EINVAL
);
1689 subsys
= nvmet_subsys_alloc(name
, NVME_NQN_NVME
);
1691 return ERR_CAST(subsys
);
1693 config_group_init_type_name(&subsys
->group
, name
, &nvmet_subsys_type
);
1695 config_group_init_type_name(&subsys
->namespaces_group
,
1696 "namespaces", &nvmet_namespaces_type
);
1697 configfs_add_default_group(&subsys
->namespaces_group
, &subsys
->group
);
1699 config_group_init_type_name(&subsys
->allowed_hosts_group
,
1700 "allowed_hosts", &nvmet_allowed_hosts_type
);
1701 configfs_add_default_group(&subsys
->allowed_hosts_group
,
1704 nvmet_add_passthru_group(subsys
);
1706 return &subsys
->group
;
1709 static struct configfs_group_operations nvmet_subsystems_group_ops
= {
1710 .make_group
= nvmet_subsys_make
,
1713 static const struct config_item_type nvmet_subsystems_type
= {
1714 .ct_group_ops
= &nvmet_subsystems_group_ops
,
1715 .ct_owner
= THIS_MODULE
,
1718 static ssize_t
nvmet_referral_enable_show(struct config_item
*item
,
1721 return snprintf(page
, PAGE_SIZE
, "%d\n", to_nvmet_port(item
)->enabled
);
1724 static ssize_t
nvmet_referral_enable_store(struct config_item
*item
,
1725 const char *page
, size_t count
)
1727 struct nvmet_port
*parent
= to_nvmet_port(item
->ci_parent
->ci_parent
);
1728 struct nvmet_port
*port
= to_nvmet_port(item
);
1731 if (kstrtobool(page
, &enable
))
1735 nvmet_referral_enable(parent
, port
);
1737 nvmet_referral_disable(parent
, port
);
1741 pr_err("Invalid value '%s' for enable\n", page
);
1745 CONFIGFS_ATTR(nvmet_referral_
, enable
);
1748 * Discovery Service subsystem definitions
1750 static struct configfs_attribute
*nvmet_referral_attrs
[] = {
1751 &nvmet_attr_addr_adrfam
,
1752 &nvmet_attr_addr_portid
,
1753 &nvmet_attr_addr_treq
,
1754 &nvmet_attr_addr_traddr
,
1755 &nvmet_attr_addr_trsvcid
,
1756 &nvmet_attr_addr_trtype
,
1757 &nvmet_referral_attr_enable
,
1761 static void nvmet_referral_notify(struct config_group
*group
,
1762 struct config_item
*item
)
1764 struct nvmet_port
*parent
= to_nvmet_port(item
->ci_parent
->ci_parent
);
1765 struct nvmet_port
*port
= to_nvmet_port(item
);
1767 nvmet_referral_disable(parent
, port
);
1770 static void nvmet_referral_release(struct config_item
*item
)
1772 struct nvmet_port
*port
= to_nvmet_port(item
);
1777 static struct configfs_item_operations nvmet_referral_item_ops
= {
1778 .release
= nvmet_referral_release
,
1781 static const struct config_item_type nvmet_referral_type
= {
1782 .ct_owner
= THIS_MODULE
,
1783 .ct_attrs
= nvmet_referral_attrs
,
1784 .ct_item_ops
= &nvmet_referral_item_ops
,
1787 static struct config_group
*nvmet_referral_make(
1788 struct config_group
*group
, const char *name
)
1790 struct nvmet_port
*port
;
1792 port
= kzalloc(sizeof(*port
), GFP_KERNEL
);
1794 return ERR_PTR(-ENOMEM
);
1796 INIT_LIST_HEAD(&port
->entry
);
1797 config_group_init_type_name(&port
->group
, name
, &nvmet_referral_type
);
1799 return &port
->group
;
1802 static struct configfs_group_operations nvmet_referral_group_ops
= {
1803 .make_group
= nvmet_referral_make
,
1804 .disconnect_notify
= nvmet_referral_notify
,
1807 static const struct config_item_type nvmet_referrals_type
= {
1808 .ct_owner
= THIS_MODULE
,
1809 .ct_group_ops
= &nvmet_referral_group_ops
,
1812 static struct nvmet_type_name_map nvmet_ana_state
[] = {
1813 { NVME_ANA_OPTIMIZED
, "optimized" },
1814 { NVME_ANA_NONOPTIMIZED
, "non-optimized" },
1815 { NVME_ANA_INACCESSIBLE
, "inaccessible" },
1816 { NVME_ANA_PERSISTENT_LOSS
, "persistent-loss" },
1817 { NVME_ANA_CHANGE
, "change" },
1820 static ssize_t
nvmet_ana_group_ana_state_show(struct config_item
*item
,
1823 struct nvmet_ana_group
*grp
= to_ana_group(item
);
1824 enum nvme_ana_state state
= grp
->port
->ana_state
[grp
->grpid
];
1827 for (i
= 0; i
< ARRAY_SIZE(nvmet_ana_state
); i
++) {
1828 if (state
== nvmet_ana_state
[i
].type
)
1829 return sprintf(page
, "%s\n", nvmet_ana_state
[i
].name
);
1832 return sprintf(page
, "\n");
1835 static ssize_t
nvmet_ana_group_ana_state_store(struct config_item
*item
,
1836 const char *page
, size_t count
)
1838 struct nvmet_ana_group
*grp
= to_ana_group(item
);
1839 enum nvme_ana_state
*ana_state
= grp
->port
->ana_state
;
1842 for (i
= 0; i
< ARRAY_SIZE(nvmet_ana_state
); i
++) {
1843 if (sysfs_streq(page
, nvmet_ana_state
[i
].name
))
1847 pr_err("Invalid value '%s' for ana_state\n", page
);
1851 down_write(&nvmet_ana_sem
);
1852 ana_state
[grp
->grpid
] = (enum nvme_ana_state
) nvmet_ana_state
[i
].type
;
1854 up_write(&nvmet_ana_sem
);
1855 nvmet_port_send_ana_event(grp
->port
);
1859 CONFIGFS_ATTR(nvmet_ana_group_
, ana_state
);
1861 static struct configfs_attribute
*nvmet_ana_group_attrs
[] = {
1862 &nvmet_ana_group_attr_ana_state
,
1866 static void nvmet_ana_group_release(struct config_item
*item
)
1868 struct nvmet_ana_group
*grp
= to_ana_group(item
);
1870 if (grp
== &grp
->port
->ana_default_group
)
1873 down_write(&nvmet_ana_sem
);
1874 grp
->port
->ana_state
[grp
->grpid
] = NVME_ANA_INACCESSIBLE
;
1875 nvmet_ana_group_enabled
[grp
->grpid
]--;
1876 up_write(&nvmet_ana_sem
);
1878 nvmet_port_send_ana_event(grp
->port
);
1882 static struct configfs_item_operations nvmet_ana_group_item_ops
= {
1883 .release
= nvmet_ana_group_release
,
1886 static const struct config_item_type nvmet_ana_group_type
= {
1887 .ct_item_ops
= &nvmet_ana_group_item_ops
,
1888 .ct_attrs
= nvmet_ana_group_attrs
,
1889 .ct_owner
= THIS_MODULE
,
1892 static struct config_group
*nvmet_ana_groups_make_group(
1893 struct config_group
*group
, const char *name
)
1895 struct nvmet_port
*port
= ana_groups_to_port(&group
->cg_item
);
1896 struct nvmet_ana_group
*grp
;
1900 ret
= kstrtou32(name
, 0, &grpid
);
1905 if (grpid
<= 1 || grpid
> NVMET_MAX_ANAGRPS
)
1909 grp
= kzalloc(sizeof(*grp
), GFP_KERNEL
);
1915 down_write(&nvmet_ana_sem
);
1916 grpid
= array_index_nospec(grpid
, NVMET_MAX_ANAGRPS
);
1917 nvmet_ana_group_enabled
[grpid
]++;
1918 up_write(&nvmet_ana_sem
);
1920 nvmet_port_send_ana_event(grp
->port
);
1922 config_group_init_type_name(&grp
->group
, name
, &nvmet_ana_group_type
);
1925 return ERR_PTR(ret
);
1928 static struct configfs_group_operations nvmet_ana_groups_group_ops
= {
1929 .make_group
= nvmet_ana_groups_make_group
,
1932 static const struct config_item_type nvmet_ana_groups_type
= {
1933 .ct_group_ops
= &nvmet_ana_groups_group_ops
,
1934 .ct_owner
= THIS_MODULE
,
1938 * Ports definitions.
1940 static void nvmet_port_release(struct config_item
*item
)
1942 struct nvmet_port
*port
= to_nvmet_port(item
);
1944 /* Let inflight controllers teardown complete */
1945 flush_workqueue(nvmet_wq
);
1946 list_del(&port
->global_entry
);
1948 key_put(port
->keyring
);
1949 kfree(port
->ana_state
);
1953 static struct configfs_attribute
*nvmet_port_attrs
[] = {
1954 &nvmet_attr_addr_adrfam
,
1955 &nvmet_attr_addr_treq
,
1956 &nvmet_attr_addr_traddr
,
1957 &nvmet_attr_addr_trsvcid
,
1958 &nvmet_attr_addr_trtype
,
1959 &nvmet_attr_addr_tsas
,
1960 &nvmet_attr_param_inline_data_size
,
1961 &nvmet_attr_param_max_queue_size
,
1962 #ifdef CONFIG_BLK_DEV_INTEGRITY
1963 &nvmet_attr_param_pi_enable
,
1968 static struct configfs_item_operations nvmet_port_item_ops
= {
1969 .release
= nvmet_port_release
,
1972 static const struct config_item_type nvmet_port_type
= {
1973 .ct_attrs
= nvmet_port_attrs
,
1974 .ct_item_ops
= &nvmet_port_item_ops
,
1975 .ct_owner
= THIS_MODULE
,
1978 static struct config_group
*nvmet_ports_make(struct config_group
*group
,
1981 struct nvmet_port
*port
;
1985 if (kstrtou16(name
, 0, &portid
))
1986 return ERR_PTR(-EINVAL
);
1988 port
= kzalloc(sizeof(*port
), GFP_KERNEL
);
1990 return ERR_PTR(-ENOMEM
);
1992 port
->ana_state
= kcalloc(NVMET_MAX_ANAGRPS
+ 1,
1993 sizeof(*port
->ana_state
), GFP_KERNEL
);
1994 if (!port
->ana_state
) {
1996 return ERR_PTR(-ENOMEM
);
1999 if (IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS
) && nvme_keyring_id()) {
2000 port
->keyring
= key_lookup(nvme_keyring_id());
2001 if (IS_ERR(port
->keyring
)) {
2002 pr_warn("NVMe keyring not available, disabling TLS\n");
2003 port
->keyring
= NULL
;
2007 for (i
= 1; i
<= NVMET_MAX_ANAGRPS
; i
++) {
2008 if (i
== NVMET_DEFAULT_ANA_GRPID
)
2009 port
->ana_state
[1] = NVME_ANA_OPTIMIZED
;
2011 port
->ana_state
[i
] = NVME_ANA_INACCESSIBLE
;
2014 list_add(&port
->global_entry
, &nvmet_ports_list
);
2016 INIT_LIST_HEAD(&port
->entry
);
2017 INIT_LIST_HEAD(&port
->subsystems
);
2018 INIT_LIST_HEAD(&port
->referrals
);
2019 port
->inline_data_size
= -1; /* < 0 == let the transport choose */
2020 port
->max_queue_size
= -1; /* < 0 == let the transport choose */
2022 port
->disc_addr
.portid
= cpu_to_le16(portid
);
2023 port
->disc_addr
.adrfam
= NVMF_ADDR_FAMILY_MAX
;
2024 port
->disc_addr
.treq
= NVMF_TREQ_DISABLE_SQFLOW
;
2025 config_group_init_type_name(&port
->group
, name
, &nvmet_port_type
);
2027 config_group_init_type_name(&port
->subsys_group
,
2028 "subsystems", &nvmet_port_subsys_type
);
2029 configfs_add_default_group(&port
->subsys_group
, &port
->group
);
2031 config_group_init_type_name(&port
->referrals_group
,
2032 "referrals", &nvmet_referrals_type
);
2033 configfs_add_default_group(&port
->referrals_group
, &port
->group
);
2035 config_group_init_type_name(&port
->ana_groups_group
,
2036 "ana_groups", &nvmet_ana_groups_type
);
2037 configfs_add_default_group(&port
->ana_groups_group
, &port
->group
);
2039 port
->ana_default_group
.port
= port
;
2040 port
->ana_default_group
.grpid
= NVMET_DEFAULT_ANA_GRPID
;
2041 config_group_init_type_name(&port
->ana_default_group
.group
,
2042 __stringify(NVMET_DEFAULT_ANA_GRPID
),
2043 &nvmet_ana_group_type
);
2044 configfs_add_default_group(&port
->ana_default_group
.group
,
2045 &port
->ana_groups_group
);
2047 return &port
->group
;
2050 static struct configfs_group_operations nvmet_ports_group_ops
= {
2051 .make_group
= nvmet_ports_make
,
2054 static const struct config_item_type nvmet_ports_type
= {
2055 .ct_group_ops
= &nvmet_ports_group_ops
,
2056 .ct_owner
= THIS_MODULE
,
2059 static struct config_group nvmet_subsystems_group
;
2060 static struct config_group nvmet_ports_group
;
2062 #ifdef CONFIG_NVME_TARGET_AUTH
2063 static ssize_t
nvmet_host_dhchap_key_show(struct config_item
*item
,
2069 down_read(&nvmet_config_sem
);
2070 dhchap_secret
= to_host(item
)->dhchap_secret
;
2072 ret
= sprintf(page
, "\n");
2074 ret
= sprintf(page
, "%s\n", dhchap_secret
);
2075 up_read(&nvmet_config_sem
);
2079 static ssize_t
nvmet_host_dhchap_key_store(struct config_item
*item
,
2080 const char *page
, size_t count
)
2082 struct nvmet_host
*host
= to_host(item
);
2085 ret
= nvmet_auth_set_key(host
, page
, false);
2087 * Re-authentication is a soft state, so keep the
2088 * current authentication valid until the host
2089 * requests re-authentication.
2091 return ret
< 0 ? ret
: count
;
2094 CONFIGFS_ATTR(nvmet_host_
, dhchap_key
);
2096 static ssize_t
nvmet_host_dhchap_ctrl_key_show(struct config_item
*item
,
2099 u8
*dhchap_secret
= to_host(item
)->dhchap_ctrl_secret
;
2102 down_read(&nvmet_config_sem
);
2103 dhchap_secret
= to_host(item
)->dhchap_ctrl_secret
;
2105 ret
= sprintf(page
, "\n");
2107 ret
= sprintf(page
, "%s\n", dhchap_secret
);
2108 up_read(&nvmet_config_sem
);
2112 static ssize_t
nvmet_host_dhchap_ctrl_key_store(struct config_item
*item
,
2113 const char *page
, size_t count
)
2115 struct nvmet_host
*host
= to_host(item
);
2118 ret
= nvmet_auth_set_key(host
, page
, true);
2120 * Re-authentication is a soft state, so keep the
2121 * current authentication valid until the host
2122 * requests re-authentication.
2124 return ret
< 0 ? ret
: count
;
2127 CONFIGFS_ATTR(nvmet_host_
, dhchap_ctrl_key
);
2129 static ssize_t
nvmet_host_dhchap_hash_show(struct config_item
*item
,
2132 struct nvmet_host
*host
= to_host(item
);
2133 const char *hash_name
= nvme_auth_hmac_name(host
->dhchap_hash_id
);
2135 return sprintf(page
, "%s\n", hash_name
? hash_name
: "none");
2138 static ssize_t
nvmet_host_dhchap_hash_store(struct config_item
*item
,
2139 const char *page
, size_t count
)
2141 struct nvmet_host
*host
= to_host(item
);
2144 hmac_id
= nvme_auth_hmac_id(page
);
2145 if (hmac_id
== NVME_AUTH_HASH_INVALID
)
2147 if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id
), 0, 0))
2149 host
->dhchap_hash_id
= hmac_id
;
2153 CONFIGFS_ATTR(nvmet_host_
, dhchap_hash
);
2155 static ssize_t
nvmet_host_dhchap_dhgroup_show(struct config_item
*item
,
2158 struct nvmet_host
*host
= to_host(item
);
2159 const char *dhgroup
= nvme_auth_dhgroup_name(host
->dhchap_dhgroup_id
);
2161 return sprintf(page
, "%s\n", dhgroup
? dhgroup
: "none");
2164 static ssize_t
nvmet_host_dhchap_dhgroup_store(struct config_item
*item
,
2165 const char *page
, size_t count
)
2167 struct nvmet_host
*host
= to_host(item
);
2170 dhgroup_id
= nvme_auth_dhgroup_id(page
);
2171 if (dhgroup_id
== NVME_AUTH_DHGROUP_INVALID
)
2173 if (dhgroup_id
!= NVME_AUTH_DHGROUP_NULL
) {
2174 const char *kpp
= nvme_auth_dhgroup_kpp(dhgroup_id
);
2176 if (!crypto_has_kpp(kpp
, 0, 0))
2179 host
->dhchap_dhgroup_id
= dhgroup_id
;
2183 CONFIGFS_ATTR(nvmet_host_
, dhchap_dhgroup
);
2185 static struct configfs_attribute
*nvmet_host_attrs
[] = {
2186 &nvmet_host_attr_dhchap_key
,
2187 &nvmet_host_attr_dhchap_ctrl_key
,
2188 &nvmet_host_attr_dhchap_hash
,
2189 &nvmet_host_attr_dhchap_dhgroup
,
2192 #endif /* CONFIG_NVME_TARGET_AUTH */
2194 static void nvmet_host_release(struct config_item
*item
)
2196 struct nvmet_host
*host
= to_host(item
);
2198 #ifdef CONFIG_NVME_TARGET_AUTH
2199 kfree(host
->dhchap_secret
);
2200 kfree(host
->dhchap_ctrl_secret
);
2205 static struct configfs_item_operations nvmet_host_item_ops
= {
2206 .release
= nvmet_host_release
,
2209 static const struct config_item_type nvmet_host_type
= {
2210 .ct_item_ops
= &nvmet_host_item_ops
,
2211 #ifdef CONFIG_NVME_TARGET_AUTH
2212 .ct_attrs
= nvmet_host_attrs
,
2214 .ct_owner
= THIS_MODULE
,
2217 static struct config_group
*nvmet_hosts_make_group(struct config_group
*group
,
2220 struct nvmet_host
*host
;
2222 host
= kzalloc(sizeof(*host
), GFP_KERNEL
);
2224 return ERR_PTR(-ENOMEM
);
2226 #ifdef CONFIG_NVME_TARGET_AUTH
2227 /* Default to SHA256 */
2228 host
->dhchap_hash_id
= NVME_AUTH_HASH_SHA256
;
2231 config_group_init_type_name(&host
->group
, name
, &nvmet_host_type
);
2233 return &host
->group
;
2236 static struct configfs_group_operations nvmet_hosts_group_ops
= {
2237 .make_group
= nvmet_hosts_make_group
,
2240 static const struct config_item_type nvmet_hosts_type
= {
2241 .ct_group_ops
= &nvmet_hosts_group_ops
,
2242 .ct_owner
= THIS_MODULE
,
2245 static struct config_group nvmet_hosts_group
;
2247 static ssize_t
nvmet_root_discovery_nqn_show(struct config_item
*item
,
2250 return snprintf(page
, PAGE_SIZE
, "%s\n", nvmet_disc_subsys
->subsysnqn
);
2253 static ssize_t
nvmet_root_discovery_nqn_store(struct config_item
*item
,
2254 const char *page
, size_t count
)
2256 struct list_head
*entry
;
2259 len
= strcspn(page
, "\n");
2260 if (!len
|| len
> NVMF_NQN_FIELD_LEN
- 1)
2263 down_write(&nvmet_config_sem
);
2264 list_for_each(entry
, &nvmet_subsystems_group
.cg_children
) {
2265 struct config_item
*item
=
2266 container_of(entry
, struct config_item
, ci_entry
);
2268 if (!strncmp(config_item_name(item
), page
, len
)) {
2269 pr_err("duplicate NQN %s\n", config_item_name(item
));
2270 up_write(&nvmet_config_sem
);
2274 memset(nvmet_disc_subsys
->subsysnqn
, 0, NVMF_NQN_FIELD_LEN
);
2275 memcpy(nvmet_disc_subsys
->subsysnqn
, page
, len
);
2276 up_write(&nvmet_config_sem
);
2281 CONFIGFS_ATTR(nvmet_root_
, discovery_nqn
);
2283 static struct configfs_attribute
*nvmet_root_attrs
[] = {
2284 &nvmet_root_attr_discovery_nqn
,
2288 static const struct config_item_type nvmet_root_type
= {
2289 .ct_attrs
= nvmet_root_attrs
,
2290 .ct_owner
= THIS_MODULE
,
2293 static struct configfs_subsystem nvmet_configfs_subsystem
= {
2296 .ci_namebuf
= "nvmet",
2297 .ci_type
= &nvmet_root_type
,
2302 int __init
nvmet_init_configfs(void)
2306 config_group_init(&nvmet_configfs_subsystem
.su_group
);
2307 mutex_init(&nvmet_configfs_subsystem
.su_mutex
);
2309 config_group_init_type_name(&nvmet_subsystems_group
,
2310 "subsystems", &nvmet_subsystems_type
);
2311 configfs_add_default_group(&nvmet_subsystems_group
,
2312 &nvmet_configfs_subsystem
.su_group
);
2314 config_group_init_type_name(&nvmet_ports_group
,
2315 "ports", &nvmet_ports_type
);
2316 configfs_add_default_group(&nvmet_ports_group
,
2317 &nvmet_configfs_subsystem
.su_group
);
2319 config_group_init_type_name(&nvmet_hosts_group
,
2320 "hosts", &nvmet_hosts_type
);
2321 configfs_add_default_group(&nvmet_hosts_group
,
2322 &nvmet_configfs_subsystem
.su_group
);
2324 ret
= configfs_register_subsystem(&nvmet_configfs_subsystem
);
2326 pr_err("configfs_register_subsystem: %d\n", ret
);
2333 void __exit
nvmet_exit_configfs(void)
2335 configfs_unregister_subsystem(&nvmet_configfs_subsystem
);