// SPDX-License-Identifier: GPL-2.0
/*
 * Configfs interface for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
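/*
 * Illustrative usage sketch (added for orientation, not part of the
 * driver): the groups and attributes defined in this file appear under
 * /sys/kernel/config/nvmet once configfs is mounted.  The subsystem name,
 * namespace id, port id and backing device below are examples only.
 *
 *   mkdir /sys/kernel/config/nvmet/subsystems/testnqn
 *   echo 1 > /sys/kernel/config/nvmet/subsystems/testnqn/attr_allow_any_host
 *   mkdir /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1
 *   echo /dev/nvme0n1 > /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1/device_path
 *   echo 1 > /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1/enable
 *   mkdir /sys/kernel/config/nvmet/ports/1
 *   echo loop > /sys/kernel/config/nvmet/ports/1/addr_trtype
 *   ln -s /sys/kernel/config/nvmet/subsystems/testnqn \
 *         /sys/kernel/config/nvmet/ports/1/subsystems/testnqn
 */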
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kstrtox.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/ctype.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
#ifdef CONFIG_NVME_TARGET_AUTH
#include <linux/nvme-auth.h>
#endif
#include <linux/nvme-keyring.h>
#include <crypto/hash.h>
#include <crypto/kpp.h>
#include <linux/nospec.h>

#include "nvmet.h"
static const struct config_item_type nvmet_host_type;
static const struct config_item_type nvmet_subsys_type;

static LIST_HEAD(nvmet_ports_list);
struct list_head *nvmet_ports = &nvmet_ports_list;
struct nvmet_type_name_map {
	u8		type;
	const char	*name;
};
static struct nvmet_type_name_map nvmet_transport[] = {
	{ NVMF_TRTYPE_RDMA,	"rdma" },
	{ NVMF_TRTYPE_FC,	"fc" },
	{ NVMF_TRTYPE_TCP,	"tcp" },
	{ NVMF_TRTYPE_LOOP,	"loop" },
};
static const struct nvmet_type_name_map nvmet_addr_family[] = {
	{ NVMF_ADDR_FAMILY_PCI,		"pcie" },
	{ NVMF_ADDR_FAMILY_IP4,		"ipv4" },
	{ NVMF_ADDR_FAMILY_IP6,		"ipv6" },
	{ NVMF_ADDR_FAMILY_IB,		"ib" },
	{ NVMF_ADDR_FAMILY_FC,		"fc" },
	{ NVMF_ADDR_FAMILY_LOOP,	"loop" },
};
static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
{
	if (p->enabled)
		pr_err("Disable port '%u' before changing attribute in %s\n",
		       le16_to_cpu(p->disc_addr.portid), caller);
	return p->enabled;
}
/*
 * nvmet_port Generic ConfigFS definitions.
 * Used in any place in the ConfigFS tree that refers to an address.
 */
static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
{
	u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
	int i;

	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
		if (nvmet_addr_family[i].type == adrfam)
			return snprintf(page, PAGE_SIZE, "%s\n",
					nvmet_addr_family[i].name);
	}

	return snprintf(page, PAGE_SIZE, "\n");
}

static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
		if (sysfs_streq(page, nvmet_addr_family[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for adrfam\n", page);
	return -EINVAL;

found:
	port->disc_addr.adrfam = nvmet_addr_family[i].type;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_adrfam);
static ssize_t nvmet_addr_portid_show(struct config_item *item,
		char *page)
{
	__le16 portid = to_nvmet_port(item)->disc_addr.portid;

	return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
}

static ssize_t nvmet_addr_portid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	u16 portid = 0;

	if (kstrtou16(page, 0, &portid)) {
		pr_err("Invalid value '%s' for portid\n", page);
		return -EINVAL;
	}

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	port->disc_addr.portid = cpu_to_le16(portid);
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_portid);
static ssize_t nvmet_addr_traddr_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
}

static ssize_t nvmet_addr_traddr_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	if (count > NVMF_TRADDR_SIZE) {
		pr_err("Invalid value '%s' for traddr\n", page);
		return -EINVAL;
	}

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
		return -EINVAL;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_traddr);
static const struct nvmet_type_name_map nvmet_addr_treq[] = {
	{ NVMF_TREQ_NOT_SPECIFIED,	"not specified" },
	{ NVMF_TREQ_REQUIRED,		"required" },
	{ NVMF_TREQ_NOT_REQUIRED,	"not required" },
};
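/*
 * Note (added for clarity): in the discovery log entry the TREQ byte
 * carries the secure channel requirement in its low bits; the mask helper
 * below strips those bits (NVME_TREQ_SECURE_CHANNEL_MASK) so that other
 * flags, such as NVMF_TREQ_DISABLE_SQFLOW, survive a requirement change.
 */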
static inline u8 nvmet_port_disc_addr_treq_mask(struct nvmet_port *port)
{
	return (port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK);
}

static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
{
	u8 treq = nvmet_port_disc_addr_treq_secure_channel(to_nvmet_port(item));
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
		if (treq == nvmet_addr_treq[i].type)
			return snprintf(page, PAGE_SIZE, "%s\n",
					nvmet_addr_treq[i].name);
	}

	return snprintf(page, PAGE_SIZE, "\n");
}

static ssize_t nvmet_addr_treq_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	u8 treq = nvmet_port_disc_addr_treq_mask(port);
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
		if (sysfs_streq(page, nvmet_addr_treq[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for treq\n", page);
	return -EINVAL;

found:
	if (port->disc_addr.trtype == NVMF_TRTYPE_TCP &&
	    port->disc_addr.tsas.tcp.sectype == NVMF_TCP_SECTYPE_TLS13) {
		switch (nvmet_addr_treq[i].type) {
		case NVMF_TREQ_NOT_SPECIFIED:
			pr_debug("treq '%s' not allowed for TLS1.3\n",
				 nvmet_addr_treq[i].name);
			return -EINVAL;
		case NVMF_TREQ_NOT_REQUIRED:
			pr_warn("Allow non-TLS connections while TLS1.3 is enabled\n");
			break;
		default:
			break;
		}
	}

	treq |= nvmet_addr_treq[i].type;
	port->disc_addr.treq = treq;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_treq);
static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
}

static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	if (count > NVMF_TRSVCID_SIZE) {
		pr_err("Invalid value '%s' for trsvcid\n", page);
		return -EINVAL;
	}

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
		return -EINVAL;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_trsvcid);
static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
}

static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int ret;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;
	ret = kstrtoint(page, 0, &port->inline_data_size);
	if (ret) {
		pr_err("Invalid value '%s' for inline_data_size\n", page);
		return -EINVAL;
	}
	return count;
}

CONFIGFS_ATTR(nvmet_, param_inline_data_size);

static ssize_t nvmet_param_max_queue_size_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%d\n", port->max_queue_size);
}

static ssize_t nvmet_param_max_queue_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int ret;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;
	ret = kstrtoint(page, 0, &port->max_queue_size);
	if (ret) {
		pr_err("Invalid value '%s' for max_queue_size\n", page);
		return -EINVAL;
	}
	return count;
}

CONFIGFS_ATTR(nvmet_, param_max_queue_size);
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);

	return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
}

static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	bool val;

	if (kstrtobool(page, &val))
		return -EINVAL;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	port->pi_enable = val;
	return count;
}

CONFIGFS_ATTR(nvmet_, param_pi_enable);
#endif
static ssize_t nvmet_addr_trtype_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
		if (port->disc_addr.trtype == nvmet_transport[i].type)
			return snprintf(page, PAGE_SIZE,
					"%s\n", nvmet_transport[i].name);
	}

	return sprintf(page, "\n");
}
static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
{
	port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
	port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
	port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
}

static void nvmet_port_init_tsas_tcp(struct nvmet_port *port, int sectype)
{
	port->disc_addr.tsas.tcp.sectype = sectype;
}
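/*
 * Note (added for clarity): nvmet_addr_trtype_store() below zeroes the
 * transport specific address subtype (tsas) on every transport change and
 * re-initialises it with the helpers above: RDMA defaults to a connected
 * QP over RDMA-CM, TCP starts out with sectype "none".
 */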
static ssize_t nvmet_addr_trtype_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
		if (sysfs_streq(page, nvmet_transport[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for trtype\n", page);
	return -EINVAL;

found:
	memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
	port->disc_addr.trtype = nvmet_transport[i].type;
	if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
		nvmet_port_init_tsas_rdma(port);
	else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP)
		nvmet_port_init_tsas_tcp(port, NVMF_TCP_SECTYPE_NONE);
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_trtype);
static const struct nvmet_type_name_map nvmet_addr_tsas_tcp[] = {
	{ NVMF_TCP_SECTYPE_NONE,	"none" },
	{ NVMF_TCP_SECTYPE_TLS13,	"tls1.3" },
};

static const struct nvmet_type_name_map nvmet_addr_tsas_rdma[] = {
	{ NVMF_RDMA_QPTYPE_CONNECTED,	"connected" },
	{ NVMF_RDMA_QPTYPE_DATAGRAM,	"datagram" },
};
static ssize_t nvmet_addr_tsas_show(struct config_item *item,
		char *page)
{
	struct nvmet_port *port = to_nvmet_port(item);
	int i;

	if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) {
		for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
			if (port->disc_addr.tsas.tcp.sectype == nvmet_addr_tsas_tcp[i].type)
				return sprintf(page, "%s\n", nvmet_addr_tsas_tcp[i].name);
		}
	} else if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) {
		for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) {
			if (port->disc_addr.tsas.rdma.qptype == nvmet_addr_tsas_rdma[i].type)
				return sprintf(page, "%s\n", nvmet_addr_tsas_rdma[i].name);
		}
	}
	return sprintf(page, "\n");
}
static u8 nvmet_addr_tsas_rdma_store(const char *page)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) {
		if (sysfs_streq(page, nvmet_addr_tsas_rdma[i].name))
			return nvmet_addr_tsas_rdma[i].type;
	}
	return NVMF_RDMA_QPTYPE_INVALID;
}

static u8 nvmet_addr_tsas_tcp_store(const char *page)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
		if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name))
			return nvmet_addr_tsas_tcp[i].type;
	}
	return NVMF_TCP_SECTYPE_INVALID;
}
static ssize_t nvmet_addr_tsas_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	u8 treq = nvmet_port_disc_addr_treq_mask(port);
	u8 sectype, qptype;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) {
		qptype = nvmet_addr_tsas_rdma_store(page);
		if (qptype == port->disc_addr.tsas.rdma.qptype)
			return count;
	} else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) {
		sectype = nvmet_addr_tsas_tcp_store(page);
		if (sectype != NVMF_TCP_SECTYPE_INVALID)
			goto found;
	}

	pr_err("Invalid value '%s' for tsas\n", page);
	return -EINVAL;

found:
	if (sectype == NVMF_TCP_SECTYPE_TLS13) {
		if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS)) {
			pr_err("TLS is not supported\n");
			return -EINVAL;
		}
		if (!port->keyring) {
			pr_err("TLS keyring not configured\n");
			return -EINVAL;
		}
	}

	nvmet_port_init_tsas_tcp(port, sectype);
	/*
	 * If TLS is enabled TREQ should be set to 'required' per default
	 */
	if (sectype == NVMF_TCP_SECTYPE_TLS13) {
		u8 sc = nvmet_port_disc_addr_treq_secure_channel(port);

		if (sc == NVMF_TREQ_NOT_SPECIFIED)
			treq |= NVMF_TREQ_REQUIRED;
		else
			treq |= sc;
	} else {
		treq |= NVMF_TREQ_NOT_SPECIFIED;
	}
	port->disc_addr.treq = treq;
	return count;
}

CONFIGFS_ATTR(nvmet_, addr_tsas);
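/*
 * Illustrative example (not part of the driver): enabling TLS on a TCP
 * port, assuming the kernel was built with CONFIG_NVME_TARGET_TCP_TLS and
 * the port picked up the NVMe keyring at creation time:
 *
 *   echo tcp    > /sys/kernel/config/nvmet/ports/1/addr_trtype
 *   echo tls1.3 > /sys/kernel/config/nvmet/ports/1/addr_tsas
 *
 * As implemented above, writing "tls1.3" also forces addr_treq to
 * "required" unless a secure channel requirement was already set.
 */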
/*
 * Namespace structures & file operation functions below
 */
static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
{
	return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
}

static ssize_t nvmet_ns_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);
	ret = -EBUSY;
	if (ns->enabled)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");
	if (!len)
		goto out_unlock;

	kfree(ns->device_path);
	ret = -ENOMEM;
	ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
	if (!ns->device_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);
	return count;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, device_path);
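/*
 * Illustrative example (not part of the driver): device_path may point at a
 * block device or a regular file; the path below is an example only.
 *
 *   echo /dev/sdb > /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1/device_path
 */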
#ifdef CONFIG_PCI_P2PDMA
static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
}

static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct pci_dev *p2p_dev = NULL;
	bool use_p2pmem;
	int ret = count;
	int error;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
	if (error) {
		ret = error;
		goto out_unlock;
	}

	ns->use_p2pmem = use_p2pmem;
	pci_dev_put(ns->p2p_dev);
	ns->p2p_dev = p2p_dev;

out_unlock:
	mutex_unlock(&ns->subsys->lock);

	return ret;
}

CONFIGFS_ATTR(nvmet_ns_, p2pmem);
#endif /* CONFIG_PCI_P2PDMA */
static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
}

static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	if (uuid_parse(page, &ns->uuid))
		ret = -EINVAL;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_uuid);
static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
}
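/*
 * Note (added for clarity): the store helper below parses the NGUID as 16
 * hex byte pairs, with an optional '-' or ':' separator allowed after any
 * pair, so UUID-style grouping such as
 * "0f0e0d0c-0b0a-0908-0706-050403020100" is accepted.
 */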
static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	u8 nguid[16];
	const char *p = page;
	int i;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	for (i = 0; i < 16; i++) {
		if (p + 2 > page + count) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (!isxdigit(p[0]) || !isxdigit(p[1])) {
			ret = -EINVAL;
			goto out_unlock;
		}

		nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
		p += 2;

		if (*p == '-' || *p == ':')
			p++;
	}

	memcpy(&ns->nguid, nguid, sizeof(nguid));
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, device_nguid);
static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
{
	return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
}

static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	u32 oldgrpid, newgrpid;
	int ret;

	ret = kstrtou32(page, 0, &newgrpid);
	if (ret)
		return ret;

	if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
		return -EINVAL;

	down_write(&nvmet_ana_sem);
	oldgrpid = ns->anagrpid;
	newgrpid = array_index_nospec(newgrpid, NVMET_MAX_ANAGRPS);
	nvmet_ana_group_enabled[newgrpid]++;
	ns->anagrpid = newgrpid;
	nvmet_ana_group_enabled[oldgrpid]--;
	nvmet_ana_chgcnt++;
	up_write(&nvmet_ana_sem);

	nvmet_send_ana_event(ns->subsys, NULL);
	return count;
}

CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
}

static ssize_t nvmet_ns_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool enable;
	int ret = 0;

	if (kstrtobool(page, &enable))
		return -EINVAL;

	/*
	 * take a global nvmet_config_sem because the disable routine has a
	 * window where it releases the subsys-lock, giving a chance to
	 * a parallel enable to concurrently execute causing the disable to
	 * have a misaccounting of the ns percpu_ref.
	 */
	down_write(&nvmet_config_sem);
	if (enable)
		ret = nvmet_ns_enable(ns);
	else
		nvmet_ns_disable(ns);
	up_write(&nvmet_config_sem);

	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_ns_, enable);
static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
}

static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool val;

	if (kstrtobool(page, &val))
		return -EINVAL;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		pr_err("disable ns before setting buffered_io value.\n");
		mutex_unlock(&ns->subsys->lock);
		return -EINVAL;
	}

	ns->buffered_io = val;
	mutex_unlock(&ns->subsys->lock);
	return count;
}

CONFIGFS_ATTR(nvmet_ns_, buffered_io);
static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool val;

	if (kstrtobool(page, &val))
		return -EINVAL;

	if (!val)
		return -EINVAL;

	mutex_lock(&ns->subsys->lock);
	if (!ns->enabled) {
		pr_err("enable ns before revalidate.\n");
		mutex_unlock(&ns->subsys->lock);
		return -EINVAL;
	}
	if (nvmet_ns_revalidate(ns))
		nvmet_ns_changed(ns->subsys, ns->nsid);
	mutex_unlock(&ns->subsys->lock);
	return count;
}

CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
static ssize_t nvmet_ns_resv_enable_show(struct config_item *item, char *page)
{
	return sysfs_emit(page, "%d\n", to_nvmet_ns(item)->pr.enable);
}

static ssize_t nvmet_ns_resv_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool val;

	if (kstrtobool(page, &val))
		return -EINVAL;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		pr_err("the ns:%d is already enabled.\n", ns->nsid);
		mutex_unlock(&ns->subsys->lock);
		return -EINVAL;
	}
	ns->pr.enable = val;
	mutex_unlock(&ns->subsys->lock);
	return count;
}
CONFIGFS_ATTR(nvmet_ns_, resv_enable);
static struct configfs_attribute *nvmet_ns_attrs[] = {
	&nvmet_ns_attr_device_path,
	&nvmet_ns_attr_device_nguid,
	&nvmet_ns_attr_device_uuid,
	&nvmet_ns_attr_ana_grpid,
	&nvmet_ns_attr_enable,
	&nvmet_ns_attr_buffered_io,
	&nvmet_ns_attr_revalidate_size,
	&nvmet_ns_attr_resv_enable,
#ifdef CONFIG_PCI_P2PDMA
	&nvmet_ns_attr_p2pmem,
#endif
	NULL,
};
bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
{
	struct config_item *ns_item;
	char name[12];

	snprintf(name, sizeof(name), "%u", nsid);
	mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
	ns_item = config_group_find_item(&subsys->namespaces_group, name);
	mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
	return ns_item != NULL;
}

static void nvmet_ns_release(struct config_item *item)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	nvmet_ns_free(ns);
}

static struct configfs_item_operations nvmet_ns_item_ops = {
	.release	= nvmet_ns_release,
};

static const struct config_item_type nvmet_ns_type = {
	.ct_item_ops	= &nvmet_ns_item_ops,
	.ct_attrs	= nvmet_ns_attrs,
	.ct_owner	= THIS_MODULE,
};
static struct config_group *nvmet_ns_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
	struct nvmet_ns *ns;
	int ret;
	u32 nsid;

	ret = kstrtou32(name, 0, &nsid);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (nsid == 0 || nsid == NVME_NSID_ALL) {
		pr_err("invalid nsid %#x", nsid);
		goto out;
	}

	ret = -ENOMEM;
	ns = nvmet_ns_alloc(subsys, nsid);
	if (!ns)
		goto out;
	config_group_init_type_name(&ns->group, name, &nvmet_ns_type);

	pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);

	return &ns->group;
out:
	return ERR_PTR(ret);
}

static struct configfs_group_operations nvmet_namespaces_group_ops = {
	.make_group		= nvmet_ns_make,
};

static const struct config_item_type nvmet_namespaces_type = {
	.ct_group_ops		= &nvmet_namespaces_group_ops,
	.ct_owner		= THIS_MODULE,
};
#ifdef CONFIG_NVME_TARGET_PASSTHRU

static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);

	return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
}

static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);

	ret = -EBUSY;
	if (subsys->passthru_ctrl)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");
	if (!len)
		goto out_unlock;

	kfree(subsys->passthru_ctrl_path);
	ret = -ENOMEM;
	subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
	if (!subsys->passthru_ctrl_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);

	return count;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}
CONFIGFS_ATTR(nvmet_passthru_, device_path);
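/*
 * Illustrative example (not part of the driver): exporting a local NVMe
 * controller through a subsystem's passthru group; the character device
 * path is an example only.
 *
 *   echo /dev/nvme1 > /sys/kernel/config/nvmet/subsystems/testnqn/passthru/device_path
 *   echo 1 > /sys/kernel/config/nvmet/subsystems/testnqn/passthru/enable
 */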
static ssize_t nvmet_passthru_enable_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);

	return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
}

static ssize_t nvmet_passthru_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	bool enable;
	int ret = 0;

	if (kstrtobool(page, &enable))
		return -EINVAL;

	if (enable)
		ret = nvmet_passthru_ctrl_enable(subsys);
	else
		nvmet_passthru_ctrl_disable(subsys);

	return ret ? ret : count;
}
CONFIGFS_ATTR(nvmet_passthru_, enable);

static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout);
}

static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	unsigned int timeout;

	if (kstrtouint(page, 0, &timeout))
		return -EINVAL;
	subsys->admin_timeout = timeout;
	return count;
}
CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);

static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout);
}

static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	unsigned int timeout;

	if (kstrtouint(page, 0, &timeout))
		return -EINVAL;
	subsys->io_timeout = timeout;
	return count;
}
CONFIGFS_ATTR(nvmet_passthru_, io_timeout);

static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
}

static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	unsigned int clear_ids;

	if (kstrtouint(page, 0, &clear_ids))
		return -EINVAL;
	subsys->clear_ids = clear_ids;
	return count;
}
CONFIGFS_ATTR(nvmet_passthru_, clear_ids);

static struct configfs_attribute *nvmet_passthru_attrs[] = {
	&nvmet_passthru_attr_device_path,
	&nvmet_passthru_attr_enable,
	&nvmet_passthru_attr_admin_timeout,
	&nvmet_passthru_attr_io_timeout,
	&nvmet_passthru_attr_clear_ids,
	NULL,
};

static const struct config_item_type nvmet_passthru_type = {
	.ct_attrs		= nvmet_passthru_attrs,
	.ct_owner		= THIS_MODULE,
};

static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
{
	config_group_init_type_name(&subsys->passthru_group,
				    "passthru", &nvmet_passthru_type);
	configfs_add_default_group(&subsys->passthru_group,
				   &subsys->group);
}

#else /* CONFIG_NVME_TARGET_PASSTHRU */

static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
{
}

#endif /* CONFIG_NVME_TARGET_PASSTHRU */
static int nvmet_port_subsys_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys;
	struct nvmet_subsys_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_subsys_type) {
		pr_err("can only link subsystems into the subsystems dir.!\n");
		return -EINVAL;
	}
	subsys = to_subsys(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->subsys = subsys;

	down_write(&nvmet_config_sem);
	ret = -EEXIST;
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto out_free_link;
	}

	if (list_empty(&port->subsystems)) {
		ret = nvmet_enable_port(port);
		if (ret)
			goto out_free_link;
	}

	list_add_tail(&link->entry, &port->subsystems);
	nvmet_port_disc_changed(port, subsys);

	up_write(&nvmet_config_sem);
	return 0;

out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}

static void nvmet_port_subsys_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys = to_subsys(target);
	struct nvmet_subsys_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_port_del_ctrls(port, subsys);
	nvmet_port_disc_changed(port, subsys);

	if (list_empty(&port->subsystems))
		nvmet_disable_port(port);
	up_write(&nvmet_config_sem);
	kfree(p);
}

static struct configfs_item_operations nvmet_port_subsys_item_ops = {
	.allow_link	= nvmet_port_subsys_allow_link,
	.drop_link	= nvmet_port_subsys_drop_link,
};

static const struct config_item_type nvmet_port_subsys_type = {
	.ct_item_ops	= &nvmet_port_subsys_item_ops,
	.ct_owner	= THIS_MODULE,
};
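/*
 * Illustrative example (not part of the driver): a subsystem is exported on
 * a port by symlinking it into the port's "subsystems" directory.  As the
 * allow_link/drop_link callbacks above implement, the first link enables
 * the port and removing the last link disables it again.
 *
 *   ln -s /sys/kernel/config/nvmet/subsystems/testnqn \
 *         /sys/kernel/config/nvmet/ports/1/subsystems/testnqn
 */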
static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host;
	struct nvmet_host_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_host_type) {
		pr_err("can only link hosts into the allowed_hosts directory!\n");
		return -EINVAL;
	}

	host = to_host(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->host = host;

	down_write(&nvmet_config_sem);
	ret = -EINVAL;
	if (subsys->allow_any_host) {
		pr_err("can't add hosts when allow_any_host is set!\n");
		goto out_free_link;
	}

	ret = -EEXIST;
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto out_free_link;
	}
	list_add_tail(&link->entry, &subsys->hosts);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	return 0;
out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}

static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host = to_host(target);
	struct nvmet_host_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	kfree(p);
}

static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
	.allow_link	= nvmet_allowed_hosts_allow_link,
	.drop_link	= nvmet_allowed_hosts_drop_link,
};

static const struct config_item_type nvmet_allowed_hosts_type = {
	.ct_item_ops	= &nvmet_allowed_hosts_item_ops,
	.ct_owner	= THIS_MODULE,
};
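/*
 * Illustrative example (not part of the driver): restricting a subsystem to
 * a known host NQN; the host directory name is an example only.
 *
 *   mkdir /sys/kernel/config/nvmet/hosts/hostnqn-example
 *   echo 0 > /sys/kernel/config/nvmet/subsystems/testnqn/attr_allow_any_host
 *   ln -s /sys/kernel/config/nvmet/hosts/hostnqn-example \
 *         /sys/kernel/config/nvmet/subsystems/testnqn/allowed_hosts/hostnqn-example
 */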
static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n",
		to_subsys(item)->allow_any_host);
}

static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool allow_any_host;
	int ret = 0;

	if (kstrtobool(page, &allow_any_host))
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (allow_any_host && !list_empty(&subsys->hosts)) {
		pr_err("Can't set allow_any_host when explicit hosts are set!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	if (subsys->allow_any_host != allow_any_host) {
		subsys->allow_any_host = allow_any_host;
		nvmet_subsys_disc_changed(subsys, NULL);
	}

out_unlock:
	up_write(&nvmet_config_sem);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	if (NVME_TERTIARY(subsys->ver))
		return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
				NVME_MAJOR(subsys->ver),
				NVME_MINOR(subsys->ver),
				NVME_TERTIARY(subsys->ver));

	return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
			NVME_MAJOR(subsys->ver),
			NVME_MINOR(subsys->ver));
}

static ssize_t
nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int major, minor, tertiary = 0;
	int ret;

	if (subsys->subsys_discovered) {
		if (NVME_TERTIARY(subsys->ver))
			pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
			       NVME_MAJOR(subsys->ver),
			       NVME_MINOR(subsys->ver),
			       NVME_TERTIARY(subsys->ver));
		else
			pr_err("Can't set version number. %llu.%llu is already assigned\n",
			       NVME_MAJOR(subsys->ver),
			       NVME_MINOR(subsys->ver));
		return -EINVAL;
	}

	/* passthru subsystems use the underlying controller's version */
	if (nvmet_is_passthru_subsys(subsys))
		return -EINVAL;

	ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
	if (ret != 2 && ret != 3)
		return -EINVAL;

	subsys->ver = NVME_VS(major, minor, tertiary);

	return count;
}

static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_version);
/* See Section 1.5 of NVMe 1.4 */
static bool nvmet_is_ascii(const char c)
{
	return c >= 0x20 && c <= 0x7e;
}

static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	return snprintf(page, PAGE_SIZE, "%.*s\n",
			NVMET_SN_MAX_SIZE, subsys->serial);
}

static ssize_t
nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int pos, len = strcspn(page, "\n");

	if (subsys->subsys_discovered) {
		pr_err("Can't set serial number. %s is already assigned\n",
		       subsys->serial);
		return -EINVAL;
	}

	if (!len || len > NVMET_SN_MAX_SIZE) {
		pr_err("Serial Number can not be empty or exceed %d Bytes\n",
		       NVMET_SN_MAX_SIZE);
		return -EINVAL;
	}

	for (pos = 0; pos < len; pos++) {
		if (!nvmet_is_ascii(page[pos])) {
			pr_err("Serial Number must contain only ASCII strings\n");
			return -EINVAL;
		}
	}

	memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');

	return count;
}

static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
}

static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
		const char *page, size_t cnt)
{
	u16 cntlid_min;

	if (sscanf(page, "%hu\n", &cntlid_min) != 1)
		return -EINVAL;

	if (cntlid_min == 0)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (cntlid_min > to_subsys(item)->cntlid_max)
		goto out_unlock;
	to_subsys(item)->cntlid_min = cntlid_min;
	up_write(&nvmet_config_sem);
	return cnt;

out_unlock:
	up_write(&nvmet_config_sem);
	return -EINVAL;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);

static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
}

static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
		const char *page, size_t cnt)
{
	u16 cntlid_max;

	if (sscanf(page, "%hu\n", &cntlid_max) != 1)
		return -EINVAL;

	if (cntlid_max == 0)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (cntlid_max < to_subsys(item)->cntlid_min)
		goto out_unlock;
	to_subsys(item)->cntlid_max = cntlid_max;
	up_write(&nvmet_config_sem);
	return cnt;

out_unlock:
	up_write(&nvmet_config_sem);
	return -EINVAL;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
}

static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int pos = 0, len;
	char *val;

	if (subsys->subsys_discovered) {
		pr_err("Can't set model number. %s is already assigned\n",
		       subsys->model_number);
		return -EINVAL;
	}

	len = strcspn(page, "\n");
	if (!len)
		return -EINVAL;

	if (len > NVMET_MN_MAX_SIZE) {
		pr_err("Model number size can not exceed %d Bytes\n",
		       NVMET_MN_MAX_SIZE);
		return -EINVAL;
	}

	for (pos = 0; pos < len; pos++) {
		if (!nvmet_is_ascii(page[pos]))
			return -EINVAL;
	}

	val = kmemdup_nul(page, len, GFP_KERNEL);
	if (!val)
		return -ENOMEM;
	kfree(subsys->model_number);
	subsys->model_number = val;
	return count;
}

static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_model);
static ssize_t nvmet_subsys_attr_ieee_oui_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	return sysfs_emit(page, "0x%06x\n", subsys->ieee_oui);
}

static ssize_t nvmet_subsys_attr_ieee_oui_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	uint32_t val = 0;
	int ret;

	if (subsys->subsys_discovered) {
		pr_err("Can't set IEEE OUI. 0x%06x is already assigned\n",
		       subsys->ieee_oui);
		return -EINVAL;
	}

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val >= 0x1000000)
		return -EINVAL;

	subsys->ieee_oui = val;

	return count;
}

static ssize_t nvmet_subsys_attr_ieee_oui_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_ieee_oui_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_ieee_oui);
static ssize_t nvmet_subsys_attr_firmware_show(struct config_item *item,
		char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	return sysfs_emit(page, "%s\n", subsys->firmware_rev);
}

static ssize_t nvmet_subsys_attr_firmware_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int pos = 0, len;
	char *val;

	if (subsys->subsys_discovered) {
		pr_err("Can't set firmware revision. %s is already assigned\n",
		       subsys->firmware_rev);
		return -EINVAL;
	}

	len = strcspn(page, "\n");
	if (!len)
		return -EINVAL;

	if (len > NVMET_FR_MAX_SIZE) {
		pr_err("Firmware revision size can not exceed %d Bytes\n",
		       NVMET_FR_MAX_SIZE);
		return -EINVAL;
	}

	for (pos = 0; pos < len; pos++) {
		if (!nvmet_is_ascii(page[pos]))
			return -EINVAL;
	}

	val = kmemdup_nul(page, len, GFP_KERNEL);
	if (!val)
		return -ENOMEM;

	kfree(subsys->firmware_rev);

	subsys->firmware_rev = val;

	return count;
}

static ssize_t nvmet_subsys_attr_firmware_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_firmware_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_firmware);
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
}

static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool pi_enable;

	if (kstrtobool(page, &pi_enable))
		return -EINVAL;

	subsys->pi_support = pi_enable;
	return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
#endif
static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid);
}

static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
		const char *page, size_t cnt)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	struct nvmet_ctrl *ctrl;
	u16 qid_max;

	if (sscanf(page, "%hu\n", &qid_max) != 1)
		return -EINVAL;

	if (qid_max < 1 || qid_max > NVMET_NR_QUEUES)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	subsys->max_qid = qid_max;

	/* Force reconnect */
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	up_write(&nvmet_config_sem);

	return cnt;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max);
static struct configfs_attribute *nvmet_subsys_attrs[] = {
	&nvmet_subsys_attr_attr_allow_any_host,
	&nvmet_subsys_attr_attr_version,
	&nvmet_subsys_attr_attr_serial,
	&nvmet_subsys_attr_attr_cntlid_min,
	&nvmet_subsys_attr_attr_cntlid_max,
	&nvmet_subsys_attr_attr_model,
	&nvmet_subsys_attr_attr_qid_max,
	&nvmet_subsys_attr_attr_ieee_oui,
	&nvmet_subsys_attr_attr_firmware,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&nvmet_subsys_attr_attr_pi_enable,
#endif
	NULL,
};
/*
 * Subsystem structures & folder operation functions below
 */
static void nvmet_subsys_release(struct config_item *item)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	nvmet_subsys_del_ctrls(subsys);
	nvmet_subsys_put(subsys);
}

static struct configfs_item_operations nvmet_subsys_item_ops = {
	.release		= nvmet_subsys_release,
};

static const struct config_item_type nvmet_subsys_type = {
	.ct_item_ops		= &nvmet_subsys_item_ops,
	.ct_attrs		= nvmet_subsys_attrs,
	.ct_owner		= THIS_MODULE,
};
static struct config_group *nvmet_subsys_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys;

	if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
		pr_err("can't create discovery subsystem through configfs\n");
		return ERR_PTR(-EINVAL);
	}

	if (sysfs_streq(name, nvmet_disc_subsys->subsysnqn)) {
		pr_err("can't create subsystem using unique discovery NQN\n");
		return ERR_PTR(-EINVAL);
	}

	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
	if (IS_ERR(subsys))
		return ERR_CAST(subsys);

	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);

	config_group_init_type_name(&subsys->namespaces_group,
			"namespaces", &nvmet_namespaces_type);
	configfs_add_default_group(&subsys->namespaces_group, &subsys->group);

	config_group_init_type_name(&subsys->allowed_hosts_group,
			"allowed_hosts", &nvmet_allowed_hosts_type);
	configfs_add_default_group(&subsys->allowed_hosts_group,
			&subsys->group);

	nvmet_add_passthru_group(subsys);

	return &subsys->group;
}

static struct configfs_group_operations nvmet_subsystems_group_ops = {
	.make_group		= nvmet_subsys_make,
};

static const struct config_item_type nvmet_subsystems_type = {
	.ct_group_ops		= &nvmet_subsystems_group_ops,
	.ct_owner		= THIS_MODULE,
};
static ssize_t nvmet_referral_enable_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
}

static ssize_t nvmet_referral_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
	struct nvmet_port *port = to_nvmet_port(item);
	bool enable;

	if (kstrtobool(page, &enable))
		goto inval;

	if (enable)
		nvmet_referral_enable(parent, port);
	else
		nvmet_referral_disable(parent, port);

	return count;
inval:
	pr_err("Invalid value '%s' for enable\n", page);
	return -EINVAL;
}

CONFIGFS_ATTR(nvmet_referral_, enable);

/*
 * Discovery Service subsystem definitions
 */
static struct configfs_attribute *nvmet_referral_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_portid,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_referral_attr_enable,
	NULL,
};

static void nvmet_referral_notify(struct config_group *group,
		struct config_item *item)
{
	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
	struct nvmet_port *port = to_nvmet_port(item);

	nvmet_referral_disable(parent, port);
}

static void nvmet_referral_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	kfree(port);
}

static struct configfs_item_operations nvmet_referral_item_ops = {
	.release	= nvmet_referral_release,
};

static const struct config_item_type nvmet_referral_type = {
	.ct_owner	= THIS_MODULE,
	.ct_attrs	= nvmet_referral_attrs,
	.ct_item_ops	= &nvmet_referral_item_ops,
};

static struct config_group *nvmet_referral_make(
		struct config_group *group, const char *name)
{
	struct nvmet_port *port;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&port->entry);
	config_group_init_type_name(&port->group, name, &nvmet_referral_type);

	return &port->group;
}

static struct configfs_group_operations nvmet_referral_group_ops = {
	.make_group		= nvmet_referral_make,
	.disconnect_notify	= nvmet_referral_notify,
};

static const struct config_item_type nvmet_referrals_type = {
	.ct_owner	= THIS_MODULE,
	.ct_group_ops	= &nvmet_referral_group_ops,
};
static struct nvmet_type_name_map nvmet_ana_state[] = {
	{ NVME_ANA_OPTIMIZED,		"optimized" },
	{ NVME_ANA_NONOPTIMIZED,	"non-optimized" },
	{ NVME_ANA_INACCESSIBLE,	"inaccessible" },
	{ NVME_ANA_PERSISTENT_LOSS,	"persistent-loss" },
	{ NVME_ANA_CHANGE,		"change" },
};

static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
		char *page)
{
	struct nvmet_ana_group *grp = to_ana_group(item);
	enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
		if (state == nvmet_ana_state[i].type)
			return sprintf(page, "%s\n", nvmet_ana_state[i].name);
	}

	return sprintf(page, "\n");
}

static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ana_group *grp = to_ana_group(item);
	enum nvme_ana_state *ana_state = grp->port->ana_state;
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
		if (sysfs_streq(page, nvmet_ana_state[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for ana_state\n", page);
	return -EINVAL;

found:
	down_write(&nvmet_ana_sem);
	ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
	nvmet_ana_chgcnt++;
	up_write(&nvmet_ana_sem);
	nvmet_port_send_ana_event(grp->port);
	return count;
}

CONFIGFS_ATTR(nvmet_ana_group_, ana_state);

static struct configfs_attribute *nvmet_ana_group_attrs[] = {
	&nvmet_ana_group_attr_ana_state,
	NULL,
};

static void nvmet_ana_group_release(struct config_item *item)
{
	struct nvmet_ana_group *grp = to_ana_group(item);

	if (grp == &grp->port->ana_default_group)
		return;

	down_write(&nvmet_ana_sem);
	grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
	nvmet_ana_group_enabled[grp->grpid]--;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);
	kfree(grp);
}

static struct configfs_item_operations nvmet_ana_group_item_ops = {
	.release		= nvmet_ana_group_release,
};

static const struct config_item_type nvmet_ana_group_type = {
	.ct_item_ops		= &nvmet_ana_group_item_ops,
	.ct_attrs		= nvmet_ana_group_attrs,
	.ct_owner		= THIS_MODULE,
};
static struct config_group *nvmet_ana_groups_make_group(
		struct config_group *group, const char *name)
{
	struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
	struct nvmet_ana_group *grp;
	u32 grpid;
	int ret;

	ret = kstrtou32(name, 0, &grpid);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
		goto out;

	ret = -ENOMEM;
	grp = kzalloc(sizeof(*grp), GFP_KERNEL);
	if (!grp)
		goto out;
	grp->port = port;
	grp->grpid = grpid;

	down_write(&nvmet_ana_sem);
	grpid = array_index_nospec(grpid, NVMET_MAX_ANAGRPS);
	nvmet_ana_group_enabled[grpid]++;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);

	config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
	return &grp->group;
out:
	return ERR_PTR(ret);
}

static struct configfs_group_operations nvmet_ana_groups_group_ops = {
	.make_group		= nvmet_ana_groups_make_group,
};

static const struct config_item_type nvmet_ana_groups_type = {
	.ct_group_ops		= &nvmet_ana_groups_group_ops,
	.ct_owner		= THIS_MODULE,
};
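/*
 * Illustrative example (not part of the driver): ANA group 1 is created by
 * default for every port and cannot be removed; additional groups are made
 * with mkdir (group ids <= 1 are rejected above) and steered through their
 * ana_state attribute.
 *
 *   mkdir /sys/kernel/config/nvmet/ports/1/ana_groups/2
 *   echo inaccessible > /sys/kernel/config/nvmet/ports/1/ana_groups/2/ana_state
 */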
/*
 * Ports definitions.
 */
static void nvmet_port_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	/* Let inflight controllers teardown complete */
	flush_workqueue(nvmet_wq);
	list_del(&port->global_entry);

	key_put(port->keyring);
	kfree(port->ana_state);
	kfree(port);
}

static struct configfs_attribute *nvmet_port_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_attr_addr_tsas,
	&nvmet_attr_param_inline_data_size,
	&nvmet_attr_param_max_queue_size,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&nvmet_attr_param_pi_enable,
#endif
	NULL,
};

static struct configfs_item_operations nvmet_port_item_ops = {
	.release		= nvmet_port_release,
};

static const struct config_item_type nvmet_port_type = {
	.ct_attrs		= nvmet_port_attrs,
	.ct_item_ops		= &nvmet_port_item_ops,
	.ct_owner		= THIS_MODULE,
};

static struct config_group *nvmet_ports_make(struct config_group *group,
		const char *name)
{
	struct nvmet_port *port;
	u16 portid;
	u32 i;

	if (kstrtou16(name, 0, &portid))
		return ERR_PTR(-EINVAL);

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
			sizeof(*port->ana_state), GFP_KERNEL);
	if (!port->ana_state) {
		kfree(port);
		return ERR_PTR(-ENOMEM);
	}

	if (IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS) && nvme_keyring_id()) {
		port->keyring = key_lookup(nvme_keyring_id());
		if (IS_ERR(port->keyring)) {
			pr_warn("NVMe keyring not available, disabling TLS\n");
			port->keyring = NULL;
		}
	}

	for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
		if (i == NVMET_DEFAULT_ANA_GRPID)
			port->ana_state[1] = NVME_ANA_OPTIMIZED;
		else
			port->ana_state[i] = NVME_ANA_INACCESSIBLE;
	}

	list_add(&port->global_entry, &nvmet_ports_list);

	INIT_LIST_HEAD(&port->entry);
	INIT_LIST_HEAD(&port->subsystems);
	INIT_LIST_HEAD(&port->referrals);
	port->inline_data_size = -1;	/* < 0 == let the transport choose */
	port->max_queue_size = -1;	/* < 0 == let the transport choose */

	port->disc_addr.portid = cpu_to_le16(portid);
	port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
	port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
	config_group_init_type_name(&port->group, name, &nvmet_port_type);

	config_group_init_type_name(&port->subsys_group,
			"subsystems", &nvmet_port_subsys_type);
	configfs_add_default_group(&port->subsys_group, &port->group);

	config_group_init_type_name(&port->referrals_group,
			"referrals", &nvmet_referrals_type);
	configfs_add_default_group(&port->referrals_group, &port->group);

	config_group_init_type_name(&port->ana_groups_group,
			"ana_groups", &nvmet_ana_groups_type);
	configfs_add_default_group(&port->ana_groups_group, &port->group);

	port->ana_default_group.port = port;
	port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
	config_group_init_type_name(&port->ana_default_group.group,
			__stringify(NVMET_DEFAULT_ANA_GRPID),
			&nvmet_ana_group_type);
	configfs_add_default_group(&port->ana_default_group.group,
			&port->ana_groups_group);

	return &port->group;
}

static struct configfs_group_operations nvmet_ports_group_ops = {
	.make_group		= nvmet_ports_make,
};

static const struct config_item_type nvmet_ports_type = {
	.ct_group_ops		= &nvmet_ports_group_ops,
	.ct_owner		= THIS_MODULE,
};

static struct config_group nvmet_subsystems_group;
static struct config_group nvmet_ports_group;
#ifdef CONFIG_NVME_TARGET_AUTH
static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
		char *page)
{
	u8 *dhchap_secret;
	ssize_t ret;

	down_read(&nvmet_config_sem);
	dhchap_secret = to_host(item)->dhchap_secret;
	if (!dhchap_secret)
		ret = sprintf(page, "\n");
	else
		ret = sprintf(page, "%s\n", dhchap_secret);
	up_read(&nvmet_config_sem);
	return ret;
}

static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	int ret;

	ret = nvmet_auth_set_key(host, page, false);
	/*
	 * Re-authentication is a soft state, so keep the
	 * current authentication valid until the host
	 * requests re-authentication.
	 */
	return ret < 0 ? ret : count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_key);
static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
		char *page)
{
	u8 *dhchap_secret;
	ssize_t ret;

	down_read(&nvmet_config_sem);
	dhchap_secret = to_host(item)->dhchap_ctrl_secret;
	if (!dhchap_secret)
		ret = sprintf(page, "\n");
	else
		ret = sprintf(page, "%s\n", dhchap_secret);
	up_read(&nvmet_config_sem);
	return ret;
}
static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	int ret;

	ret = nvmet_auth_set_key(host, page, true);
	/*
	 * Re-authentication is a soft state, so keep the
	 * current authentication valid until the host
	 * requests re-authentication.
	 */
	return ret < 0 ? ret : count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);

static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
		char *page)
{
	struct nvmet_host *host = to_host(item);
	const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);

	return sprintf(page, "%s\n", hash_name ? hash_name : "none");
}

static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	u8 hmac_id;

	hmac_id = nvme_auth_hmac_id(page);
	if (hmac_id == NVME_AUTH_HASH_INVALID)
		return -EINVAL;
	if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
		return -ENOTSUPP;
	host->dhchap_hash_id = hmac_id;
	return count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_hash);

static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
		char *page)
{
	struct nvmet_host *host = to_host(item);
	const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id);

	return sprintf(page, "%s\n", dhgroup ? dhgroup : "none");
}

static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_host *host = to_host(item);
	int dhgroup_id;

	dhgroup_id = nvme_auth_dhgroup_id(page);
	if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
		return -EINVAL;
	if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
		const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);

		if (!crypto_has_kpp(kpp, 0, 0))
			return -EINVAL;
	}
	host->dhchap_dhgroup_id = dhgroup_id;
	return count;
}

CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);

static struct configfs_attribute *nvmet_host_attrs[] = {
	&nvmet_host_attr_dhchap_key,
	&nvmet_host_attr_dhchap_ctrl_key,
	&nvmet_host_attr_dhchap_hash,
	&nvmet_host_attr_dhchap_dhgroup,
	NULL,
};
#endif /* CONFIG_NVME_TARGET_AUTH */
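/*
 * Illustrative example (not part of the driver): provisioning a
 * DH-HMAC-CHAP secret for a host entry.  The host directory name and the
 * key material are examples only; the "DHHC-1:00:<base64 key material>:"
 * form is the textual secret representation from the NVMe authentication
 * specification.
 *
 *   mkdir /sys/kernel/config/nvmet/hosts/hostnqn-example
 *   echo "DHHC-1:00:<base64 key material>:" > \
 *         /sys/kernel/config/nvmet/hosts/hostnqn-example/dhchap_key
 */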
static void nvmet_host_release(struct config_item *item)
{
	struct nvmet_host *host = to_host(item);

#ifdef CONFIG_NVME_TARGET_AUTH
	kfree(host->dhchap_secret);
	kfree(host->dhchap_ctrl_secret);
#endif
	kfree(host);
}

static struct configfs_item_operations nvmet_host_item_ops = {
	.release		= nvmet_host_release,
};

static const struct config_item_type nvmet_host_type = {
	.ct_item_ops		= &nvmet_host_item_ops,
#ifdef CONFIG_NVME_TARGET_AUTH
	.ct_attrs		= nvmet_host_attrs,
#endif
	.ct_owner		= THIS_MODULE,
};

static struct config_group *nvmet_hosts_make_group(struct config_group *group,
		const char *name)
{
	struct nvmet_host *host;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return ERR_PTR(-ENOMEM);

#ifdef CONFIG_NVME_TARGET_AUTH
	/* Default to SHA256 */
	host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
#endif

	config_group_init_type_name(&host->group, name, &nvmet_host_type);

	return &host->group;
}

static struct configfs_group_operations nvmet_hosts_group_ops = {
	.make_group		= nvmet_hosts_make_group,
};

static const struct config_item_type nvmet_hosts_type = {
	.ct_group_ops		= &nvmet_hosts_group_ops,
	.ct_owner		= THIS_MODULE,
};

static struct config_group nvmet_hosts_group;
static ssize_t nvmet_root_discovery_nqn_show(struct config_item *item,
		char *page)
{
	return snprintf(page, PAGE_SIZE, "%s\n", nvmet_disc_subsys->subsysnqn);
}

static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
		const char *page, size_t count)
{
	struct list_head *entry;
	size_t len;

	len = strcspn(page, "\n");
	if (!len || len > NVMF_NQN_FIELD_LEN - 1)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	list_for_each(entry, &nvmet_subsystems_group.cg_children) {
		struct config_item *item =
			container_of(entry, struct config_item, ci_entry);

		if (!strncmp(config_item_name(item), page, len)) {
			pr_err("duplicate NQN %s\n", config_item_name(item));
			up_write(&nvmet_config_sem);
			return -EINVAL;
		}
	}
	memset(nvmet_disc_subsys->subsysnqn, 0, NVMF_NQN_FIELD_LEN);
	memcpy(nvmet_disc_subsys->subsysnqn, page, len);
	up_write(&nvmet_config_sem);

	return len;
}

CONFIGFS_ATTR(nvmet_root_, discovery_nqn);

static struct configfs_attribute *nvmet_root_attrs[] = {
	&nvmet_root_attr_discovery_nqn,
	NULL,
};

static const struct config_item_type nvmet_root_type = {
	.ct_attrs		= nvmet_root_attrs,
	.ct_owner		= THIS_MODULE,
};
static struct configfs_subsystem nvmet_configfs_subsystem = {
	.su_group = {
		.cg_item = {
			.ci_namebuf	= "nvmet",
			.ci_type	= &nvmet_root_type,
		},
	},
};
int __init nvmet_init_configfs(void)
{
	int ret;

	config_group_init(&nvmet_configfs_subsystem.su_group);
	mutex_init(&nvmet_configfs_subsystem.su_mutex);

	config_group_init_type_name(&nvmet_subsystems_group,
			"subsystems", &nvmet_subsystems_type);
	configfs_add_default_group(&nvmet_subsystems_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_ports_group,
			"ports", &nvmet_ports_type);
	configfs_add_default_group(&nvmet_ports_group,
			&nvmet_configfs_subsystem.su_group);

	config_group_init_type_name(&nvmet_hosts_group,
			"hosts", &nvmet_hosts_type);
	configfs_add_default_group(&nvmet_hosts_group,
			&nvmet_configfs_subsystem.su_group);

	ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
	if (ret) {
		pr_err("configfs_register_subsystem: %d\n", ret);
		return ret;
	}

	return 0;
}

void __exit nvmet_exit_configfs(void)
{
	configfs_unregister_subsystem(&nvmet_configfs_subsystem);
}