/*
 * Discovery service for the NVMe over Fabrics target.
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <generated/utsrelease.h>
#include "nvmet.h"

struct nvmet_subsys *nvmet_disc_subsys;

u64 nvmet_genctr;

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (list_empty(&port->entry)) {
		list_add_tail(&port->entry, &parent->referrals);
		port->enabled = true;
		/* bump the generation counter so hosts re-read the log */
		nvmet_genctr++;
	}
	up_write(&nvmet_config_sem);
}

void nvmet_referral_disable(struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (!list_empty(&port->entry)) {
		port->enabled = false;
		list_del_init(&port->entry);
		nvmet_genctr++;
	}
	up_write(&nvmet_config_sem);
}

static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
		struct nvmet_port *port, char *subsys_nqn, u8 type, u32 numrec)
{
	struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];

	e->trtype = port->disc_addr.trtype;
	e->adrfam = port->disc_addr.adrfam;
	e->treq = port->disc_addr.treq;
	e->portid = port->disc_addr.portid;
	/* we support only dynamic controllers */
	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
	e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH);
	e->subtype = type;
	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
	memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
	memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
	memcpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}

static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
{
	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_disc_rsp_page_hdr *hdr;
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	size_t alloc_len = max(data_len, sizeof(*hdr));
	int residual_len = data_len - sizeof(*hdr);
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	u32 numrec = 0;
	u16 status = 0;

	/*
	 * Make sure we're passing at least a buffer of response header size.
	 * If host provided data len is less than the header size, only the
	 * number of bytes requested by host will be sent to host.
	 */
	hdr = kzalloc(alloc_len, GFP_KERNEL);
	if (!hdr) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	down_read(&nvmet_config_sem);
	/* add an entry for each subsystem this host is allowed to reach */
	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(req, p->subsys, ctrl->hostnqn))
			continue;
		if (residual_len >= entry_size) {
			nvmet_format_discovery_entry(hdr, req->port,
					p->subsys->subsysnqn,
					NVME_NQN_NVME, numrec);
			residual_len -= entry_size;
		}
		numrec++;
	}

	/* add an entry for each referral configured on this port */
	list_for_each_entry(r, &req->port->referrals, entry) {
		if (residual_len >= entry_size) {
			nvmet_format_discovery_entry(hdr, r,
					NVME_DISC_SUBSYS_NAME,
					NVME_NQN_DISC, numrec);
			residual_len -= entry_size;
		}
		numrec++;
	}

	hdr->genctr = cpu_to_le64(nvmet_genctr);
	hdr->numrec = cpu_to_le64(numrec);
	hdr->recfmt = cpu_to_le16(0);

	up_read(&nvmet_config_sem);

	status = nvmet_copy_to_sgl(req, 0, hdr, data_len);
	kfree(hdr);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	memset(id->fr, ' ', sizeof(id->fr));
	strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);
	id->lpa = (1 << 2);

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (ctrl->ops->sqe_inline_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strcpy(id->subnqn, ctrl->subsys->subsysnqn);

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

int nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	req->ns = NULL;

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("nvmet: got cmd %d while not ready\n",
		       cmd->common.opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	/* discovery controllers only support Get Log Page and Identify */
	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->data_len = nvmet_get_log_page_len(cmd);

		switch (cmd->get_log_page.lid) {
		case NVME_LOG_DISC:
			req->execute = nvmet_execute_get_disc_log_page;
			return 0;
		default:
			pr_err("nvmet: unsupported get_log_page lid %d\n",
			       cmd->get_log_page.lid);
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
	case nvme_admin_identify:
		req->data_len = 4096;
		switch (le32_to_cpu(cmd->identify.cns)) {
		case NVME_ID_CNS_CTRL:
			req->execute =
				nvmet_execute_identify_disc_ctrl;
			return 0;
		default:
			pr_err("nvmet: unsupported identify cns %d\n",
			       le32_to_cpu(cmd->identify.cns));
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
	default:
		pr_err("nvmet: unsupported cmd %d\n", cmd->common.opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}

int __init nvmet_init_discovery(void)
{
	nvmet_disc_subsys =
		nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
	if (!nvmet_disc_subsys)
		return -ENOMEM;
	return 0;
}

void nvmet_exit_discovery(void)
{
	nvmet_subsys_put(nvmet_disc_subsys);
}