/*
 * Discovery service for the NVMe over Fabrics target.
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <generated/utsrelease.h>
#include "nvmet.h"
struct nvmet_subsys *nvmet_disc_subsys;

static u64 nvmet_genctr;
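
/*
 * Queue a discovery-log-changed AEN to a single controller, but only if
 * the controller is connected through @port and has not disabled the
 * discovery change AEN bit.
 */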
static void __nvmet_disc_changed(struct nvmet_port *port,
				 struct nvmet_ctrl *ctrl)
{
	if (ctrl->port != port)
		return;

	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
		return;

	nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
			      NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}
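
/*
 * Notify discovery controllers that the set of subsystems reachable
 * through @port has changed. If @subsys is non-NULL, only hosts allowed
 * to access that subsystem are notified. Bumps the discovery log page
 * generation counter so hosts can detect the change.
 */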
void nvmet_port_disc_changed(struct nvmet_port *port,
			     struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	nvmet_genctr++;

	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
}
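
/*
 * Notify the discovery controllers belonging to @host (or to all hosts
 * when @host is NULL) that @subsys changed on @port.
 */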
static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
					struct nvmet_subsys *subsys,
					struct nvmet_host *host)
{
	struct nvmet_ctrl *ctrl;

	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
}
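
/*
 * Walk every configured port and notify discovery controllers on each
 * port that exports @subsys. Called when the state of @subsys changes
 * in a way that affects the discovery log.
 */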
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
			       struct nvmet_host *host)
{
	struct nvmet_port *port;
	struct nvmet_subsys_link *s;

	nvmet_genctr++;

	list_for_each_entry(port, nvmet_ports, global_entry)
		list_for_each_entry(s, &port->subsystems, entry) {
			if (s->subsys != subsys)
				continue;
			__nvmet_subsys_disc_changed(port, subsys, host);
		}
}
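
/*
 * Add @port to @parent's referral list and let discovery controllers
 * on @parent know that the discovery log changed.
 */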
void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (list_empty(&port->entry)) {
		list_add_tail(&port->entry, &parent->referrals);
		port->enabled = true;
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}
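
/*
 * Remove @port from @parent's referral list, mirroring
 * nvmet_referral_enable() above.
 */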
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (!list_empty(&port->entry)) {
		port->enabled = false;
		list_del_init(&port->entry);
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}
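
/*
 * Fill discovery log page entry number @numrec in @hdr from the
 * transport address of @port, with the supplied subsystem NQN,
 * transport address string and subsystem type.
 */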
static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
		struct nvmet_port *port, char *subsys_nqn, char *traddr,
		u8 type, u32 numrec)
{
	struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];

	e->trtype = port->disc_addr.trtype;
	e->adrfam = port->disc_addr.adrfam;
	e->treq = port->disc_addr.treq;
	e->portid = port->disc_addr.portid;
	/* we support only dynamic controllers */
	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
	e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
	e->subtype = type;
	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
	memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
	memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
	strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}
/*
 * nvmet_set_disc_traddr - set a correct discovery log entry traddr
 *
 * IP based transports (e.g. RDMA) can listen on "any" IPv4/IPv6 address
 * (INADDR_ANY or IN6ADDR_ANY_INIT). The discovery log page traddr reply
 * must not contain that "any" IP address. If the transport implements
 * .disc_traddr, use it; the callback sets the discovery traddr from the
 * req->port address in case the port in question listens on an "any"
 * IP address.
 */
static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
		char *traddr)
{
	if (req->ops->disc_traddr)
		req->ops->disc_traddr(req, port, traddr);
	else
		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
}
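
/*
 * Build the discovery log page for the requesting host: one entry for
 * each subsystem on this port that the host may access, plus one entry
 * for each referral. Entries that do not fit in the host's buffer are
 * still counted in numrec but not copied, so the host can detect a
 * short read and retry with a larger buffer.
 */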
static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
{
	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_disc_rsp_page_hdr *hdr;
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	size_t alloc_len = max(data_len, sizeof(*hdr));
	int residual_len = data_len - sizeof(*hdr);
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	u32 numrec = 0;
	u16 status = 0;

	/*
	 * Make sure we're passing at least a buffer of response header size.
	 * If the host-provided data length is less than the header size,
	 * only the number of bytes requested by the host will be sent.
	 */
	hdr = kzalloc(alloc_len, GFP_KERNEL);
	if (!hdr) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;
		if (residual_len >= entry_size) {
			char traddr[NVMF_TRADDR_SIZE];

			nvmet_set_disc_traddr(req, req->port, traddr);
			nvmet_format_discovery_entry(hdr, req->port,
					p->subsys->subsysnqn, traddr,
					NVME_NQN_NVME, numrec);
			residual_len -= entry_size;
		}
		numrec++;
	}

	list_for_each_entry(r, &req->port->referrals, entry) {
		if (residual_len >= entry_size) {
			nvmet_format_discovery_entry(hdr, r,
					NVME_DISC_SUBSYS_NAME,
					r->disc_addr.traddr,
					NVME_NQN_DISC, numrec);
			residual_len -= entry_size;
		}
		numrec++;
	}

	hdr->genctr = cpu_to_le64(nvmet_genctr);
	hdr->numrec = cpu_to_le64(numrec);
	hdr->recfmt = cpu_to_le16(0);

	nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);

	up_read(&nvmet_config_sem);

	status = nvmet_copy_to_sgl(req, 0, hdr, data_len);
	kfree(hdr);
out:
	nvmet_req_complete(req, status);
}
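
/*
 * Identify Controller for the discovery subsystem: fill the controller
 * data structure with the target's capabilities, using the kernel
 * release as the firmware revision string.
 */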
static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	memset(id->fr, ' ', sizeof(id->fr));
	strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);
	id->lpa = (1 << 2);

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}
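
/*
 * Discovery controllers only implement the Keep Alive Timer and
 * Asynchronous Event Configuration features; any other feature fails
 * with Invalid Field.
 */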
static void nvmet_execute_disc_set_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		stat = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		stat = nvmet_set_feat_async_event(req,
						  NVMET_DISC_AEN_CFG_OPTIONAL);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}
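
/*
 * Get Features counterpart of nvmet_execute_disc_set_features() above,
 * limited to the same two features.
 */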
static void nvmet_execute_disc_get_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat = 0;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}
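
/*
 * Dispatch an admin command received on a discovery controller queue.
 * Only the small subset of admin commands allowed on discovery
 * controllers is accepted; anything else fails with Invalid Opcode.
 */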
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while not ready\n",
		       cmd->common.opcode);
		req->error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		req->execute = nvmet_execute_disc_set_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_disc_get_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		req->data_len = 0;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		req->data_len = 0;
		return 0;
	case nvme_admin_get_log_page:
		req->data_len = nvmet_get_log_page_len(cmd);

		switch (cmd->get_log_page.lid) {
		case NVME_LOG_DISC:
			req->execute = nvmet_execute_get_disc_log_page;
			return 0;
		default:
			pr_err("unsupported get_log_page lid %d\n",
			       cmd->get_log_page.lid);
			req->error_loc =
				offsetof(struct nvme_get_log_page_command, lid);
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
	case nvme_admin_identify:
		req->data_len = NVME_IDENTIFY_DATA_SIZE;
		switch (cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			req->execute =
				nvmet_execute_identify_disc_ctrl;
			return 0;
		default:
			pr_err("unsupported identify cns %d\n",
			       cmd->identify.cns);
			req->error_loc = offsetof(struct nvme_identify, cns);
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
	default:
		pr_err("unhandled cmd %d\n", cmd->common.opcode);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}
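
/*
 * Allocate the well-known discovery subsystem at module init; it is
 * released again in nvmet_exit_discovery() below.
 */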
int __init nvmet_init_discovery(void)
{
	nvmet_disc_subsys =
		nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
	if (!nvmet_disc_subsys)
		return -ENOMEM;
	return 0;
}
void nvmet_exit_discovery(void)
{
	nvmet_subsys_put(nvmet_disc_subsys);
}