// SPDX-License-Identifier: GPL-2.0
/*
 * Discovery service for the NVMe over Fabrics target.
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <generated/utsrelease.h>
#include "nvmet.h"

struct nvmet_subsys *nvmet_disc_subsys;

static u64 nvmet_genctr;

static void __nvmet_disc_changed(struct nvmet_port *port,
				 struct nvmet_ctrl *ctrl)
{
	if (ctrl->port != port)
		return;

	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
		return;

	nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
			      NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}

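/*
 * Raise a Discovery Log Change AEN on every discovery controller that was
 * created through @port. If @subsys is non-NULL, only controllers whose
 * host is allowed to access that subsystem are notified. The transport is
 * also given a chance to signal the change via its optional
 * ->discovery_chg() callback.
 */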
void nvmet_port_disc_changed(struct nvmet_port *port,
			     struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&nvmet_config_sem);
	nvmet_genctr++;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);

	/* If transport can signal change, notify transport */
	if (port->tr_ops && port->tr_ops->discovery_chg)
		port->tr_ops->discovery_chg(port);
}

static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
					struct nvmet_subsys *subsys,
					struct nvmet_host *host)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);
}

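/*
 * Raise a Discovery Log Change AEN for every port that exports @subsys.
 * If @host is non-NULL, only discovery controllers created by that host
 * are notified.
 */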
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
			       struct nvmet_host *host)
{
	struct nvmet_port *port;
	struct nvmet_subsys_link *s;

	nvmet_genctr++;

	list_for_each_entry(port, nvmet_ports, global_entry)
		list_for_each_entry(s, &port->subsystems, entry) {
			if (s->subsys != subsys)
				continue;
			__nvmet_subsys_disc_changed(port, subsys, host);
		}
}

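/*
 * Add @port to @parent's referral list so that it shows up as a referral
 * entry in @parent's discovery log page, and notify discovery controllers
 * of the change.
 */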
void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (list_empty(&port->entry)) {
		list_add_tail(&port->entry, &parent->referrals);
		port->enabled = true;
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

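/*
 * Remove @port from @parent's referral list and notify discovery
 * controllers that the discovery log page has changed.
 */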
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (!list_empty(&port->entry)) {
		port->enabled = false;
		list_del_init(&port->entry);
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}

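/*
 * Fill discovery log page entry @numrec in @hdr with the transport address
 * of @port and the given subsystem NQN. Only dynamic controllers are
 * advertised.
 */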
static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
		struct nvmet_port *port, char *subsys_nqn, char *traddr,
		u8 type, u32 numrec)
{
	struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];

	e->trtype = port->disc_addr.trtype;
	e->adrfam = port->disc_addr.adrfam;
	e->treq = port->disc_addr.treq;
	e->portid = port->disc_addr.portid;
	/* we support only dynamic controllers */
	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
	e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
	e->subtype = type;
	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
	memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
	memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
	strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}

/*
 * nvmet_set_disc_traddr - set a correct discovery log entry traddr
 *
 * IP based transports (e.g. RDMA) can listen on "any" ipv4/ipv6 addresses
 * (INADDR_ANY or IN6ADDR_ANY_INIT). The discovery log page traddr reply
 * must not contain that "any" IP address. If the transport implements
 * .disc_traddr, use it; that callback sets the discovery traddr from the
 * req->port address in case the port in question listens on an "any"
 * IP address.
 */
static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
		char *traddr)
{
	if (req->ops->disc_traddr)
		req->ops->disc_traddr(req, port, traddr);
	else
		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
}

static size_t discovery_log_entries(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	size_t entries = 0;

	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;
		entries++;
	}
	list_for_each_entry(r, &req->port->referrals, entry)
		entries++;
	return entries;
}

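/*
 * Build the discovery log page for the requesting host: one entry per
 * allowed subsystem on the port plus one per referral, copied back to the
 * host at the requested (dword aligned) offset. Reading the log also
 * clears the pending Discovery Log Change AEN bit.
 */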
static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
{
	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_disc_rsp_page_hdr *hdr;
	u64 offset = nvmet_get_log_page_offset(req->cmd);
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	size_t alloc_len;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	u32 numrec = 0;
	u16 status = 0;
	void *buffer;

	if (!nvmet_check_data_len(req, data_len))
		return;

	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lid);
		status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		goto out;
	}

	/* Spec requires dword aligned offsets */
	if (offset & 0x3) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	/*
	 * Make sure we're passing at least a buffer of response header size.
	 * If the data length provided by the host is less than the header
	 * size, only the number of bytes requested by the host will be sent.
	 */
	down_read(&nvmet_config_sem);
	alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
	buffer = kzalloc(alloc_len, GFP_KERNEL);
	if (!buffer) {
		up_read(&nvmet_config_sem);
		status = NVME_SC_INTERNAL;
		goto out;
	}

	hdr = buffer;
	list_for_each_entry(p, &req->port->subsystems, entry) {
		char traddr[NVMF_TRADDR_SIZE];

		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;

		nvmet_set_disc_traddr(req, req->port, traddr);
		nvmet_format_discovery_entry(hdr, req->port,
				p->subsys->subsysnqn, traddr,
				NVME_NQN_NVME, numrec);
		numrec++;
	}

	list_for_each_entry(r, &req->port->referrals, entry) {
		nvmet_format_discovery_entry(hdr, r,
				NVME_DISC_SUBSYS_NAME,
				r->disc_addr.traddr,
				NVME_NQN_DISC, numrec);
		numrec++;
	}

	hdr->genctr = cpu_to_le64(nvmet_genctr);
	hdr->numrec = cpu_to_le64(numrec);
	hdr->recfmt = cpu_to_le16(0);

	nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);

	up_read(&nvmet_config_sem);

	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
	kfree(buffer);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	const char model[] = "Linux";
	u16 status = 0;

	if (!nvmet_check_data_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
		req->error_loc = offsetof(struct nvme_identify, cns);
		status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	memset(id->fr, ' ', sizeof(id->fr));
	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);
	id->lpa = (1 << 2);

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_disc_set_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat;

	if (!nvmet_check_data_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		stat = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		stat = nvmet_set_feat_async_event(req,
						  NVMET_DISC_AEN_CFG_OPTIONAL);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

static void nvmet_execute_disc_get_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat = 0;

	if (!nvmet_check_data_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}

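/*
 * Admin command parser for discovery controllers: the controller must be
 * enabled (CSTS.RDY set), and only Get Log Page, Identify, Get/Set
 * Features, Keep Alive and Asynchronous Event Request are accepted.
 */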
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while not ready\n",
		       cmd->common.opcode);
		req->error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		req->execute = nvmet_execute_disc_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_disc_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_disc_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_disc_identify;
		return 0;
	default:
		pr_err("unhandled cmd %d\n", cmd->common.opcode);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

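/*
 * Allocate the well-known discovery subsystem at init time; it is released
 * again in nvmet_exit_discovery().
 */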
int __init nvmet_init_discovery(void)
{
	nvmet_disc_subsys =
		nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
	return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
}

void nvmet_exit_discovery(void)
{
	nvmet_subsys_put(nvmet_disc_subsys);
}