// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Fabrics command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include "nvmet.h"
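
/*
 * Fabrics Property Set handler: only 4-byte writes are accepted, and the
 * only writable register is CC; an 8-byte attribute (attrib & 1) or any
 * other offset is rejected as Invalid Field in Command.
 */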
static void nvmet_execute_prop_set(struct nvmet_req *req)
{
	u64 val = le64_to_cpu(req->cmd->prop_set.value);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (req->cmd->prop_set.attrib & 1) {
		req->error_loc =
			offsetof(struct nvmf_property_set_command, attrib);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		goto out;
	}

	switch (le32_to_cpu(req->cmd->prop_set.offset)) {
	case NVME_REG_CC:
		nvmet_update_cc(req->sq->ctrl, val);
		break;
	default:
		req->error_loc =
			offsetof(struct nvmf_property_set_command, offset);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}
out:
	nvmet_req_complete(req, status);
}
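
/*
 * Fabrics Property Get handler: 8-byte reads (attrib & 1) return CAP,
 * 4-byte reads cover the other supported registers.  On failure the error
 * location points at the offset field for a failed 8-byte read and at the
 * attrib field otherwise.
 */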
static void nvmet_execute_prop_get(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;
	u64 val = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (req->cmd->prop_get.attrib & 1) {
		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
		case NVME_REG_CAP:
			val = ctrl->cap;
			break;
		default:
			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
			break;
		}
	} else {
		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
		case NVME_REG_VS:
			val = ctrl->subsys->ver;
			break;
		case NVME_REG_CC:
			val = ctrl->cc;
			break;
		case NVME_REG_CSTS:
			val = ctrl->csts;
			break;
		case NVME_REG_CRTO:	/* register offset assumed to be CRTO */
			val = NVME_CAP_TIMEOUT(ctrl->csts);
			break;
		default:
			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
			break;
		}
	}

	if (status && req->cmd->prop_get.attrib & 1) {
		req->error_loc =
			offsetof(struct nvmf_property_get_command, offset);
	} else {
		req->error_loc =
			offsetof(struct nvmf_property_get_command, attrib);
	}

	req->cqe->result.u64 = cpu_to_le64(val);
	nvmet_req_complete(req, status);
}
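
/*
 * Parse a fabrics command capsule received on the admin queue: property
 * set/get are always valid here, and the authentication capsules are
 * accepted when CONFIG_NVME_TARGET_AUTH is enabled.
 */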
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->fabrics.fctype) {
	case nvme_fabrics_type_property_set:
		req->execute = nvmet_execute_prop_set;
		break;
	case nvme_fabrics_type_property_get:
		req->execute = nvmet_execute_prop_get;
		break;
#ifdef CONFIG_NVME_TARGET_AUTH
	case nvme_fabrics_type_auth_send:
		req->execute = nvmet_execute_auth_send;
		break;
	case nvme_fabrics_type_auth_receive:
		req->execute = nvmet_execute_auth_receive;
		break;
#endif
	default:
		pr_debug("received unknown capsule type 0x%x\n",
			 cmd->fabrics.fctype);
		req->error_loc = offsetof(struct nvmf_common_command, fctype);
		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
	}

	return 0;
}
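
/*
 * Parse a fabrics command capsule received on an I/O queue: only the
 * authentication capsules are valid; everything else fails as an
 * invalid opcode.
 */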
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->fabrics.fctype) {
#ifdef CONFIG_NVME_TARGET_AUTH
	case nvme_fabrics_type_auth_send:
		req->execute = nvmet_execute_auth_send;
		break;
	case nvme_fabrics_type_auth_receive:
		req->execute = nvmet_execute_auth_receive;
		break;
#endif
	default:
		pr_debug("received unknown capsule type 0x%x\n",
			 cmd->fabrics.fctype);
		req->error_loc = offsetof(struct nvmf_common_command, fctype);
		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
	}

	return 0;
}
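
/*
 * Validate the Connect parameters and bind the queue to the controller:
 * reject a zero sqsize, an already created qid, or an I/O sqsize above
 * MQES, then set up the CQ/SQ (converting the 0's-based sqsize) and run
 * the transport's install_queue hook.  On a late failure the queue is
 * unbound again so the host can retry the Connect.
 */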
static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	u16 qid = le16_to_cpu(c->qid);
	u16 sqsize = le16_to_cpu(c->sqsize);
	struct nvmet_ctrl *old;
	u16 mqes = NVME_CAP_MQES(ctrl->cap);
	u16 ret;

	if (!sqsize) {
		pr_warn("queue size zero!\n");
		req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
		ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
		goto err;
	}

	if (ctrl->sqs[qid] != NULL) {
		pr_warn("qid %u has already been created\n", qid);
		req->error_loc = offsetof(struct nvmf_connect_command, qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
	}

	/* for fabrics, this value applies to only the I/O Submission Queues */
	if (qid && sqsize > mqes) {
		pr_warn("sqsize %u is larger than MQES supported %u cntlid %d\n",
			sqsize, mqes, ctrl->cntlid);
		req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
	}

	old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
	if (old) {
		pr_warn("queue already connected!\n");
		req->error_loc = offsetof(struct nvmf_connect_command, opcode);
		return NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
	}

	/* note: convert queue size from 0's-based value to 1's-based value */
	nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
	nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);

	if (c->cattr & NVME_CONNECT_DISABLE_SQFLOW) {
		req->sq->sqhd_disabled = true;
		req->cqe->sq_head = cpu_to_le16(0xffff);
	}

	if (ctrl->ops->install_queue) {
		ret = ctrl->ops->install_queue(req->sq);
		if (ret) {
			pr_err("failed to install queue %d cntlid %d ret %x\n",
			       qid, ctrl->cntlid, ret);
			ctrl->sqs[qid] = NULL;
			goto err;
		}
	}

	return 0;

err:
	req->sq->ctrl = NULL;
	return ret;
}
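
/*
 * Build the Connect completion result: the controller ID in the low 16
 * bits, plus the AUTHREQ flag when DH-HMAC-CHAP authentication is
 * required for this controller.
 */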
static u32 nvmet_connect_result(struct nvmet_ctrl *ctrl)
{
	return (u32)ctrl->cntlid |
		(nvmet_has_auth(ctrl) ? NVME_CONNECT_AUTHREQ_ATR : 0);
}
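
/*
 * Admin-queue Connect: requires record format 0 and a dynamic controller
 * ID (0xffff).  Allocates a new controller, sets up authentication if
 * configured and installs the admin queue.
 */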
static void nvmet_execute_admin_connect(struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	struct nvmf_connect_data *d;
	struct nvmet_ctrl *ctrl = NULL;
	u16 status;
	u8 dhchap_status;

	if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
		return;

	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto complete;
	}

	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
	if (status)
		goto out;

	if (c->recfmt != 0) {
		pr_warn("invalid connect version (%d).\n",
			le16_to_cpu(c->recfmt));
		req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
		status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
		goto out;
	}

	if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
		pr_warn("connect attempt for invalid controller ID %#x\n",
			d->cntlid);
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
		goto out;
	}

	d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
	d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
	status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
				  le32_to_cpu(c->kato), &ctrl, &d->hostid);
	if (status)
		goto out;

	dhchap_status = nvmet_setup_auth(ctrl);
	if (dhchap_status) {
		pr_err("Failed to setup authentication, dhchap status %u\n",
		       dhchap_status);
		nvmet_ctrl_put(ctrl);
		if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED)
			status = (NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR);
		else
			status = NVME_SC_INTERNAL;
		goto out;
	}

	status = nvmet_install_queue(ctrl, req);
	if (status) {
		nvmet_ctrl_put(ctrl);
		goto out;
	}

	pr_info("creating %s controller %d for subsystem %s for NQN %s%s%s.\n",
		nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
		ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
		ctrl->pi_support ? " T10-PI is enabled" : "",
		nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
	req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
out:
	kfree(d);
complete:
	nvmet_req_complete(req, status);
}
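
/*
 * I/O-queue Connect: looks up the existing controller by subsystem NQN,
 * host NQN and controller ID, checks the queue ID against max_qid and
 * installs the I/O queue on that controller.
 */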
static void nvmet_execute_io_connect(struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	struct nvmf_connect_data *d;
	struct nvmet_ctrl *ctrl;
	u16 qid = le16_to_cpu(c->qid);
	u16 status;

	if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
		return;

	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto complete;
	}

	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
	if (status)
		goto out;

	if (c->recfmt != 0) {
		pr_warn("invalid connect version (%d).\n",
			le16_to_cpu(c->recfmt));
		status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
		goto out;
	}

	d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
	d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
	ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
				   le16_to_cpu(d->cntlid), req);
	if (!ctrl) {
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
		goto out;
	}

	if (unlikely(qid > ctrl->subsys->max_qid)) {
		pr_warn("invalid queue id (%d)\n", qid);
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
		goto out_ctrl_put;
	}

	status = nvmet_install_queue(ctrl, req);
	if (status)
		goto out_ctrl_put;

	pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
	req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
out:
	kfree(d);
complete:
	nvmet_req_complete(req, status);
	return;

out_ctrl_put:
	nvmet_ctrl_put(ctrl);
	goto out;
}
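
/*
 * Commands on a not-yet-connected queue: only a fabrics Connect capsule
 * is accepted; qid 0 selects the admin connect handler, everything else
 * the I/O connect handler.
 */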
u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (!nvme_is_fabrics(cmd)) {
		pr_debug("invalid command 0x%x on unconnected queue.\n",
			cmd->fabrics.opcode);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
	}
	if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
		pr_debug("invalid capsule type 0x%x on unconnected queue.\n",
			cmd->fabrics.fctype);
		req->error_loc = offsetof(struct nvmf_common_command, fctype);
		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
	}

	if (cmd->connect.qid == 0)
		req->execute = nvmet_execute_admin_connect;
	else
		req->execute = nvmet_execute_io_connect;
	return 0;
}