/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"
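/*
 * Decode the Get Log Page dword count (NUMDU/NUMDL, a 0's based dword count)
 * into the transfer length in bytes.
 */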
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
        u32 len = le16_to_cpu(cmd->get_log_page.numdu);

        len <<= 16;
        len += le16_to_cpu(cmd->get_log_page.numdl);
        /* NUMD is a 0's based value */
        len += 1;
        len *= sizeof(u32);

        return len;
}
static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
        nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
}
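/*
 * Copy the stored error log slots to the host, newest entry first, wrapping
 * around the fixed-size slot array.
 */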
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 status = NVME_SC_SUCCESS;
        unsigned long flags;
        off_t offset = 0;
        u64 slot;
        u64 i;

        spin_lock_irqsave(&ctrl->error_lock, flags);
        slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

        for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
                status = nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
                                sizeof(struct nvme_error_slot));
                if (status)
                        break;

                if (slot == 0)
                        slot = NVMET_ERROR_LOG_SLOTS - 1;
                else
                        slot--;
                offset += sizeof(struct nvme_error_slot);
        }
        spin_unlock_irqrestore(&ctrl->error_lock, flags);
        nvmet_req_complete(req, status);
}
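/*
 * Fill the per-namespace SMART counters from the backing block device
 * statistics.  File-backed namespaces have no such statistics and are
 * reported as zeroes.
 */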
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
                struct nvme_smart_log *slog)
{
        struct nvmet_ns *ns;
        u64 host_reads, host_writes, data_units_read, data_units_written;

        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
        if (!ns) {
                pr_err("Could not find namespace id : %d\n",
                                le32_to_cpu(req->cmd->get_log_page.nsid));
                req->error_loc = offsetof(struct nvme_rw_command, nsid);
                return NVME_SC_INVALID_NS;
        }

        /* we don't have the right data for file backed ns */
        if (!ns->bdev)
                goto out;

        host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
        data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
        host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
        data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
out:
        nvmet_put_namespace(ns);

        return NVME_SC_SUCCESS;
}
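/*
 * Accumulate SMART counters across all namespaces in the subsystem for a
 * Get Log Page request with the NSID set to NVME_NSID_ALL.
 */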
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
                struct nvme_smart_log *slog)
{
        u64 host_reads = 0, host_writes = 0;
        u64 data_units_read = 0, data_units_written = 0;
        struct nvmet_ns *ns;
        struct nvmet_ctrl *ctrl;

        ctrl = req->sq->ctrl;

        rcu_read_lock();
        list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
                /* we don't have the right data for file backed ns */
                if (!ns->bdev)
                        continue;
                host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
                data_units_read +=
                        part_stat_read(ns->bdev->bd_part, sectors[READ]);
                host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
                data_units_written +=
                        part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
        }
        rcu_read_unlock();

        put_unaligned_le64(host_reads, &slog->host_reads[0]);
        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
        put_unaligned_le64(host_writes, &slog->host_writes[0]);
        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

        return NVME_SC_SUCCESS;
}
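/*
 * Build the SMART / Health Information log page.  Depending on the NSID the
 * counters come from a single namespace or from the whole subsystem, and the
 * error log entry count is sampled under the controller's error_lock.
 */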
static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
        struct nvme_smart_log *log;
        u16 status = NVME_SC_INTERNAL;
        unsigned long flags;

        if (req->data_len != sizeof(*log))
                goto out;

        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
                goto out;

        if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
                status = nvmet_get_smart_log_all(req, log);
        else
                status = nvmet_get_smart_log_nsid(req, log);
        if (status)
                goto out_free_log;

        spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
        put_unaligned_le64(req->sq->ctrl->err_counter,
                        &log->num_err_log_entries);
        spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
        kfree(log);
out:
        nvmet_req_complete(req, status);
}
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
        u16 status = NVME_SC_INTERNAL;
        struct nvme_effects_log *log;

        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
                goto out;

        log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
        log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);

        log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
        log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);

        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

        kfree(log);
out:
        nvmet_req_complete(req, status);
}
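/*
 * Report the Changed Namespace List log page and clear both the pending list
 * and the namespace-attribute AEN bit, since reading the page acknowledges
 * the event.
 */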
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 status = NVME_SC_INTERNAL;
        size_t len;

        if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
                goto out;

        mutex_lock(&ctrl->lock);
        if (ctrl->nr_changed_ns == U32_MAX)
                len = sizeof(__le32);
        else
                len = ctrl->nr_changed_ns * sizeof(__le32);

        status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
        if (!status)
                status = nvmet_zero_sgl(req, len, req->data_len - len);
        ctrl->nr_changed_ns = 0;
        nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
        mutex_unlock(&ctrl->lock);
out:
        nvmet_req_complete(req, status);
}
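/*
 * Format a single ANA group descriptor.  Unless the host asked for
 * "return groups only" (RGO), the NSIDs belonging to the group are listed
 * after the descriptor header.
 */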
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
                struct nvme_ana_group_desc *desc)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        u32 count = 0;

        if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
                rcu_read_lock();
                list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
                        if (ns->anagrpid == grpid)
                                desc->nsids[count++] = cpu_to_le32(ns->nsid);
                rcu_read_unlock();
        }

        desc->grpid = cpu_to_le32(grpid);
        desc->nnsids = cpu_to_le32(count);
        desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
        desc->state = req->port->ana_state[grpid];
        memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
        return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}
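/*
 * Build the Asymmetric Namespace Access log page: one descriptor per enabled
 * ANA group, followed by a header that is written last, once the final group
 * count is known.
 */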
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
        struct nvme_ana_rsp_hdr hdr = { 0, };
        struct nvme_ana_group_desc *desc;
        size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
        size_t len;
        u32 grpid;
        u16 ngrps = 0;
        u16 status;

        status = NVME_SC_INTERNAL;
        desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
                        NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
        if (!desc)
                goto out;

        down_read(&nvmet_ana_sem);
        for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
                if (!nvmet_ana_group_enabled[grpid])
                        continue;
                len = nvmet_format_ana_group(req, grpid, desc);
                status = nvmet_copy_to_sgl(req, offset, desc, len);
                if (status)
                        break;
                offset += len;
                ngrps++;
        }
        for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
                if (nvmet_ana_group_enabled[grpid])
                        ngrps++;
        }

        hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
        hdr.ngrps = cpu_to_le16(ngrps);
        nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
        up_read(&nvmet_ana_sem);

        kfree(desc);

        /* copy the header last once we know the number of groups */
        status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
        nvmet_req_complete(req, status);
}
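/*
 * Build the Identify Controller data structure.  Most fields are synthetic:
 * the serial number comes from the subsystem configuration, the model is
 * "Linux" and the firmware revision is the running kernel release.
 */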
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_id_ctrl *id;
        u16 status = 0;
        const char model[] = "Linux";

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        /* XXX: figure out how to assign real vendor IDs. */
        id->vid = 0;
        id->ssvid = 0;

        memset(id->sn, ' ', sizeof(id->sn));
        bin2hex(id->sn, &ctrl->subsys->serial,
                min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
        memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
        memcpy_and_pad(id->fr, sizeof(id->fr),
                       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

        /*
         * XXX: figure out how we can assign an IEEE OUI, but until then
         * the safest is to leave it as zeroes.
         */

        /* we support multiple ports, multiple hosts and ANA: */
        id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

        /* no limit on data transfer sizes for now */
        id->mdts = 0;
        id->cntlid = cpu_to_le16(ctrl->cntlid);
        id->ver = cpu_to_le32(ctrl->subsys->ver);

        /* XXX: figure out what to do about RTD3R/RTD3 */
        id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
        id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
                NVME_CTRL_ATTR_TBKAS);

        /*
         * We don't really have a practical limit on the number of abort
         * commands.  But we don't do anything useful for abort either, so
         * no point in allowing more abort commands than the spec requires.
         */
        id->acl = 3;

        id->aerl = NVMET_ASYNC_EVENTS - 1;

        /* first slot is read-only, only one slot supported */
        id->frmw = (1 << 0) | (1 << 1);
        id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
        id->elpe = NVMET_ERROR_LOG_SLOTS - 1;

        /* We support keep-alive timeout in granularity of seconds */
        id->kas = cpu_to_le16(NVMET_KAS);

        id->sqes = (0x6 << 4) | 0x6;
        id->cqes = (0x4 << 4) | 0x4;

        /* no enforcement soft-limit for maxcmd - pick arbitrary high value */
        id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

        id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
        id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
        id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
                        NVME_CTRL_ONCS_WRITE_ZEROES);

        /* XXX: don't report vwc if the underlying device is write through */
        id->vwc = NVME_CTRL_VWC_PRESENT;

        /*
         * We can't support atomic writes bigger than a LBA without support
         * from the backend device.
         */
        id->awun = 0;
        id->awupf = 0;

        id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
        if (ctrl->ops->has_keyed_sgls)
                id->sgls |= cpu_to_le32(1 << 2);
        if (req->port->inline_data_size)
                id->sgls |= cpu_to_le32(1 << 20);

        strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

        /* Max command capsule size is sqe + single page of in-capsule data */
        id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
                                  req->port->inline_data_size) / 16);
        /* Max response capsule size is cqe */
        id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

        id->msdbd = ctrl->ops->msdbd;

        id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
        id->anatt = 10; /* random value */
        id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
        id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

        /*
         * Meh, we don't really support any power state.  Fake up the same
         * values that qemu does.
         */
        id->psd[0].max_power = cpu_to_le16(0x9c4);
        id->psd[0].entry_lat = cpu_to_le32(0x10);
        id->psd[0].exit_lat = cpu_to_le32(0x4);

        id->nwpc = 1 << 0; /* write protect and no write protect */

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

        kfree(id);
out:
        nvmet_req_complete(req, status);
}
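/*
 * Build the Identify Namespace data structure.  An NSID that does not map to
 * an active namespace returns an all-zeroes buffer rather than an error.
 */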
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
        struct nvmet_ns *ns;
        struct nvme_id_ns *id;
        u16 status = 0;

        if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
                status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                goto out;
        }

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        /* return an all zeroed buffer if we can't find an active namespace */
        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
        if (!ns)
                goto done;

        /*
         * nuse = ncap = nsze isn't always true, but we have no way to find
         * that out from the underlying device.
         */
        id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
        switch (req->port->ana_state[ns->anagrpid]) {
        case NVME_ANA_INACCESSIBLE:
        case NVME_ANA_PERSISTENT_LOSS:
                break;
        default:
                id->nuse = id->nsze;
                break;
        }

        /*
         * We just provide a single LBA format that matches what the
         * underlying device reports.
         */
        id->nlbaf = 0;
        id->flbas = 0;

        /*
         * Our namespace might always be shared.  Not just with other
         * controllers, but also with any other user of the block device.
         */
        id->nmic = (1 << 0);
        id->anagrpid = cpu_to_le32(ns->anagrpid);

        memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));

        id->lbaf[0].ds = ns->blksize_shift;

        if (ns->readonly)
                id->nsattr |= (1 << 0);
        nvmet_put_namespace(ns);
done:
        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
        kfree(id);
out:
        nvmet_req_complete(req, status);
}
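/*
 * Return the Active Namespace ID list: all NSIDs greater than the NSID in
 * the command, in ascending order, up to one page worth of entries.
 */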
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
        static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
        __le32 *list;
        u16 status = 0;
        int i = 0;

        list = kzalloc(buf_size, GFP_KERNEL);
        if (!list) {
                status = NVME_SC_INTERNAL;
                goto out;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
                if (ns->nsid <= min_nsid)
                        continue;
                list[i++] = cpu_to_le32(ns->nsid);
                if (i == buf_size / sizeof(__le32))
                        break;
        }
        rcu_read_unlock();

        status = nvmet_copy_to_sgl(req, 0, list, buf_size);

        kfree(list);
out:
        nvmet_req_complete(req, status);
}
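/*
 * Append one Namespace Identification Descriptor (header plus identifier
 * payload) to the response buffer and advance the running offset.
 */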
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
                                    void *id, off_t *off)
{
        struct nvme_ns_id_desc desc = {
                .nidt = type,
                .nidl = len,
        };
        u16 status;

        status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
        if (status)
                return status;
        *off += sizeof(desc);

        status = nvmet_copy_to_sgl(req, *off, id, len);
        if (status)
                return status;
        *off += len;

        return 0;
}
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
        struct nvmet_ns *ns;
        u16 status = 0;
        off_t off = 0;

        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
        if (!ns) {
                req->error_loc = offsetof(struct nvme_identify, nsid);
                status = NVME_SC_INVALID_NS | NVME_SC_DNR;
                goto out;
        }

        if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
                                                  NVME_NIDT_UUID_LEN,
                                                  &ns->uuid, &off);
                if (status)
                        goto out_put_ns;
        }
        if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
                status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
                                                  NVME_NIDT_NGUID_LEN,
                                                  &ns->nguid, &off);
                if (status)
                        goto out_put_ns;
        }

        if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
                        off) != NVME_IDENTIFY_DATA_SIZE - off)
                status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
        nvmet_put_namespace(ns);
out:
        nvmet_req_complete(req, status);
}
/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't bother even with waiting for the command
 * to be executed and return immediately telling the command to abort
 * wasn't successful.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
        nvmet_set_result(req, 1);
        nvmet_req_complete(req, 0);
}
static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
        u16 status;

        if (req->ns->file)
                status = nvmet_file_flush(req);
        else
                status = nvmet_bdev_flush(req);

        if (status)
                pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
        return status;
}
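/*
 * Set Features (Write Protect): mark the namespace read-only (flushing any
 * pending data first) or writable, and signal a namespace-changed event on
 * success.
 */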
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
        u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

        req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
        if (unlikely(!req->ns)) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
                return status;
        }

        mutex_lock(&subsys->lock);
        switch (write_protect) {
        case NVME_NS_WRITE_PROTECT:
                req->ns->readonly = true;
                status = nvmet_write_protect_flush_sync(req);
                if (status)
                        req->ns->readonly = false;
                break;
        case NVME_NS_NO_WRITE_PROTECT:
                req->ns->readonly = false;
                status = 0;
                break;
        default:
                break;
        }

        if (!status)
                nvmet_ns_changed(subsys, req->ns->nsid);
        mutex_unlock(&subsys->lock);
        return status;
}
u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
        u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

        req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);

        nvmet_set_result(req, req->sq->ctrl->kato);

        return 0;
}
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
        u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

        if (val32 & ~mask) {
                req->error_loc = offsetof(struct nvme_common_command, cdw11);
                return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }

        WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
        nvmet_set_result(req, val32);

        return 0;
}
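/*
 * Dispatch Set Features by Feature Identifier (lower byte of CDW10).
 * Unsupported features fail with Invalid Field in Command.
 */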
static void nvmet_execute_set_features(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u16 status = 0;

        switch (cdw10 & 0xff) {
        case NVME_FEAT_NUM_QUEUES:
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
                status = nvmet_set_feat_kato(req);
                break;
        case NVME_FEAT_ASYNC_EVENT:
                status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
                break;
        case NVME_FEAT_HOST_ID:
                status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
                break;
        case NVME_FEAT_WRITE_PROTECT:
                status = nvmet_set_feat_write_protect(req);
                break;
        default:
                req->error_loc = offsetof(struct nvme_common_command, cdw10);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, status);
}
static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 result;

        req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
        if (!req->ns) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
                return NVME_SC_INVALID_NS | NVME_SC_DNR;
        }
        mutex_lock(&subsys->lock);
        if (req->ns->readonly == true)
                result = NVME_NS_WRITE_PROTECT;
        else
                result = NVME_NS_NO_WRITE_PROTECT;
        nvmet_set_result(req, result);
        mutex_unlock(&subsys->lock);

        return 0;
}
void nvmet_get_feat_kato(struct nvmet_req *req)
{
        nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}
void nvmet_get_feat_async_event(struct nvmet_req *req)
{
        nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}
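/*
 * Dispatch Get Features by Feature Identifier.  Several mandatory features
 * are not implemented yet and currently fall through to Invalid Field.
 */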
static void nvmet_execute_get_features(struct nvmet_req *req)
{
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
        u16 status = 0;

        switch (cdw10 & 0xff) {
        /*
         * These features are mandatory in the spec, but we don't
         * have a useful way to implement them.  We'll eventually
         * need to come up with some fake values for these.
         */
#if 0
        case NVME_FEAT_ARBITRATION:
                break;
        case NVME_FEAT_POWER_MGMT:
                break;
        case NVME_FEAT_TEMP_THRESH:
                break;
        case NVME_FEAT_ERR_RECOVERY:
                break;
        case NVME_FEAT_IRQ_COALESCE:
                break;
        case NVME_FEAT_IRQ_CONFIG:
                break;
        case NVME_FEAT_WRITE_ATOMIC:
                break;
#endif
        case NVME_FEAT_ASYNC_EVENT:
                nvmet_get_feat_async_event(req);
                break;
        case NVME_FEAT_VOLATILE_WC:
                nvmet_set_result(req, 1);
                break;
        case NVME_FEAT_NUM_QUEUES:
                nvmet_set_result(req,
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
                nvmet_get_feat_kato(req);
                break;
        case NVME_FEAT_HOST_ID:
                /* need 128-bit host identifier flag */
                if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
                        req->error_loc =
                                offsetof(struct nvme_common_command, cdw11);
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }

                status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
                                sizeof(req->sq->ctrl->hostid));
                break;
        case NVME_FEAT_WRITE_PROTECT:
                status = nvmet_get_feat_write_protect(req);
                break;
        default:
                req->error_loc =
                        offsetof(struct nvme_common_command, cdw10);
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                break;
        }

        nvmet_req_complete(req, status);
}
void nvmet_execute_async_event(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

        mutex_lock(&ctrl->lock);
        if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
                mutex_unlock(&ctrl->lock);
                nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
                return;
        }
        ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
        mutex_unlock(&ctrl->lock);

        schedule_work(&ctrl->async_event_work);
}
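/*
 * Keep Alive simply re-arms the controller's keep-alive timer; the command
 * itself always completes successfully.
 */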
void nvmet_execute_keep_alive(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

        pr_debug("ctrl %d update keep-alive timer for %d secs\n",
                ctrl->cntlid, ctrl->kato);

        mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
        nvmet_req_complete(req, 0);
}
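/*
 * Top-level admin command parser: validate the controller state, then set
 * req->execute and req->data_len for the opcode (and, where applicable, the
 * log page or CNS value) before the transport executes the request.
 */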
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;
        u16 ret;

        ret = nvmet_check_ctrl_status(req, cmd);
        if (unlikely(ret))
                return ret;

        switch (cmd->common.opcode) {
        case nvme_admin_get_log_page:
                req->data_len = nvmet_get_log_page_len(cmd);

                switch (cmd->get_log_page.lid) {
                case NVME_LOG_ERROR:
                        req->execute = nvmet_execute_get_log_page_error;
                        return 0;
                case NVME_LOG_SMART:
                        req->execute = nvmet_execute_get_log_page_smart;
                        return 0;
                case NVME_LOG_FW_SLOT:
                        /*
                         * We only support a single firmware slot which always
                         * is active, so we can zero out the whole firmware slot
                         * log and still claim to fully implement this mandatory
                         * log page.
                         */
                        req->execute = nvmet_execute_get_log_page_noop;
                        return 0;
                case NVME_LOG_CHANGED_NS:
                        req->execute = nvmet_execute_get_log_changed_ns;
                        return 0;
                case NVME_LOG_CMD_EFFECTS:
                        req->execute = nvmet_execute_get_log_cmd_effects_ns;
                        return 0;
                case NVME_LOG_ANA:
                        req->execute = nvmet_execute_get_log_page_ana;
                        return 0;
                }
                break;
        case nvme_admin_identify:
                req->data_len = NVME_IDENTIFY_DATA_SIZE;
                switch (cmd->identify.cns) {
                case NVME_ID_CNS_NS:
                        req->execute = nvmet_execute_identify_ns;
                        return 0;
                case NVME_ID_CNS_CTRL:
                        req->execute = nvmet_execute_identify_ctrl;
                        return 0;
                case NVME_ID_CNS_NS_ACTIVE_LIST:
                        req->execute = nvmet_execute_identify_nslist;
                        return 0;
                case NVME_ID_CNS_NS_DESC_LIST:
                        req->execute = nvmet_execute_identify_desclist;
                        return 0;
                }
                break;
        case nvme_admin_abort_cmd:
                req->execute = nvmet_execute_abort;
                req->data_len = 0;
                return 0;
        case nvme_admin_set_features:
                req->execute = nvmet_execute_set_features;
                req->data_len = 0;
                return 0;
        case nvme_admin_get_features:
                req->execute = nvmet_execute_get_features;
                req->data_len = 0;
                return 0;
        case nvme_admin_async_event:
                req->execute = nvmet_execute_async_event;
                req->data_len = 0;
                return 0;
        case nvme_admin_keep_alive:
                req->execute = nvmet_execute_keep_alive;
                req->data_len = 0;
                return 0;
        }

        pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
               req->sq->qid);
        req->error_loc = offsetof(struct nvme_common_command, opcode);
        return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}