/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"

/*
 * This helper allows us to clear the AEN based on the RAE bit. Please use
 * this helper when processing the log pages which are associated with the
 * AEN.
 */
static inline void nvmet_clear_aen(struct nvmet_req *req, u32 aen_bit)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10[0]) & 1 << 15;

	if (!rae)
		clear_bit(aen_bit, &req->sq->ctrl->aen_masked);
}
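
/*
 * Bit 15 of CDW10 is the Retain Asynchronous Event (RAE) flag defined by the
 * NVMe specification: when the host leaves it clear, reading the log page
 * also re-arms the corresponding AEN by clearing its bit in aen_masked.
 */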

u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}
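
/*
 * For example, a Get Log Page command with NUMDU == 0 and NUMDL == 0x3ff
 * describes 0x3ff + 1 = 0x400 dwords, so the helper above returns
 * 0x400 * sizeof(u32) == 4096 bytes.
 */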

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
}

static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	struct nvmet_ns *ns;
	u64 host_reads, host_writes, data_units_read, data_units_written;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
	if (!ns) {
		pr_err("nvmet : Could not find namespace id : %d\n",
				le32_to_cpu(req->cmd->get_log_page.nsid));
		return NVME_SC_INVALID_NS;
	}

	/* we don't have the right data for file backed ns */
	if (!ns->bdev)
		goto out;

	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
	data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
	data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
out:
	nvmet_put_namespace(ns);

	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;

	ctrl = req->sq->ctrl;

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
		data_units_read +=
			part_stat_read(ns->bdev->bd_part, sectors[READ]);
		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
		data_units_written +=
			part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
	}
	rcu_read_unlock();

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;

	if (req->data_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_INTERNAL;
	struct nvme_effects_log *log;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->data_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen(req, NVME_AEN_CFG_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		rcu_read_lock();
		list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
		rcu_read_unlock();
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}
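
/*
 * NVME_ANA_LOG_RGO is the "Return Groups Only" flag in the LSP field of the
 * Get Log Page command: when the host sets it, the NSID list for each group
 * is omitted, so count stays zero and only the fixed-size group descriptor
 * is returned.
 */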

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen(req, NVME_AEN_CFG_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;
	const char model[] = "Linux";

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(1 << 0);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;
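
	/*
	 * SQES/CQES encode the maximum (upper nibble) and required (lower
	 * nibble) entry sizes as powers of two: 0x66 advertises 2^6 = 64 byte
	 * submission queue entries and 0x44 advertises 2^4 = 16 byte
	 * completion queue entries, the only sizes the fabrics target uses.
	 */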

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strcpy(id->subnqn, ctrl->subsys->subsysnqn);

	/* Max command capsule size is sqe + single page of in-capsule data */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				  req->port->inline_data_size) / 16);
	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	struct nvme_id_ns *id;
	u16 status = 0;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns)
		goto done;

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
	switch (req->port->ana_state[ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(ns->anagrpid);

	memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = ns->blksize_shift;

	if (ns->readonly)
		id->nsattr |= (1 << 0);
	nvmet_put_namespace(ns);
done:
	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}
	rcu_read_unlock();

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}
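
/*
 * Identify with CNS 0x02 (active namespace ID list) is defined to return
 * only NSIDs greater than the NSID supplied in the command, which is why
 * entries <= min_nsid are skipped; the host iterates through a large
 * namespace set by re-issuing the command with the last NSID it received.
 */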

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}
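
/*
 * Each entry in the namespace identification descriptor list is an
 * nvme_ns_id_desc header (type and length) immediately followed by the
 * identifier payload; *off tracks the running offset into the 4KB Identify
 * data buffer so callers can append entries back to back.
 */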

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	u16 status = 0;
	off_t off = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &ns->uuid, &off);
		if (status)
			goto out_put_ns;
	}
	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &ns->nguid, &off);
		if (status)
			goto out_put_ns;
	}

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}
540 * A "minimum viable" abort implementation: the command is mandatory in the
541 * spec, but we are not required to do any useful work. We couldn't really
542 * do a useful abort, so don't bother even with waiting for the command
543 * to be exectuted and return immediately telling the command to abort
546 static void nvmet_execute_abort(struct nvmet_req
*req
)
548 nvmet_set_result(req
, 1);
549 nvmet_req_complete(req
, 0);

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}
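
/*
 * The flush above is issued while the namespace is already marked read-only
 * so that data the target accepted before the transition is on stable
 * storage by the time the new write-protect state is reported to the host.
 */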

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw10[1]);
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
	if (unlikely(!req->ns))
		return status;

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

static void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
	u32 val32;
	u16 status = 0;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
		req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
		nvmet_set_result(req, req->sq->ctrl->kato);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
		if (val32 & ~NVMET_AEN_CFG_ALL) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
		nvmet_set_result(req, val32);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}
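
/*
 * The Keep Alive Timeout value in CDW11 is specified in milliseconds; the
 * target stores it in seconds, rounded up, and places the stored value in
 * the command result.
 */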

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 result;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
	if (!req->ns)
		return NVME_SC_INVALID_NS | NVME_SC_DNR;

	mutex_lock(&subsys->lock);
	if (req->ns->readonly == true)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

static void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
	u16 status = 0;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_set_result(req, req->sq->ctrl->kato * 1000);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw10[1] & cpu_to_le32(1 << 0))) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}
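
/*
 * Async Event Request commands are not completed here: they are parked in
 * async_event_cmds until async_event_work has an event to deliver.
 * NVMET_ASYNC_EVENTS bounds how many may be outstanding at once, matching
 * the 0's based AERL value advertised in Identify Controller above.
 */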

static void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->data_len = nvmet_get_log_page_len(cmd);

		switch (cmd->get_log_page.lid) {
		case NVME_LOG_ERROR:
			/*
			 * We currently never set the More bit in the status
			 * field, so all error log entries are invalid and can
			 * be zeroed out.  This is called a minimum viable
			 * implementation (TM) of this mandatory log page.
			 */
			req->execute = nvmet_execute_get_log_page_noop;
			return 0;
		case NVME_LOG_SMART:
			req->execute = nvmet_execute_get_log_page_smart;
			return 0;
		case NVME_LOG_FW_SLOT:
			/*
			 * We only support a single firmware slot which always
			 * is active, so we can zero out the whole firmware slot
			 * log and still claim to fully implement this mandatory
			 * log page.
			 */
			req->execute = nvmet_execute_get_log_page_noop;
			return 0;
		case NVME_LOG_CHANGED_NS:
			req->execute = nvmet_execute_get_log_changed_ns;
			return 0;
		case NVME_LOG_CMD_EFFECTS:
			req->execute = nvmet_execute_get_log_cmd_effects_ns;
			return 0;
		case NVME_LOG_ANA:
			req->execute = nvmet_execute_get_log_page_ana;
			return 0;
		}
		break;
	case nvme_admin_identify:
		req->data_len = NVME_IDENTIFY_DATA_SIZE;
		switch (cmd->identify.cns) {
		case NVME_ID_CNS_NS:
			req->execute = nvmet_execute_identify_ns;
			return 0;
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_execute_identify_ctrl;
			return 0;
		case NVME_ID_CNS_NS_ACTIVE_LIST:
			req->execute = nvmet_execute_identify_nslist;
			return 0;
		case NVME_ID_CNS_NS_DESC_LIST:
			req->execute = nvmet_execute_identify_desclist;
			return 0;
		}
		break;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		req->data_len = 0;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		req->data_len = 0;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		req->data_len = 0;
		return 0;
	}

	pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
	       req->sq->qid);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}