// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}
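
/*
 * Worked example for nvmet_get_log_page_len() (note added for clarity, not
 * part of the original file): NUMD is split across the NUMDU/NUMDL fields
 * and is 0's based, so numdu == 0 and numdl == 127 decode to
 * (127 + 1) * sizeof(u32) = 512 bytes, i.e. one nvme_smart_log page.
 */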
u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}
static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
}
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_SUCCESS;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		status = nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot));
		if (status)
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, status);
}
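
/*
 * Note added for clarity: the error slots form a ring indexed by
 * err_counter, so the loop in nvmet_execute_get_log_page_error() emits the
 * most recent entry first and walks backwards, wrapping from slot 0 to the
 * last slot, until all NVMET_ERROR_LOG_SLOTS entries have been copied.
 */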
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	struct nvmet_ns *ns;
	u64 host_reads, host_writes, data_units_read, data_units_written;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
	if (!ns) {
		pr_err("Could not find namespace id : %d\n",
				le32_to_cpu(req->cmd->get_log_page.nsid));
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		return NVME_SC_INVALID_NS;
	}

	/* we don't have the right data for file backed ns */
	if (!ns->bdev)
		goto out;

	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
	data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
	data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
out:
	nvmet_put_namespace(ns);

	return NVME_SC_SUCCESS;
}
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;

	ctrl = req->sq->ctrl;

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
		data_units_read +=
			part_stat_read(ns->bdev->bd_part, sectors[READ]);
		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
		data_units_written +=
			part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
	}
	rcu_read_unlock();

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}
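
/*
 * Note added for clarity: nvmet_get_smart_log_all() aggregates the block
 * layer's per-partition I/O counters across every namespace in the
 * subsystem under rcu_read_lock(); file-backed namespaces are skipped
 * because those counters only exist for block devices.
 */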
static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->data_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			&log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_INTERNAL;
	struct nvme_effects_log *log;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	log->acs[nvme_admin_get_log_page]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify]		= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd]		= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event]	= cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive]		= cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush]		= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm]			= cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes]	= cpu_to_le32(1 << 0);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

	kfree(log);
out:
	nvmet_req_complete(req, status);
}
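
/*
 * Note added for clarity: bit 0 of a Commands Supported and Effects entry
 * is CSUPP ("command supported"), so nvmet_execute_get_log_cmd_effects_ns()
 * merely advertises each supported opcode without claiming any side effects.
 */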
static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->data_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}
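
/*
 * Note added for clarity (my reading of the surrounding target code):
 * nr_changed_ns == U32_MAX is the overflow marker set when more namespaces
 * changed than the list can hold, in which case a single entry (NSID
 * 0xffffffff) is returned, matching the spec's rules for an overflowing
 * Changed Namespace List.
 */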
static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		rcu_read_lock();
		list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
		rcu_read_unlock();
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}
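
/*
 * Note added for clarity: if the host sets the RGO ("Return Groups Only")
 * bit in the LSP field, nvmet_format_ana_group() skips the namespace scan
 * and returns a descriptor with nnsids == 0, i.e. group state only.
 */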
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;
	const char model[] = "Linux";

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
		NVME_CTRL_ATTR_TBKAS);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/* Max command capsule size is sqe + single page of in-capsule data */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				  req->port->inline_data_size) / 16);
	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}
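
/*
 * Worked example (note added for clarity): sqes/cqes pack the maximum and
 * minimum entry sizes as powers of two in the high and low nibbles, so
 * (0x6 << 4) | 0x6 advertises fixed 64-byte SQEs and (0x4 << 4) | 0x4 fixed
 * 16-byte CQEs.  ioccsz/iorcsz are in 16-byte units: with a 64-byte SQE and
 * e.g. 8KB of inline data, ioccsz = (64 + 8192) / 16 = 516.
 */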
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	struct nvme_id_ns *id;
	u16 status = 0;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns)
		goto done;

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
	switch (req->port->ana_state[ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(ns->anagrpid);

	memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = ns->blksize_shift;

	if (ns->readonly)
		id->nsattr |= (1 << 0);
	nvmet_put_namespace(ns);
done:
	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
	kfree(id);
out:
	nvmet_req_complete(req, status);
}
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}
	rcu_read_unlock();

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}
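
/*
 * Note added for clarity: this implements CNS 02h (Active Namespace ID
 * list): only NSIDs strictly greater than the NSID in the command are
 * reported, and the 4KB buffer holds at most
 * NVME_IDENTIFY_DATA_SIZE / sizeof(__le32) = 1024 entries.
 */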
static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}
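
/*
 * Note added for clarity: each Namespace Identification Descriptor is a
 * small header (NIDT type, NIDL length) immediately followed by the
 * identifier payload, so *off advances by sizeof(desc) + len per descriptor.
 */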
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	u16 status = 0;
	off_t off = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &ns->uuid, &off);
		if (status)
			goto out_put_ns;
	}
	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &ns->nguid, &off);
		if (status)
			goto out_put_ns;
	}

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}
/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't bother even with waiting for the command
 * to be executed and return immediately telling the command to abort
 * wasn't successful.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}
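
/*
 * Note added for clarity: per the spec, bit 0 of the Abort completion's
 * Dword 0 set to 1 means "command not aborted", which is exactly what
 * nvmet_set_result(req, 1) reports above.
 */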
static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return status;
	}

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}
u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}
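
/*
 * Worked example (note added for clarity): the host passes KATO in
 * milliseconds in cdw11; DIV_ROUND_UP(2500, 1000) stores 3 seconds, and
 * nvmet_get_feat_kato() converts back by multiplying by 1000.
 */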
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}
static void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}
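
/*
 * Note added for clarity: the Number of Queues result packs the 0's based
 * submission queue count in bits 15:0 and the completion queue count in
 * bits 31:16, so max_qid - 1 appears in both halves.
 */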
static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 result;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
	if (!req->ns) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	if (req->ns->readonly == true)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}
void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}
void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}
static void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}
void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}
void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->data_len = nvmet_get_log_page_len(cmd);

		switch (cmd->get_log_page.lid) {
		case NVME_LOG_ERROR:
			req->execute = nvmet_execute_get_log_page_error;
			return 0;
		case NVME_LOG_SMART:
			req->execute = nvmet_execute_get_log_page_smart;
			return 0;
		case NVME_LOG_FW_SLOT:
			/*
			 * We only support a single firmware slot which always
			 * is active, so we can zero out the whole firmware slot
			 * log and still claim to fully implement this mandatory
			 * log page.
			 */
			req->execute = nvmet_execute_get_log_page_noop;
			return 0;
		case NVME_LOG_CHANGED_NS:
			req->execute = nvmet_execute_get_log_changed_ns;
			return 0;
		case NVME_LOG_CMD_EFFECTS:
			req->execute = nvmet_execute_get_log_cmd_effects_ns;
			return 0;
		case NVME_LOG_ANA:
			req->execute = nvmet_execute_get_log_page_ana;
			return 0;
		}
		break;
	case nvme_admin_identify:
		req->data_len = NVME_IDENTIFY_DATA_SIZE;
		switch (cmd->identify.cns) {
		case NVME_ID_CNS_NS:
			req->execute = nvmet_execute_identify_ns;
			return 0;
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_execute_identify_ctrl;
			return 0;
		case NVME_ID_CNS_NS_ACTIVE_LIST:
			req->execute = nvmet_execute_identify_nslist;
			return 0;
		case NVME_ID_CNS_NS_DESC_LIST:
			req->execute = nvmet_execute_identify_desclist;
			return 0;
		}
		break;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		req->data_len = 0;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		req->data_len = 0;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		req->data_len = 0;
		return 0;
	}

	pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
	       req->sq->qid);
	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
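
/*
 * Note added for clarity (my reading of the surrounding target code, not a
 * statement from this file): nvmet_parse_admin_cmd() only selects the
 * ->execute() handler and the expected data_len; the core later checks the
 * actual transfer length against req->data_len before invoking the handler.
 */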