// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>

#define CREATE_TRACE_POINTS
#include "trace.h"
#define NVME_MINORS		(1U << MINORBITS)

struct nvme_ns_info {
	struct nvme_ns_ids ids;
	u32 nsid;
	__le32 anagrpid;
	bool is_shared;
	bool is_readonly;
	bool is_ready;
};
unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static unsigned long apst_primary_timeout_ms = 100;
module_param(apst_primary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_primary_timeout_ms,
	"primary APST timeout in ms");

static unsigned long apst_secondary_timeout_ms = 2000;
module_param(apst_secondary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_timeout_ms,
	"secondary APST timeout in ms");

static unsigned long apst_primary_latency_tol_us = 15000;
module_param(apst_primary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_primary_latency_tol_us,
	"primary APST latency tolerance in us");

static unsigned long apst_secondary_latency_tol_us = 100000;
module_param(apst_secondary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_latency_tol_us,
	"secondary APST latency tolerance in us");
/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive, periodic reconnects etc. nvme_reset_wq runs reset works
 * which also flush works hosted on nvme_wq for serialization purposes.
 * nvme_delete_wq hosts controller deletion works which flush reset
 * works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_ctrl_base_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

static DEFINE_IDA(nvme_ns_chr_minor_ida);
static dev_t nvme_ns_chr_devt;
static struct class *nvme_ns_chr_class;
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd);
void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only queue new scan work when admin and IO queues are both alive
	 */
	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
		queue_work(nvme_wq, &ctrl->scan_work);
}
/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{
	if (ctrl->state != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
static void nvme_failfast_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, failfast_work);

	if (ctrl->state != NVME_CTRL_CONNECTING)
		return;

	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	dev_info(ctrl->device, "failfast expired\n");
	nvme_kick_requeue_lists(ctrl);
}
static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
		return;

	schedule_delayed_work(&ctrl->failfast_work,
			      ctrl->opts->fast_io_fail_tmo * HZ);
}
static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts)
		return;

	cancel_delayed_work_sync(&ctrl->failfast_work);
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
}
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (ctrl->state != NVME_CTRL_LIVE)
			ret = -ENETRESET;
	}

	return ret;
}
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
}
static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}
int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	/*
	 * Keep a reference until nvme_do_delete_ctrl() completes,
	 * since ->delete_ctrl can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		nvme_do_delete_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}
static blk_status_t nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
	case NVME_SC_CMD_INTERRUPTED:
	case NVME_SC_NS_NOT_READY:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_ONCS_NOT_SUPPORTED:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
	case NVME_SC_ZONE_TOO_MANY_ACTIVE:
		return BLK_STS_ZONE_ACTIVE_RESOURCE;
	case NVME_SC_ZONE_TOO_MANY_OPEN:
		return BLK_STS_ZONE_OPEN_RESOURCE;
	default:
		return BLK_STS_IOERR;
	}
}
static void nvme_retry_req(struct request *req)
{
	unsigned long delay = 0;
	u16 crd;

	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
	if (crd)
		delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}
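
/*
 * Note: the CRD (Command Retry Delay) field extracted above selects one of
 * the three CRDT values the controller reported in its Identify data; per
 * the NVMe spec those values are in units of 100 milliseconds, hence the
 * * 100 above to get a millisecond delay for the requeue list kick.
 */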
static void nvme_log_error(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	struct nvme_request *nr = nvme_req(req);

	if (ns) {
		pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
		       ns->disk ? ns->disk->disk_name : "?",
		       nvme_get_opcode_str(nr->cmd->common.opcode),
		       nr->cmd->common.opcode,
		       (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)),
		       (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift,
		       nvme_get_error_status_str(nr->status),
		       nr->status >> 8 & 7,	/* Status Code Type */
		       nr->status & 0xff,	/* Status Code */
		       nr->status & NVME_SC_MORE ? "MORE " : "",
		       nr->status & NVME_SC_DNR  ? "DNR "  : "");
		return;
	}

	pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n",
			   dev_name(nr->ctrl->device),
			   nvme_get_admin_opcode_str(nr->cmd->common.opcode),
			   nr->cmd->common.opcode,
			   nvme_get_error_status_str(nr->status),
			   nr->status >> 8 & 7,	/* Status Code Type */
			   nr->status & 0xff,	/* Status Code */
			   nr->status & NVME_SC_MORE ? "MORE " : "",
			   nr->status & NVME_SC_DNR  ? "DNR "  : "");
}
enum nvme_disposition {
	COMPLETE,
	RETRY,
	FAILOVER,
	AUTHENTICATE,
};
static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
{
	if (likely(nvme_req(req)->status == 0))
		return COMPLETE;

	if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
		return AUTHENTICATE;

	if (blk_noretry_request(req) ||
	    (nvme_req(req)->status & NVME_SC_DNR) ||
	    nvme_req(req)->retries >= nvme_max_retries)
		return COMPLETE;

	if (req->cmd_flags & REQ_NVME_MPATH) {
		if (nvme_is_path_error(nvme_req(req)->status) ||
		    blk_queue_dying(req->q))
			return FAILOVER;
	} else {
		if (blk_queue_dying(req->q))
			return COMPLETE;
	}

	return RETRY;
}
static inline void nvme_end_req_zoned(struct request *req)
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    req_op(req) == REQ_OP_ZONE_APPEND)
		req->__sector = nvme_lba_to_sect(req->q->queuedata,
			le64_to_cpu(nvme_req(req)->result.u64));
}
static inline void nvme_end_req(struct request *req)
{
	blk_status_t status = nvme_error_status(nvme_req(req)->status);

	if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
		nvme_log_error(req);
	nvme_end_req_zoned(req);
	nvme_trace_bio_complete(req);
	blk_mq_end_request(req, status);
}
void nvme_complete_rq(struct request *req)
{
	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;

	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);

	if (ctrl->kas)
		ctrl->comp_seen = true;

	switch (nvme_decide_disposition(req)) {
	case COMPLETE:
		nvme_end_req(req);
		return;
	case RETRY:
		nvme_retry_req(req);
		return;
	case FAILOVER:
		nvme_failover_req(req);
		return;
	case AUTHENTICATE:
#ifdef CONFIG_NVME_AUTH
		queue_work(nvme_wq, &ctrl->dhchap_auth_work);
		nvme_retry_req(req);
#else
		nvme_end_req(req);
#endif
		return;
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
void nvme_complete_batch_req(struct request *req)
{
	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);
	nvme_end_req_zoned(req);
}
EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
/*
 * Called to unwind from ->queue_rq on a failed command submission so that the
 * multipathing code gets called to potentially failover to another path.
 * The caller needs to unwind all transport specific resource allocations and
 * must propagate the return value.
 */
blk_status_t nvme_host_path_error(struct request *req)
{
	nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
	blk_mq_set_request_complete(req);
	nvme_complete_rq(req);
	return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvme_host_path_error);
bool nvme_cancel_request(struct request *req, void *data)
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	/* don't abort one completed request */
	if (blk_mq_request_completed(req))
		return true;

	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
	blk_mq_complete_request(req);
	return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->tagset) {
		blk_mq_tagset_busy_iter(ctrl->tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->admin_tagset) {
		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_CONNECTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING_NOIO:
		switch (old_state) {
		case NVME_CTRL_DELETING:
		case NVME_CTRL_DEAD:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed) {
		ctrl->state = new_state;
		wake_up_all(&ctrl->state_wq);
	}

	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (!changed)
		return false;

	if (ctrl->state == NVME_CTRL_LIVE) {
		if (old_state == NVME_CTRL_CONNECTING)
			nvme_stop_failfast_work(ctrl);
		nvme_kick_requeue_lists(ctrl);
	} else if (ctrl->state == NVME_CTRL_CONNECTING &&
		old_state == NVME_CTRL_RESETTING) {
		nvme_start_failfast_work(ctrl);
	}
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
/*
 * Returns true for sink states that can't ever transition back to live.
 */
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}
/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return ctrl->state == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);
static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_free(&head->subsys->ns_ida, head->instance);
	cleanup_srcu_struct(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}
bool nvme_tryget_ns_head(struct nvme_ns_head *head)
{
	return kref_get_unless_zero(&head->ref);
}

void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}
static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}
static inline bool nvme_get_ns(struct nvme_ns *ns)
{
	return kref_get_unless_zero(&ns->kref);
}

void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
static inline void nvme_clear_nvme_request(struct request *req)
{
	nvme_req(req)->status = 0;
	nvme_req(req)->retries = 0;
	nvme_req(req)->flags = 0;
	req->rq_flags |= RQF_DONTPREP;
}
/* initialize a passthrough request */
void nvme_init_request(struct request *req, struct nvme_command *cmd)
{
	if (req->q->queuedata)
		req->timeout = NVME_IO_TIMEOUT;
	else /* no queuedata implies admin queue */
		req->timeout = NVME_ADMIN_TIMEOUT;

	/* passthru commands should let the driver set the SGL flags */
	cmd->common.flags &= ~NVME_CMD_SGL_ALL;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	if (req->mq_hctx->type == HCTX_TYPE_POLL)
		req->cmd_flags |= REQ_POLLED;
	nvme_clear_nvme_request(req);
	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
}
EXPORT_SYMBOL_GPL(nvme_init_request);
/*
 * For something we're not in a state to send to the device the default action
 * is to busy it and retry it after the controller state is recovered. However,
 * if the controller is deleting or if anything is marked for failfast or
 * nvme multipath it is immediately failed.
 *
 * Note: commands used to initialize the controller will be marked for failfast.
 * Note: nvme cli/ioctl commands are marked for failfast.
 */
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq)
{
	if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
	    ctrl->state != NVME_CTRL_DELETING &&
	    ctrl->state != NVME_CTRL_DEAD &&
	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
		return BLK_STS_RESOURCE;
	return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	struct nvme_request *req = nvme_req(rq);

	/*
	 * currently we have a problem sending passthru commands
	 * on the admin_q if the controller is not LIVE because we can't
	 * make sure that they are going out after the admin connect,
	 * controller enable and/or other commands in the initialization
	 * sequence. until the controller will be LIVE, fail with
	 * BLK_STS_RESOURCE so that they will be rescheduled.
	 */
	if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
		return false;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * Only allow commands on a live queue, except for the connect
		 * command, which is required to set the queue live in the
		 * appropriate states.
		 */
		switch (ctrl->state) {
		case NVME_CTRL_CONNECTING:
			if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
			    (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
			     req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
			     req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
				return true;
			break;
		case NVME_CTRL_DEAD:
			return false;
		default:
			break;
		}
	}

	return queue_live;
}
EXPORT_SYMBOL_GPL(__nvme_check_ready);
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}
static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	/*
	 * Some devices do not consider the DSM 'Number of Ranges' field when
	 * determining how much data to DMA. Always allocate memory for maximum
	 * number of segments to prevent device reading beyond end of buffer.
	 */
	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;

	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
	if (!range) {
		/*
		 * If we fail to allocate our range, fall back to the
		 * controller discard page. If that's also busy, it's safe to
		 * return busy, as we know we can make progress once that's
		 * freed.
		 */
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
			return BLK_STS_RESOURCE;

		range = page_address(ns->ctrl->discard_page);
	}

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		if (n < segments) {
			range[n].cattr = cpu_to_le32(0);
			range[n].nlb = cpu_to_le32(nlb);
			range[n].slba = cpu_to_le64(slba);
		}
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		if (virt_to_page(range) == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(range);
		return BLK_STS_IOERR;
	}

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = alloc_size;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}
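
/*
 * The DSM range buffer set up above travels with the request as a special
 * payload and is released later in nvme_cleanup_cmd(), which either unlocks
 * the shared controller discard page or kfree()s the allocated range array.
 */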
static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
		struct request *req)
{
	u32 upper, lower;
	u64 ref48;

	/* both rw and write zeroes share the same reftag format */
	switch (ns->guard_type) {
	case NVME_NVM_NS_16B_GUARD:
		cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
		break;
	case NVME_NVM_NS_64B_GUARD:
		ref48 = ext_pi_ref_tag(req);
		lower = lower_32_bits(ref48);
		upper = upper_32_bits(ref48);

		cmnd->rw.reftag = cpu_to_le32(lower);
		cmnd->rw.cdw3 = cpu_to_le32(upper);
		break;
	default:
		break;
	}
}
static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));

	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		return nvme_setup_discard(ns, req, cmnd);

	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->write_zeroes.slba =
		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->write_zeroes.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (nvme_ns_has_pi(ns)) {
		cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			nvme_set_ref_tag(ns, cmnd, req);
			break;
		}
	}

	return BLK_STS_OK;
}
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_opcode op)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd->rw.opcode = op;
	cmnd->rw.flags = 0;
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->rw.cdw2 = 0;
	cmnd->rw.cdw3 = 0;
	cmnd->rw.metadata = 0;
	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
	cmnd->rw.reftag = 0;
	cmnd->rw.apptag = 0;
	cmnd->rw.appmask = 0;

	if (ns->ms) {
		/*
		 * If formatted with metadata, the block layer always provides a
		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else
		 * we enable the PRACT bit for protection information or set the
		 * namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		}

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			if (op == nvme_cmd_zone_append)
				control |= NVME_RW_APPEND_PIREMAP;
			nvme_set_ref_tag(ns, cmnd, req);
			break;
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return 0;
}
void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;

		if (req->special_vec.bv_page == ctrl->discard_page)
			clear_bit_unlock(0, &ctrl->discard_page_busy);
		else
			kfree(bvec_virt(&req->special_vec));
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
{
	struct nvme_command *cmd = nvme_req(req)->cmd;
	blk_status_t ret = BLK_STS_OK;

	if (!(req->rq_flags & RQF_DONTPREP))
		nvme_clear_nvme_request(req);

	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		/* these are setup prior to execution in nvme_init_request() */
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_RESET:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
		break;
	case REQ_OP_WRITE_ZEROES:
		ret = nvme_setup_write_zeroes(ns, req, cmd);
		break;
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
		break;
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
		break;
	case REQ_OP_ZONE_APPEND:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	cmd->common.command_id = nvme_cid(req);
	trace_nvme_setup_cmd(req, cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
/*
 * Return values:
 * 0:  success
 * >0: nvme controller's cqe status response
 * <0: kernel error in lieu of controller response
 */
static int nvme_execute_rq(struct request *rq, bool at_head)
{
	blk_status_t status;

	status = blk_execute_rq(rq, at_head);
	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		return -EINTR;
	if (nvme_req(rq)->status)
		return nvme_req(rq)->status;
	return blk_status_to_errno(status);
}
/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		int qid, int at_head, blk_mq_req_flags_t flags)
{
	struct request *req;
	int ret;

	if (qid == NVME_QID_ANY)
		req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
	else
		req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
						qid - 1);

	if (IS_ERR(req))
		return PTR_ERR(req);
	nvme_init_request(req, cmd);

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	req->rq_flags |= RQF_QUIET;
	ret = nvme_execute_rq(req, at_head);
	if (result && ret >= 0)
		*result = nvme_req(req)->result;
out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
static u32 nvme_known_admin_effects(u8 opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
			NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
	default:
		break;
	}
	return 0;
}
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		if (ns->head->effects)
			effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
			dev_warn_once(ctrl->device,
				"IO command:%02x has unhandled effects:%08x\n",
				opcode, effects);
		return 0;
	}

	if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
	effects |= nvme_known_admin_effects(opcode);

	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);
static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			       u8 opcode)
{
	u32 effects = nvme_command_effects(ctrl, ns, opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		mutex_lock(&ctrl->scan_lock);
		mutex_lock(&ctrl->subsys->lock);
		nvme_mpath_start_freeze(ctrl->subsys);
		nvme_mpath_wait_freeze(ctrl->subsys);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}
static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
			      struct nvme_command *cmd, int status)
{
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC)
		nvme_init_ctrl_finish(ctrl);
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
		nvme_queue_scan(ctrl);
		flush_work(&ctrl->scan_work);
	}

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
		case NVME_FEAT_KATO:
			/*
			 * Keep alive commands interval on the host should be
			 * updated when KATO is modified by Set Features
			 * commands.
			 */
			if (!status)
				nvme_update_keep_alive(ctrl, cmd);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}
int nvme_execute_passthru_rq(struct request *rq)
{
	struct nvme_command *cmd = nvme_req(rq)->cmd;
	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
	struct nvme_ns *ns = rq->q->queuedata;
	u32 effects;
	int ret;

	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
	ret = nvme_execute_rq(rq, false);
	if (effects) /* nothing to be done for zero cmd effects */
		nvme_passthru_end(ctrl, effects, cmd, ret);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
/*
 * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
 *
 *   The host should send Keep Alive commands at half of the Keep Alive Timeout
 *   accounting for transport roundtrip times [..].
 */
static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
{
	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
}
static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;
	unsigned long flags;
	bool startka = false;

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
				status);
		return;
	}

	ctrl->comp_seen = false;
	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->state == NVME_CTRL_LIVE ||
	    ctrl->state == NVME_CTRL_CONNECTING)
		startka = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
		nvme_queue_keep_alive_work(ctrl);
}
static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);
	bool comp_seen = ctrl->comp_seen;
	struct request *rq;

	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
		dev_dbg(ctrl->device,
			"reschedule traffic based keep-alive timer\n");
		ctrl->comp_seen = false;
		nvme_queue_keep_alive_work(ctrl);
		return;
	}

	rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
				  BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
		nvme_reset_ctrl(ctrl);
		return;
	}
	nvme_init_request(rq, &ctrl->ka_cmd);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io = nvme_keep_alive_end_io;
	rq->end_io_data = ctrl;
	rq->rq_flags |= RQF_QUIET;
	blk_execute_rq_nowait(rq, false);
}
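
/*
 * Note on the TBKAS path above: with Traffic Based Keep Alive Support
 * advertised in CTRATT, command completions observed during the keep-alive
 * interval count as proof of liveness, so the work merely re-arms itself
 * instead of sending an actual Keep Alive command.
 */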
static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	nvme_queue_keep_alive_work(ctrl);
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd)
{
	unsigned int new_kato =
		DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);

	dev_info(ctrl->device,
		 "keep alive interval updated from %u ms to %u ms\n",
		 ctrl->kato * 1000 / 2, new_kato * 1000 / 2);

	nvme_stop_keep_alive(ctrl);
	ctrl->kato = new_kato;
	nvme_start_keep_alive(ctrl);
}
/*
 * In NVMe 1.0 the CNS field was just a binary controller or namespace
 * flag, thus sending any new CNS opcodes has a big chance of not working.
 * Qemu unfortunately had that bug after reporting a 1.1 version compliance
 * (but not for any later version).
 */
static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
{
	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
		return ctrl->vs < NVME_VS(1, 2, 0);
	return ctrl->vs < NVME_VS(1, 1, 0);
}
static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}
static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
		struct nvme_ns_id_desc *cur, bool *csi_seen)
{
	const char *warn_str = "ctrl returned bogus length:";
	void *data = cur;

	switch (cur->nidt) {
	case NVME_NIDT_EUI64:
		if (cur->nidl != NVME_NIDT_EUI64_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
				 warn_str, cur->nidl);
			return -1;
		}
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_EUI64_LEN;
		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
		return NVME_NIDT_EUI64_LEN;
	case NVME_NIDT_NGUID:
		if (cur->nidl != NVME_NIDT_NGUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_NGUID_LEN;
		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
		return NVME_NIDT_NGUID_LEN;
	case NVME_NIDT_UUID:
		if (cur->nidl != NVME_NIDT_UUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_UUID_LEN;
		uuid_copy(&ids->uuid, data + sizeof(*cur));
		return NVME_NIDT_UUID_LEN;
	case NVME_NIDT_CSI:
		if (cur->nidl != NVME_NIDT_CSI_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
		*csi_seen = true;
		return NVME_NIDT_CSI_LEN;
	default:
		/* Skip unknown types */
		return cur->nidl;
	}
}
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl,
		struct nvme_ns_info *info)
{
	struct nvme_command c = { };
	bool csi_seen = false;
	int status, pos, len;
	void *data;

	if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
		return 0;
	if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
		return 0;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(info->nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
				      NVME_IDENTIFY_DATA_SIZE);
	if (status) {
		dev_warn(ctrl->device,
			"Identify Descriptors failed (nsid=%u, status=0x%x)\n",
			info->nsid, status);
		goto free_data;
	}

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
		if (len < 0)
			break;

		len += sizeof(*cur);
	}

	if (nvme_multi_css(ctrl) && !csi_seen) {
		dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
			 info->nsid);
		status = -EINVAL;
	}

free_data:
	kfree(data);
	return status;
}
*ctrl
, unsigned nsid
,
1409 struct nvme_id_ns
**id
)
1411 struct nvme_command c
= { };
1414 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1415 c
.identify
.opcode
= nvme_admin_identify
;
1416 c
.identify
.nsid
= cpu_to_le32(nsid
);
1417 c
.identify
.cns
= NVME_ID_CNS_NS
;
1419 *id
= kmalloc(sizeof(**id
), GFP_KERNEL
);
1423 error
= nvme_submit_sync_cmd(ctrl
->admin_q
, &c
, *id
, sizeof(**id
));
1425 dev_warn(ctrl
->device
, "Identify namespace failed (%d)\n", error
);
1429 error
= NVME_SC_INVALID_NS
| NVME_SC_DNR
;
1430 if ((*id
)->ncap
== 0) /* namespace not allocated or attached */
1439 static int nvme_ns_info_from_identify(struct nvme_ctrl
*ctrl
,
1440 struct nvme_ns_info
*info
)
1442 struct nvme_ns_ids
*ids
= &info
->ids
;
1443 struct nvme_id_ns
*id
;
1446 ret
= nvme_identify_ns(ctrl
, info
->nsid
, &id
);
1449 info
->anagrpid
= id
->anagrpid
;
1450 info
->is_shared
= id
->nmic
& NVME_NS_NMIC_SHARED
;
1451 info
->is_readonly
= id
->nsattr
& NVME_NS_ATTR_RO
;
1452 info
->is_ready
= true;
1453 if (ctrl
->quirks
& NVME_QUIRK_BOGUS_NID
) {
1454 dev_info(ctrl
->device
,
1455 "Ignoring bogus Namespace Identifiers\n");
1457 if (ctrl
->vs
>= NVME_VS(1, 1, 0) &&
1458 !memchr_inv(ids
->eui64
, 0, sizeof(ids
->eui64
)))
1459 memcpy(ids
->eui64
, id
->eui64
, sizeof(ids
->eui64
));
1460 if (ctrl
->vs
>= NVME_VS(1, 2, 0) &&
1461 !memchr_inv(ids
->nguid
, 0, sizeof(ids
->nguid
)))
1462 memcpy(ids
->nguid
, id
->nguid
, sizeof(ids
->nguid
));
static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
		struct nvme_ns_info *info)
{
	struct nvme_id_ns_cs_indep *id;
	struct nvme_command c = {
		.identify.opcode	= nvme_admin_identify,
		.identify.nsid		= cpu_to_le32(info->nsid),
		.identify.cns		= NVME_ID_CNS_NS_CS_INDEP,
	};
	int ret;

	id = kmalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
	if (!ret) {
		info->anagrpid = id->anagrpid;
		info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
		info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
		info->is_ready = id->nstat & NVME_NSTAT_NRDY;
	}
	kfree(id);
	return ret;
}
static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{
	union nvme_result res = { 0 };
	struct nvme_command c = { };
	int ret;

	c.features.opcode = op;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_set_features);

int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_get_features);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count. We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
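
/*
 * Note on the encoding above: the Number of Queues feature takes 0's based
 * counts, NSQR in bits 15:0 and NCQR in bits 31:16 of cdw11, which is why
 * *count - 1 is replicated into both halves and the completion result is
 * decoded with min(result & 0xffff, result >> 16) + 1.
 */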
#define NVME_AEN_SUPPORTED \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
	int status;

	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
			 supported_aens);

	queue_work(nvme_wq, &ctrl->async_event_work);
}
static int nvme_ns_open(struct nvme_ns *ns)
{
	/* should never be called due to GENHD_FL_HIDDEN */
	if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
		goto fail;
	if (!nvme_get_ns(ns))
		goto fail;
	if (!try_module_get(ns->ctrl->ops->module))
		goto fail_put_ns;

	return 0;

fail_put_ns:
	nvme_put_ns(ns);
fail:
	return -ENXIO;
}

static void nvme_ns_release(struct nvme_ns *ns)
{
	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_ns_open(bdev->bd_disk->private_data);
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	nvme_ns_release(disk->private_data);
}
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}
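
/*
 * The fake geometry reported above (64 heads, 32 sectors/track) makes
 * heads * sectors = 2048, so capacity >> 11 yields a consistent cylinder
 * count for legacy HDIO_GETGEO users.
 */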
#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
				u32 max_integrity_segments)
{
	struct blk_integrity integrity = { };

	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		switch (ns->guard_type) {
		case NVME_NVM_NS_16B_GUARD:
			integrity.profile = &t10_pi_type3_crc;
			integrity.tag_size = sizeof(u16) + sizeof(u32);
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		case NVME_NVM_NS_64B_GUARD:
			integrity.profile = &ext_pi_type3_crc64;
			integrity.tag_size = sizeof(u16) + 6;
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		default:
			integrity.profile = NULL;
			break;
		}
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		switch (ns->guard_type) {
		case NVME_NVM_NS_16B_GUARD:
			integrity.profile = &t10_pi_type1_crc;
			integrity.tag_size = sizeof(u16);
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		case NVME_NVM_NS_64B_GUARD:
			integrity.profile = &ext_pi_type1_crc64;
			integrity.tag_size = sizeof(u16);
			integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
			break;
		default:
			integrity.profile = NULL;
			break;
		}
		break;
	default:
		integrity.profile = NULL;
		break;
	}

	integrity.tuple_size = ns->ms;
	blk_integrity_register(disk, &integrity);
	blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
}
#else
static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
				u32 max_integrity_segments)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct request_queue *queue = disk->queue;
	u32 size = queue_logical_block_size(queue);

	if (ctrl->max_discard_sectors == 0) {
		blk_queue_max_discard_sectors(queue, 0);
		return;
	}

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	queue->limits.discard_granularity = size;

	/* If discard is already enabled, don't reset queue limits */
	if (queue->limits.max_discard_sectors)
		return;

	if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
		ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);

	blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
	blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}
static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
		a->csi == b->csi;
}
static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	bool first = id->dps & NVME_NS_DPS_PI_FIRST;
	unsigned lbaf = nvme_lbaf_index(id->flbas);
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_command c = { };
	struct nvme_id_ns_nvm *nvm;
	int ret = 0;
	u32 elbaf;

	ns->pi_size = 0;
	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
	if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
		ns->pi_size = sizeof(struct t10_pi_tuple);
		ns->guard_type = NVME_NVM_NS_16B_GUARD;
		goto set_pi;
	}

	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
	if (!nvm)
		return -ENOMEM;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(ns->head->ns_id);
	c.identify.cns = NVME_ID_CNS_CS_NS;
	c.identify.csi = NVME_CSI_NVM;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm));
	if (ret)
		goto free_data;

	elbaf = le32_to_cpu(nvm->elbaf[lbaf]);

	/* no support for storage tag formats right now */
	if (nvme_elbaf_sts(elbaf))
		goto free_data;

	ns->guard_type = nvme_elbaf_guard_type(elbaf);
	switch (ns->guard_type) {
	case NVME_NVM_NS_64B_GUARD:
		ns->pi_size = sizeof(struct crc64_pi_tuple);
		break;
	case NVME_NVM_NS_16B_GUARD:
		ns->pi_size = sizeof(struct t10_pi_tuple);
		break;
	default:
		break;
	}

free_data:
	kfree(nvm);
set_pi:
	if (ns->pi_size && (first || ns->ms == ns->pi_size))
		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
	else
		ns->pi_type = 0;

	return ret;
}
static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;

	if (nvme_init_ms(ns, id))
		return;

	ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
	if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
		return;

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		/*
		 * The NVMe over Fabrics specification only supports metadata as
		 * part of the extended data LBA.  We rely on HCA/HBA support to
		 * remap the separate metadata buffer from the block layer.
		 */
		if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
			return;

		ns->features |= NVME_NS_EXT_LBAS;

		/*
		 * The current fabrics transport drivers support namespace
		 * metadata formats only if nvme_ns_has_pi() returns true.
		 * Suppress support for all other formats so the namespace will
		 * have a 0 capacity and not be usable through the block stack.
		 *
		 * Note, this check will need to be modified if any drivers
		 * gain the ability to use other metadata formats.
		 */
		if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
			ns->features |= NVME_NS_METADATA_SUPPORTED;
	} else {
		/*
		 * For PCIe controllers, we can't easily remap the separate
		 * metadata buffer from the block layer and thus require a
		 * separate metadata buffer for block layer metadata/PI support.
		 * We allow extended LBAs for the passthrough interface, though.
		 */
		if (id->flbas & NVME_NS_FLBAS_META_EXT)
			ns->features |= NVME_NS_EXT_LBAS;
		else
			ns->features |= NVME_NS_METADATA_SUPPORTED;
	}
}
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;

		max_segments = min_not_zero(max_segments, ctrl->max_segments);
		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
	blk_queue_dma_alignment(q, 3);
	blk_queue_write_cache(q, vwc, vwc);
}
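
/*
 * The + 1 in the segment calculation above allows for a data buffer that
 * is not aligned to NVME_CTRL_PAGE_SIZE: in the worst case a transfer of
 * n bytes can straddle n / NVME_CTRL_PAGE_SIZE + 1 controller pages.
 */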
static void nvme_update_disk_info(struct gendisk *disk,
		struct nvme_ns *ns, struct nvme_id_ns *id)
{
	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
	unsigned short bs = 1 << ns->lba_shift;
	u32 atomic_bs, phys_bs, io_opt = 0;

	/*
	 * The block layer can't support LBA sizes larger than the page size
	 * yet, so catch this early and don't allow block I/O.
	 */
	if (ns->lba_shift > PAGE_SHIFT) {
		capacity = 0;
		bs = (1 << 9);
	}

	blk_integrity_unregister(disk);

	atomic_bs = phys_bs = bs;
	if (id->nabo == 0) {
		/*
		 * Bit 1 indicates whether NAWUPF is defined for this namespace
		 * and whether it should be used instead of AWUPF. If NAWUPF ==
		 * 0 then AWUPF must be used instead.
		 */
		if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
			atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
		else
			atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
	}

	if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
		/* NPWG = Namespace Preferred Write Granularity */
		phys_bs = bs * (1 + le16_to_cpu(id->npwg));
		/* NOWS = Namespace Optimal Write Size */
		io_opt = bs * (1 + le16_to_cpu(id->nows));
	}

	blk_queue_logical_block_size(disk->queue, bs);
	/*
	 * Linux filesystems assume writing a single physical block is
	 * an atomic operation. Hence limit the physical block size to the
	 * value of the Atomic Write Unit Power Fail parameter.
	 */
	blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
	blk_queue_io_min(disk->queue, phys_bs);
	blk_queue_io_opt(disk->queue, io_opt);

	/*
	 * Register a metadata profile for PI, or the plain non-integrity NVMe
	 * metadata masquerading as Type 0 if supported, otherwise reject block
	 * I/O to namespaces with metadata except when the namespace supports
	 * PI, as it can strip/insert in that case.
	 */
	if (ns->ms) {
		if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
		    (ns->features & NVME_NS_METADATA_SUPPORTED))
			nvme_init_integrity(disk, ns,
					    ns->ctrl->max_integrity_segments);
		else if (!nvme_ns_has_pi(ns))
			capacity = 0;
	}

	set_capacity_and_notify(disk, capacity);

	nvme_config_discard(disk, ns);
	blk_queue_max_write_zeroes_sectors(disk->queue,
					   ns->ctrl->max_zeroes_sectors);
}
static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
{
	return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags);
}

static inline bool nvme_first_scan(struct gendisk *disk)
{
	/* nvme_alloc_ns() scans the disk prior to adding it */
	return !disk_live(disk);
}
static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 iob;

	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
	    is_power_of_2(ctrl->max_hw_sectors))
		iob = ctrl->max_hw_sectors;
	else
		iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));

	if (!iob)
		return;

	if (!is_power_of_2(iob)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring unaligned IO boundary:%u\n",
				ns->disk->disk_name, iob);
		return;
	}

	if (blk_queue_is_zoned(ns->disk->queue)) {
		if (nvme_first_scan(ns->disk))
			pr_warn("%s: ignoring zoned namespace IO boundary\n",
				ns->disk->disk_name);
		return;
	}

	blk_queue_chunk_sectors(ns->queue, iob);
}
static int nvme_update_ns_info_generic(struct nvme_ns *ns,
		struct nvme_ns_info *info)
{
	blk_mq_freeze_queue(ns->disk->queue);
	nvme_set_queue_limits(ns->ctrl, ns->queue);
	set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
	blk_mq_unfreeze_queue(ns->disk->queue);

	if (nvme_ns_head_multipath(ns->head)) {
		blk_mq_freeze_queue(ns->head->disk->queue);
		set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
		nvme_mpath_revalidate_paths(ns);
		blk_stack_limits(&ns->head->disk->queue->limits,
				 &ns->queue->limits, 0);
		ns->head->disk->flags |= GENHD_FL_HIDDEN;
		blk_mq_unfreeze_queue(ns->head->disk->queue);
	}

	/* Hide the block-interface for these devices */
	ns->disk->flags |= GENHD_FL_HIDDEN;
	set_bit(NVME_NS_READY, &ns->flags);
	return 0;
}
static int nvme_update_ns_info_block(struct nvme_ns *ns,
		struct nvme_ns_info *info)
{
	struct nvme_id_ns *id;
	unsigned lbaf;
	int ret;

	ret = nvme_identify_ns(ns->ctrl, info->nsid, &id);
	if (ret)
		return ret;

	blk_mq_freeze_queue(ns->disk->queue);
	lbaf = nvme_lbaf_index(id->flbas);
	ns->lba_shift = id->lbaf[lbaf].ds;
	nvme_set_queue_limits(ns->ctrl, ns->queue);

	nvme_configure_metadata(ns, id);
	nvme_set_chunk_sectors(ns, id);
	nvme_update_disk_info(ns->disk, ns, id);

	if (ns->head->ids.csi == NVME_CSI_ZNS) {
		ret = nvme_update_zone_info(ns, lbaf);
		if (ret) {
			blk_mq_unfreeze_queue(ns->disk->queue);
			goto out;
		}
	}

	set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
	set_bit(NVME_NS_READY, &ns->flags);
	blk_mq_unfreeze_queue(ns->disk->queue);

	if (blk_queue_is_zoned(ns->queue)) {
		ret = nvme_revalidate_zones(ns);
		if (ret && !nvme_first_scan(ns->disk))
			goto out;
	}

	if (nvme_ns_head_multipath(ns->head)) {
		blk_mq_freeze_queue(ns->head->disk->queue);
		nvme_update_disk_info(ns->head->disk, ns, id);
		set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
		nvme_mpath_revalidate_paths(ns);
		blk_stack_limits(&ns->head->disk->queue->limits,
				 &ns->queue->limits, 0);
		disk_update_readahead(ns->head->disk);
		blk_mq_unfreeze_queue(ns->head->disk->queue);
	}

	ret = 0;
out:
	/*
	 * If probing fails due to an unsupported feature, hide the block
	 * device, but still allow other access.
	 */
	if (ret == -ENODEV) {
		ns->disk->flags |= GENHD_FL_HIDDEN;
		set_bit(NVME_NS_READY, &ns->flags);
		ret = 0;
	}
	kfree(id);
	return ret;
}
static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
{
	switch (info->ids.csi) {
	case NVME_CSI_ZNS:
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			dev_info(ns->ctrl->device,
	"block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
				info->nsid);
			return nvme_update_ns_info_generic(ns, info);
		}
		return nvme_update_ns_info_block(ns, info);
	case NVME_CSI_NVM:
		return nvme_update_ns_info_block(ns, info);
	default:
		dev_info(ns->ctrl->device,
			"block device for nsid %u not supported (csi %u)\n",
			info->nsid, info->ids.csi);
		return nvme_update_ns_info_generic(ns, info);
	}
}
)
2070 case PR_WRITE_EXCLUSIVE
:
2072 case PR_EXCLUSIVE_ACCESS
:
2074 case PR_WRITE_EXCLUSIVE_REG_ONLY
:
2076 case PR_EXCLUSIVE_ACCESS_REG_ONLY
:
2078 case PR_WRITE_EXCLUSIVE_ALL_REGS
:
2080 case PR_EXCLUSIVE_ACCESS_ALL_REGS
:
2087 static int nvme_send_ns_head_pr_command(struct block_device
*bdev
,
2088 struct nvme_command
*c
, u8 data
[16])
2090 struct nvme_ns_head
*head
= bdev
->bd_disk
->private_data
;
2091 int srcu_idx
= srcu_read_lock(&head
->srcu
);
2092 struct nvme_ns
*ns
= nvme_find_path(head
);
2093 int ret
= -EWOULDBLOCK
;
2096 c
->common
.nsid
= cpu_to_le32(ns
->head
->ns_id
);
2097 ret
= nvme_submit_sync_cmd(ns
->queue
, c
, data
, 16);
2099 srcu_read_unlock(&head
->srcu
, srcu_idx
);
static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
		u8 data[16])
{
	c->common.nsid = cpu_to_le32(ns->head->ns_id);
	return nvme_submit_sync_cmd(ns->queue, c, data, 16);
}
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_command c = { };
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	c.common.opcode = op;
	c.common.cdw10 = cpu_to_le32(cdw10);

	if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
	    bdev->bd_disk->fops == &nvme_ns_head_ops)
		return nvme_send_ns_head_pr_command(bdev, &c, data);
	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, data);
}
static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}
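
/*
 * cdw10 layout for Reservation Register, per the NVMe spec: RREGA in bits
 * 2:0 (0 = register, 2 = replace, chosen above by whether an old key was
 * supplied), IEKEY in bit 3, and CPTPL in bits 31:30, set to 11b here so
 * the registration persists through power loss.
 */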
static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}
static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);

	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};
#ifdef CONFIG_BLK_SED_OPAL
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
{
	struct nvme_ctrl *ctrl = data;
	struct nvme_command cmd = { };

	if (send)
		cmd.common.opcode = nvme_admin_security_send;
	else
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw11 = cpu_to_le32(len);

	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
			NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */

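/*
 * cdw10 layout used by nvme_sec_submit() above, per the Security
 * Send/Receive commands: SECP (security protocol) occupies bits 31:24 and
 * SPSP (protocol specific) bits 23:08.  Worked example with illustrative
 * values: secp = 0x01, spsp = 0x0001 yields cdw10 = 0x01000100.
 */
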
#ifdef CONFIG_BLK_DEV_ZONED
static int nvme_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb,
			data);
}
#else
#define nvme_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */

static const struct block_device_operations nvme_bdev_ops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= blkdev_compat_ptr_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.report_zones	= nvme_report_zones,
	.pr_ops		= &nvme_pr_ops,
};

static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 timeout, bool enabled)
{
	unsigned long timeout_jiffies = ((timeout + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if (csts == ~0)
			return -ENODEV;
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		usleep_range(1000, 2000);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout_jiffies)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s, CSTS=0x%x\n",
				enabled ? "initialisation" : "reset", csts);
			return -ENODEV;
		}
	}

	return ret;
}

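/*
 * Worked example (illustrative): CAP.TO is in units of 500ms, so for a
 * controller reporting timeout = 30 the loop above waits up to
 * ((30 + 1) * HZ / 2) jiffies, about 15.5 seconds, polling CSTS every
 * 1-2ms.
 */
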
/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
		msleep(NVME_QUIRK_DELAY_AMOUNT);

	return nvme_wait_ready(ctrl, NVME_CAP_TIMEOUT(ctrl->cap), false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);

int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned dev_page_min;
	u32 timeout;
	int ret;

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;

	if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
		dev_err(ctrl->device,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
		return -ENODEV;
	}

	if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
		ctrl->ctrl_config = NVME_CC_CSS_CSI;
	else
		ctrl->ctrl_config = NVME_CC_CSS_NVM;

	if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
		u32 crto;

		ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
		if (ret) {
			dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
				ret);
			return ret;
		}

		if (ctrl->cap & NVME_CAP_CRMS_CRIMS) {
			ctrl->ctrl_config |= NVME_CC_CRIME;
			timeout = NVME_CRTO_CRIMT(crto);
		} else {
			timeout = NVME_CRTO_CRWMT(crto);
		}
	} else {
		timeout = NVME_CAP_TIMEOUT(ctrl->cap);
	}

	ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	/* Flush write to device (required if transport is PCI) */
	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config);
	if (ret)
		return ret;

	ctrl->ctrl_config |= NVME_CC_ENABLE;
	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, timeout, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);

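/*
 * CC.MPS worked example (illustrative): with NVME_CTRL_PAGE_SHIFT = 12 the
 * code above programs MPS = 12 - 12 = 0, i.e. a 4KiB controller page size,
 * which is valid whenever the CAP.MPSMIN check earlier passed with
 * dev_page_min = 12.
 */
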
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);

static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
{
	__le64 ts;
	int ret;

	if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
		return 0;

	ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
	ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
			NULL);
	if (ret)
		dev_warn_once(ctrl->device,
			"could not set timestamp (%d)\n", ret);
	return ret;
}

static int nvme_configure_host_options(struct nvme_ctrl *ctrl)
{
	struct nvme_feat_host_behavior *host;
	u8 acre = 0, lbafee = 0;
	int ret;

	/* Don't bother enabling the feature if retry delay is not reported */
	if (ctrl->crdt[0])
		acre = NVME_ENABLE_ACRE;
	if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)
		lbafee = NVME_ENABLE_LBAFEE;

	if (!acre && !lbafee)
		return 0;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return 0;

	host->acre = acre;
	host->lbafee = lbafee;
	ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
				host, sizeof(*host), NULL);
	kfree(host);
	return ret;
}

/*
 * The function checks whether the given total (exlat + enlat) latency of
 * a power state allows the latter to be used as an APST transition target.
 * It does so by comparing the latency to the primary and secondary latency
 * tolerances defined by module params. If there's a match, the corresponding
 * timeout value is returned and the matching tolerance index (1 or 2) is
 * reported.
 */
static bool nvme_apst_get_transition_time(u64 total_latency,
		u64 *transition_time, unsigned *last_index)
{
	if (total_latency <= apst_primary_latency_tol_us) {
		if (*last_index == 1)
			return false;
		*last_index = 1;
		*transition_time = apst_primary_timeout_ms;
		return true;
	}
	if (apst_secondary_timeout_ms &&
		total_latency <= apst_secondary_latency_tol_us) {
		if (*last_index <= 2)
			return false;
		*last_index = 2;
		*transition_time = apst_secondary_timeout_ms;
		return true;
	}
	return false;
}

/*
 * APST (Autonomous Power State Transition) lets us program a table of power
 * state transitions that the controller will perform automatically.
 *
 * Depending on module params, one of the two supported techniques will be used:
 *
 * - If the parameters provide explicit timeouts and tolerances, they will be
 *   used to build a table with up to 2 non-operational states to transition to.
 *   The default parameter values were selected based on the values used by
 *   Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic
 *   regeneration of the APST table in the event of switching between external
 *   and battery power, the timeouts and tolerances reflect a compromise
 *   between values used by Microsoft for AC and battery scenarios.
 * - If not, we'll configure the table with a simple heuristic: we are willing
 *   to spend at most 2% of the time transitioning between power states.
 *   Therefore, when running in any given state, we will enter the next
 *   lower-power non-operational state after waiting 50 * (enlat + exlat)
 *   microseconds, as long as that state's exit latency is under the requested
 *   maximum latency.
 *
 * We will not autonomously enter any non-operational state for which the total
 * latency exceeds ps_max_latency_us.
 *
 * Users can set ps_max_latency_us to zero to turn off APST.
 */
static int nvme_configure_apst(struct nvme_ctrl *ctrl)
{
	struct nvme_feat_auto_pst *table;
	unsigned apste = 0;
	u64 max_lat_us = 0;
	__le64 target = 0;
	int max_ps = -1;
	int state;
	int ret;
	unsigned last_lt_index = UINT_MAX;

	/*
	 * If APST isn't supported or if we haven't been initialized yet,
	 * then don't do anything.
	 */
	if (!ctrl->apsta)
		return 0;

	if (ctrl->npss > 31) {
		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
		return 0;
	}

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return 0;

	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
		/* Turn off APST. */
		dev_dbg(ctrl->device, "APST disabled\n");
		goto done;
	}

	/*
	 * Walk through all states from lowest- to highest-power.
	 * According to the spec, lower-numbered states use more power.  NPSS,
	 * despite the name, is the index of the lowest-power state, not the
	 * number of states.
	 */
	for (state = (int)ctrl->npss; state >= 0; state--) {
		u64 total_latency_us, exit_latency_us, transition_ms;

		if (target)
			table->entries[state] = target;

		/*
		 * Don't allow transitions to the deepest state if it's quirked
		 * off.
		 */
		if (state == ctrl->npss &&
		    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
			continue;

		/*
		 * Is this state a useful non-operational state for higher-power
		 * states to autonomously transition to?
		 */
		if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE))
			continue;

		exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
		if (exit_latency_us > ctrl->ps_max_latency_us)
			continue;

		total_latency_us = exit_latency_us +
			le32_to_cpu(ctrl->psd[state].entry_lat);

		/*
		 * This state is good. It can be used as the APST idle target
		 * for higher power states.
		 */
		if (apst_primary_timeout_ms && apst_primary_latency_tol_us) {
			if (!nvme_apst_get_transition_time(total_latency_us,
					&transition_ms, &last_lt_index))
				continue;
		} else {
			transition_ms = total_latency_us + 19;
			do_div(transition_ms, 20);
			if (transition_ms > (1 << 24) - 1)
				transition_ms = (1 << 24) - 1;
		}

		target = cpu_to_le64((state << 3) | (transition_ms << 8));
		if (max_ps == -1)
			max_ps = state;
		if (total_latency_us > max_lat_us)
			max_lat_us = total_latency_us;
	}

	if (max_ps == -1)
		dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
	else
		dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
			max_ps, max_lat_us, (int)sizeof(*table), table);
	apste = 1;

done:
	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
				table, sizeof(*table), NULL);
	if (ret)
		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
	kfree(table);
	return ret;
}

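/*
 * APST table entry encoding worked example (illustrative): the Idle
 * Transition Power State sits in bits 7:3 and the Idle Time Prior to
 * Transition, in ms, in bits 31:8.  Under the 2% heuristic above, a state
 * with enlat + exlat = 1500us gets transition_ms = (1500 + 19) / 20 = 75,
 * so a higher-power state targeting state 4 stores (4 << 3) | (75 << 8).
 */
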
static void nvme_set_latency_tolerance(struct device *dev, s32 val)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	u64 latency;

	switch (val) {
	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
	case PM_QOS_LATENCY_ANY:
		latency = U64_MAX;
		break;
	default:
		latency = val;
	}

	if (ctrl->ps_max_latency_us != latency) {
		ctrl->ps_max_latency_us = latency;
		if (ctrl->state == NVME_CTRL_LIVE)
			nvme_configure_apst(ctrl);
	}
}

struct nvme_core_quirk_entry {
	/*
	 * NVMe model and firmware strings are padded with spaces.  For
	 * simplicity, strings in the quirk table are padded with NULLs
	 * instead.
	 */
	u16 vid;
	const char *mn;
	const char *fr;
	unsigned long quirks;
};

static const struct nvme_core_quirk_entry core_quirks[] = {
	{
		/*
		 * This Toshiba device seems to die using any APST states.  See:
		 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
		 */
		.vid = 0x1179,
		.mn = "THNSF5256GPUK TOSHIBA",
		.quirks = NVME_QUIRK_NO_APST,
	},
	{
		/*
		 * This LiteON CL1-3D*-Q11 firmware version has a race
		 * condition associated with actions related to suspend to idle.
		 * LiteON has resolved the problem in future firmware.
		 */
		.vid = 0x14a4,
		.fr = "22301111",
		.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
	},
	{
		/*
		 * This Kioxia CD6-V Series / HPE PE8030 device times out and
		 * aborts I/O during any load, but more easily reproducible
		 * with discards (fstrim).
		 *
		 * The device is left in a state where it is also not possible
		 * to use "nvme set-feature" to disable APST, but booting with
		 * nvme_core.default_ps_max_latency=0 works.
		 */
		.vid = 0x1e0f,
		.mn = "KCD6XVUL6T40",
		.quirks = NVME_QUIRK_NO_APST,
	},
	{
		/*
		 * The external Samsung X5 SSD fails initialization without a
		 * delay before checking if it is ready and has a whole set of
		 * other problems.  To make this even more interesting, it
		 * shares the PCI ID with internal Samsung 970 Evo Plus that
		 * does not need or want these quirks.
		 */
		.vid = 0x144d,
		.mn = "Samsung Portable SSD X5",
		.quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
			  NVME_QUIRK_NO_DEEPEST_PS |
			  NVME_QUIRK_IGNORE_DEV_SUBNQN,
	}
};

/* match is null-terminated but idstr is space-padded. */
static bool string_matches(const char *idstr, const char *match, size_t len)
{
	size_t matchlen;

	if (!match)
		return true;

	matchlen = strlen(match);
	WARN_ON_ONCE(matchlen > len);

	if (memcmp(idstr, match, matchlen))
		return false;

	for (; matchlen < len; matchlen++)
		if (idstr[matchlen] != ' ')
			return false;

	return true;
}

static bool quirk_matches(const struct nvme_id_ctrl *id,
			  const struct nvme_core_quirk_entry *q)
{
	return q->vid == le16_to_cpu(id->vid) &&
		string_matches(id->mn, q->mn, sizeof(id->mn)) &&
		string_matches(id->fr, q->fr, sizeof(id->fr));
}

static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	size_t nqnlen;
	int off;

	if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
		nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
		if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
			strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
			return;
		}

		if (ctrl->vs >= NVME_VS(1, 2, 1))
			dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
	}

	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
	off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
			"nqn.2014.08.org.nvmexpress:%04x%04x",
			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
	memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
	off += sizeof(id->sn);
	memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
	off += sizeof(id->mn);
	memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
}

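/*
 * Illustrative example of the generated fake NQN (hypothetical IDs): a
 * controller reporting vid 0x8086 and ssvid 0x8086 yields
 * "nqn.2014.08.org.nvmexpress:80868086" followed by the raw, space-padded
 * serial number and model number bytes copied in above.
 */
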
static void nvme_release_subsystem(struct device *dev)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	if (subsys->instance >= 0)
		ida_free(&nvme_instance_ida, subsys->instance);
	kfree(subsys);
}

static void nvme_destroy_subsystem(struct kref *ref)
{
	struct nvme_subsystem *subsys =
			container_of(ref, struct nvme_subsystem, ref);

	mutex_lock(&nvme_subsystems_lock);
	list_del(&subsys->entry);
	mutex_unlock(&nvme_subsystems_lock);

	ida_destroy(&subsys->ns_ida);
	device_del(&subsys->dev);
	put_device(&subsys->dev);
}

static void nvme_put_subsystem(struct nvme_subsystem *subsys)
{
	kref_put(&subsys->ref, nvme_destroy_subsystem);
}

static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
{
	struct nvme_subsystem *subsys;

	lockdep_assert_held(&nvme_subsystems_lock);

	/*
	 * Fail matches for discovery subsystems. This results
	 * in each discovery controller bound to a unique subsystem.
	 * This avoids issues with validating controller values
	 * that can only be true when there is a single unique subsystem.
	 * There may be multiple and completely independent entities
	 * that provide discovery controllers.
	 */
	if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
		return NULL;

	list_for_each_entry(subsys, &nvme_subsystems, entry) {
		if (strcmp(subsys->subnqn, subsysnqn))
			continue;
		if (!kref_get_unless_zero(&subsys->ref))
			continue;
		return subsys;
	}

	return NULL;
}

#define SUBSYS_ATTR_RO(_name, _mode, _show)			\
	struct device_attribute subsys_attr_##_name = \
		__ATTR(_name, _mode, _show, NULL)

static ssize_t nvme_subsys_show_nqn(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sysfs_emit(buf, "%s\n", subsys->subnqn);
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);

static ssize_t nvme_subsys_show_type(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	switch (subsys->subtype) {
	case NVME_NQN_DISC:
		return sysfs_emit(buf, "discovery\n");
	case NVME_NQN_NVME:
		return sysfs_emit(buf, "nvm\n");
	default:
		return sysfs_emit(buf, "reserved\n");
	}
}
static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);

#define nvme_subsys_show_str_function(field)				\
static ssize_t subsys_##field##_show(struct device *dev,		\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_subsystem *subsys =					\
		container_of(dev, struct nvme_subsystem, dev);		\
	return sysfs_emit(buf, "%.*s\n",				\
			   (int)sizeof(subsys->field), subsys->field);	\
}									\
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);

nvme_subsys_show_str_function(model);
nvme_subsys_show_str_function(serial);
nvme_subsys_show_str_function(firmware_rev);

static struct attribute *nvme_subsys_attrs[] = {
	&subsys_attr_model.attr,
	&subsys_attr_serial.attr,
	&subsys_attr_firmware_rev.attr,
	&subsys_attr_subsysnqn.attr,
	&subsys_attr_subsystype.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&subsys_attr_iopolicy.attr,
#endif
	NULL,
};

static const struct attribute_group nvme_subsys_attrs_group = {
	.attrs = nvme_subsys_attrs,
};

static const struct attribute_group *nvme_subsys_attrs_groups[] = {
	&nvme_subsys_attrs_group,
	NULL,
};

static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
{
	return ctrl->opts && ctrl->opts->discovery_nqn;
}

static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
		struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	struct nvme_ctrl *tmp;

	lockdep_assert_held(&nvme_subsystems_lock);

	list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
		if (nvme_state_terminal(tmp))
			continue;

		if (tmp->cntlid == ctrl->cntlid) {
			dev_err(ctrl->device,
				"Duplicate cntlid %u with %s, subsys %s, rejecting\n",
				ctrl->cntlid, dev_name(tmp->device),
				subsys->subnqn);
			return false;
		}

		if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
		    nvme_discovery_ctrl(ctrl))
			continue;

		dev_err(ctrl->device,
			"Subsystem does not support multiple controllers\n");
		return false;
	}

	return true;
}

static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	struct nvme_subsystem *subsys, *found;
	int ret;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return -ENOMEM;

	subsys->instance = -1;
	mutex_init(&subsys->lock);
	kref_init(&subsys->ref);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->nsheads);
	nvme_init_subnqn(subsys, ctrl, id);
	memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
	memcpy(subsys->model, id->mn, sizeof(subsys->model));
	memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
	subsys->vendor_id = le16_to_cpu(id->vid);
	subsys->cmic = id->cmic;

	/* Versions prior to 1.4 don't necessarily report a valid type */
	if (id->cntrltype == NVME_CTRL_DISC ||
	    !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
		subsys->subtype = NVME_NQN_DISC;
	else
		subsys->subtype = NVME_NQN_NVME;

	if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
		dev_err(ctrl->device,
			"Subsystem %s is not a discovery controller",
			subsys->subnqn);
		kfree(subsys);
		return -EINVAL;
	}
	subsys->awupf = le16_to_cpu(id->awupf);
	nvme_mpath_default_iopolicy(subsys);

	subsys->dev.class = nvme_subsys_class;
	subsys->dev.release = nvme_release_subsystem;
	subsys->dev.groups = nvme_subsys_attrs_groups;
	dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
	device_initialize(&subsys->dev);

	mutex_lock(&nvme_subsystems_lock);
	found = __nvme_find_get_subsystem(subsys->subnqn);
	if (found) {
		put_device(&subsys->dev);
		subsys = found;

		if (!nvme_validate_cntlid(subsys, ctrl, id)) {
			ret = -EINVAL;
			goto out_put_subsystem;
		}
	} else {
		ret = device_add(&subsys->dev);
		if (ret) {
			dev_err(ctrl->device,
				"failed to register subsystem device.\n");
			put_device(&subsys->dev);
			goto out_unlock;
		}
		ida_init(&subsys->ns_ida);
		list_add_tail(&subsys->entry, &nvme_subsystems);
	}

	ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
				dev_name(ctrl->device));
	if (ret) {
		dev_err(ctrl->device,
			"failed to create sysfs link from subsystem.\n");
		goto out_put_subsystem;
	}

	if (!found)
		subsys->instance = ctrl->instance;
	ctrl->subsys = subsys;
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&nvme_subsystems_lock);
	return 0;

out_put_subsystem:
	nvme_put_subsystem(subsys);
out_unlock:
	mutex_unlock(&nvme_subsystems_lock);
	return ret;
}

int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset)
{
	struct nvme_command c = { };
	u32 dwlen = nvme_bytes_to_numd(size);

	c.get_log_page.opcode = nvme_admin_get_log_page;
	c.get_log_page.nsid = cpu_to_le32(nsid);
	c.get_log_page.lid = log_page;
	c.get_log_page.lsp = lsp;
	c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
	c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
	c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
	c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
	c.get_log_page.csi = csi;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}

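/*
 * NUMD worked example (illustrative): nvme_bytes_to_numd() returns the
 * zero-based dword count, so a 4096-byte log reads as numd = 4096 / 4 - 1
 * = 1023, which fits entirely in NUMDL (numdu = 0) in the split above.
 */
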
static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
				struct nvme_effects_log **log)
{
	struct nvme_effects_log	*cel = xa_load(&ctrl->cels, csi);
	int ret;

	if (cel)
		goto out;

	cel = kzalloc(sizeof(*cel), GFP_KERNEL);
	if (!cel)
		return -ENOMEM;

	ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
			cel, sizeof(*cel), 0);
	if (ret) {
		kfree(cel);
		return ret;
	}

	xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
out:
	*log = cel;
	return 0;
}

static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units)
{
	u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val;

	if (check_shl_overflow(1U, units + page_shift - 9, &val))
		return UINT_MAX;
	return val;
}

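/*
 * Worked example (illustrative): with CAP.MPSMIN = 0 (4KiB pages) an MDTS
 * value of 5 gives 1 << (5 + 12 - 9) = 256 sectors of 512 bytes, i.e. a
 * 128KiB maximum transfer size.
 */
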
static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
{
	struct nvme_command c = { };
	struct nvme_id_ctrl_nvm *id;
	int ret;

	if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
		ctrl->max_discard_sectors = UINT_MAX;
		ctrl->max_discard_segments = NVME_DSM_MAX_RANGES;
	} else {
		ctrl->max_discard_sectors = 0;
		ctrl->max_discard_segments = 0;
	}

	/*
	 * Even though NVMe spec explicitly states that MDTS is not applicable
	 * to the write-zeroes, we are cautious and limit the size to the
	 * controllers max_hw_sectors value, which is based on the MDTS field
	 * and possibly other limiting factors.
	 */
	if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
	    !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
		ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;
	else
		ctrl->max_zeroes_sectors = 0;

	if (nvme_ctrl_limited_cns(ctrl))
		return 0;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CS_CTRL;
	c.identify.csi = NVME_CSI_NVM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
	if (ret)
		goto free_data;

	if (id->dmrl)
		ctrl->max_discard_segments = id->dmrl;
	ctrl->dmrsl = le32_to_cpu(id->dmrsl);
	if (id->wzsl)
		ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);

free_data:
	kfree(id);
	return ret;
}

static int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u32 max_hw_sectors;
	bool prev_apst_enabled;
	int ret;

	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
		return -EIO;
	}

	if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
		ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
		if (ret < 0)
			goto out_free;
	}

	if (!(ctrl->ops->flags & NVME_F_FABRICS))
		ctrl->cntlid = le16_to_cpu(id->cntlid);

	if (!ctrl->identified) {
		unsigned int i;

		ret = nvme_init_subsystem(ctrl, id);
		if (ret)
			goto out_free;

		/*
		 * Check for quirks.  Quirk can depend on firmware version,
		 * so, in principle, the set of quirks present can change
		 * across a reset.  As a possible future enhancement, we
		 * could re-scan for quirks every time we reinitialize
		 * the device, but we'd have to make sure that the driver
		 * behaves intelligently if the quirks change.
		 */
		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
			if (quirk_matches(id, &core_quirks[i]))
				ctrl->quirks |= core_quirks[i].quirks;
		}
	}

	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
	}

	ctrl->crdt[0] = le16_to_cpu(id->crdt1);
	ctrl->crdt[1] = le16_to_cpu(id->crdt2);
	ctrl->crdt[2] = le16_to_cpu(id->crdt3);

	ctrl->oacs = le16_to_cpu(id->oacs);
	ctrl->oncs = le16_to_cpu(id->oncs);
	ctrl->mtfa = le16_to_cpu(id->mtfa);
	ctrl->oaes = le32_to_cpu(id->oaes);
	ctrl->wctemp = le16_to_cpu(id->wctemp);
	ctrl->cctemp = le16_to_cpu(id->cctemp);

	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	if (id->mdts)
		max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts);
	else
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

	nvme_set_queue_limits(ctrl, ctrl->admin_q);
	ctrl->sgls = le32_to_cpu(id->sgls);
	ctrl->kas = le16_to_cpu(id->kas);
	ctrl->max_namespaces = le32_to_cpu(id->mnan);
	ctrl->ctratt = le32_to_cpu(id->ctratt);

	ctrl->cntrltype = id->cntrltype;
	ctrl->dctype = id->dctype;

	if (id->rtd3e) {
		/* us -> s */
		u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;

		ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
						 shutdown_timeout, 60);

		if (ctrl->shutdown_timeout != shutdown_timeout)
			dev_info(ctrl->device,
				 "Shutdown timeout set to %u seconds\n",
				 ctrl->shutdown_timeout);
	} else
		ctrl->shutdown_timeout = shutdown_timeout;

	ctrl->npss = id->npss;
	ctrl->apsta = id->apsta;
	prev_apst_enabled = ctrl->apst_enabled;
	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
		if (force_apst && id->apsta) {
			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
			ctrl->apst_enabled = true;
		} else {
			ctrl->apst_enabled = false;
		}
	} else {
		ctrl->apst_enabled = id->apsta;
	}
	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		ctrl->icdoff = le16_to_cpu(id->icdoff);
		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
		ctrl->maxcmd = le16_to_cpu(id->maxcmd);

		/*
		 * In fabrics we need to verify the cntlid matches the
		 * admin connect
		 */
		if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
			dev_err(ctrl->device,
				"Mismatching cntlid: Connect %u vs Identify "
				"%u, rejecting\n",
				ctrl->cntlid, le16_to_cpu(id->cntlid));
			ret = -EINVAL;
			goto out_free;
		}

		if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
			dev_err(ctrl->device,
				"keep-alive support is mandatory for fabrics\n");
			ret = -EINVAL;
			goto out_free;
		}
	} else {
		ctrl->hmpre = le32_to_cpu(id->hmpre);
		ctrl->hmmin = le32_to_cpu(id->hmmin);
		ctrl->hmminds = le32_to_cpu(id->hmminds);
		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
	}

	ret = nvme_mpath_init_identify(ctrl, id);
	if (ret < 0)
		goto out_free;

	if (ctrl->apst_enabled && !prev_apst_enabled)
		dev_pm_qos_expose_latency_tolerance(ctrl->device);
	else if (!ctrl->apst_enabled && prev_apst_enabled)
		dev_pm_qos_hide_latency_tolerance(ctrl->device);

out_free:
	kfree(id);
	return ret;
}

/*
 * Initialize the cached copies of the Identify data and various controller
 * register in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
		return ret;
	}

	ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);

	if (ctrl->vs >= NVME_VS(1, 1, 0))
		ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);

	ret = nvme_init_identify(ctrl);
	if (ret)
		return ret;

	ret = nvme_configure_apst(ctrl);
	if (ret < 0)
		return ret;

	ret = nvme_configure_timestamp(ctrl);
	if (ret < 0)
		return ret;

	ret = nvme_configure_host_options(ctrl);
	if (ret < 0)
		return ret;

	if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
		ret = nvme_hwmon_init(ctrl);
		if (ret < 0)
			return ret;
	}

	ctrl->identified = true;

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish);

static int nvme_dev_open(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl =
		container_of(inode->i_cdev, struct nvme_ctrl, cdev);

	switch (ctrl->state) {
	case NVME_CTRL_LIVE:
		break;
	default:
		return -EWOULDBLOCK;
	}

	nvme_get_ctrl(ctrl);
	if (!try_module_get(ctrl->ops->module)) {
		nvme_put_ctrl(ctrl);
		return -EINVAL;
	}

	file->private_data = ctrl;
	return 0;
}

*inode
, struct file
*file
)
3298 struct nvme_ctrl
*ctrl
=
3299 container_of(inode
->i_cdev
, struct nvme_ctrl
, cdev
);
3301 module_put(ctrl
->ops
->module
);
3302 nvme_put_ctrl(ctrl
);
static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.release	= nvme_dev_release,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.uring_cmd	= nvme_dev_uring_cmd,
};

static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = nvme_reset_ctrl_sync(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (disk->fops == &nvme_bdev_ops)
		return nvme_get_ns_from_dev(dev)->head;
	else
		return disk->private_data;
}

static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct nvme_ns_ids *ids = &head->ids;
	struct nvme_subsystem *subsys = head->subsys;
	int serial_len = sizeof(subsys->serial);
	int model_len = sizeof(subsys->model);

	if (!uuid_is_null(&ids->uuid))
		return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);

	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);

	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);

	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
				  subsys->serial[serial_len - 1] == '\0'))
		serial_len--;
	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
				 subsys->model[model_len - 1] == '\0'))
		model_len--;

	return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
		serial_len, subsys->serial, model_len, subsys->model,
		head->ns_id);
}
static DEVICE_ATTR_RO(wwid);

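/*
 * Illustrative fallback WWID (hypothetical values): without a UUID, NGUID
 * or EUI-64 the code above emits something like
 * "nvme.8086-<serial bytes>-<model bytes>-0000000a", built from the vendor
 * id, the trimmed serial/model strings and the namespace id (here 10,
 * printed as %08x).
 */
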
static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
static DEVICE_ATTR_RO(nguid);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	/* For backward compatibility expose the NGUID to userspace if
	 * we have no UUID set
	 */
	if (uuid_is_null(&ids->uuid)) {
		dev_warn_ratelimited(dev,
			"No UUID available providing old NGUID\n");
		return sysfs_emit(buf, "%pU\n", ids->nguid);
	}
	return sysfs_emit(buf, "%pU\n", &ids->uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
static DEVICE_ATTR_RO(eui);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
static DEVICE_ATTR_RO(nsid);

static struct attribute *nvme_ns_id_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&dev_attr_ana_grpid.attr,
	&dev_attr_ana_state.attr,
#endif
	NULL,
};

static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	if (a == &dev_attr_uuid.attr) {
		if (uuid_is_null(&ids->uuid) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			return 0;
	}
#ifdef CONFIG_NVME_MULTIPATH
	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
		if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */
			return 0;
		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
			return 0;
	}
#endif
	return a->mode;
}

static const struct attribute_group nvme_ns_id_attr_group = {
	.attrs		= nvme_ns_id_attrs,
	.is_visible	= nvme_ns_id_attrs_are_visible,
};

const struct attribute_group *nvme_ns_id_attr_groups[] = {
	&nvme_ns_id_attr_group,
	NULL,
};

#define nvme_show_str_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
        return sysfs_emit(buf, "%.*s\n",					\
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);	\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);

#define nvme_show_int_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
        return sysfs_emit(buf, "%d\n", ctrl->field);				\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
nvme_show_int_function(kato);

static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (device_remove_file_self(dev, attr))
		nvme_delete_ctrl_sync(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_CONNECTING]	= "connecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
		[NVME_CTRL_DEAD]	= "dead",
	};

	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
	    state_name[ctrl->state])
		return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);

	return sysfs_emit(buf, "unknown state\n");
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
}
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);

static ssize_t nvme_sysfs_show_hostid(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
}
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (ctrl->opts->max_reconnects == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n",
			  opts->max_reconnects * opts->reconnect_delay);
}

static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ctrl_loss_tmo, err;

	err = kstrtoint(buf, 10, &ctrl_loss_tmo);
	if (err)
		return -EINVAL;

	if (ctrl_loss_tmo < 0)
		opts->max_reconnects = -1;
	else
		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
						opts->reconnect_delay);
	return count;
}
static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);

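/*
 * Worked example (illustrative): writing 600 to ctrl_loss_tmo with a
 * reconnect_delay of 10 seconds stores max_reconnects =
 * DIV_ROUND_UP(600, 10) = 60, and the show side above reports
 * 60 * 10 = 600 back to userspace.
 */
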
static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->reconnect_delay == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
}

static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	unsigned int v;
	int err;

	err = kstrtou32(buf, 10, &v);
	if (err)
		return err;

	ctrl->opts->reconnect_delay = v;
	return count;
}
static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
	nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);

static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->opts->fast_io_fail_tmo == -1)
		return sysfs_emit(buf, "off\n");
	return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
}

static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int fast_io_fail_tmo, err;

	err = kstrtoint(buf, 10, &fast_io_fail_tmo);
	if (err)
		return -EINVAL;

	if (fast_io_fail_tmo < 0)
		opts->fast_io_fail_tmo = -1;
	else
		opts->fast_io_fail_tmo = fast_io_fail_tmo;
	return count;
}
static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
	nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);

static ssize_t cntrltype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_CTRL_IO] = "io\n",
		[NVME_CTRL_DISC] = "discovery\n",
		[NVME_CTRL_ADMIN] = "admin\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->cntrltype]);
}
static DEVICE_ATTR_RO(cntrltype);

static ssize_t dctype_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	static const char * const type[] = {
		[NVME_DCTYPE_NOT_REPORTED] = "none\n",
		[NVME_DCTYPE_DDC] = "ddc\n",
		[NVME_DCTYPE_CDC] = "cdc\n",
	};
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
		return sysfs_emit(buf, "reserved\n");

	return sysfs_emit(buf, type[ctrl->dctype]);
}
static DEVICE_ATTR_RO(dctype);

#ifdef CONFIG_NVME_AUTH
static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (!opts->dhchap_secret)
		return sysfs_emit(buf, "none\n");
	return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
}

static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	if (!ctrl->opts->dhchap_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	if (strcmp(dhchap_secret, opts->dhchap_secret)) {
		int ret;

		ret = nvme_auth_generate_key(dhchap_secret, &ctrl->host_key);
		if (ret)
			return ret;
		kfree(opts->dhchap_secret);
		opts->dhchap_secret = dhchap_secret;
		/* Key has changed; re-authentication with new key */
		nvme_auth_reset(ctrl);
	}
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}
static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);

static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;

	if (!opts->dhchap_ctrl_secret)
		return sysfs_emit(buf, "none\n");
	return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
}

static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	if (!ctrl->opts->dhchap_ctrl_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
		int ret;

		ret = nvme_auth_generate_key(dhchap_secret, &ctrl->ctrl_key);
		if (ret)
			return ret;
		kfree(opts->dhchap_ctrl_secret);
		opts->dhchap_ctrl_secret = dhchap_secret;
		/* Key has changed; re-authentication with new key */
		nvme_auth_reset(ctrl);
	}
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}
static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
#endif

static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_queue_count.attr,
	&dev_attr_sqsize.attr,
	&dev_attr_hostnqn.attr,
	&dev_attr_hostid.attr,
	&dev_attr_ctrl_loss_tmo.attr,
	&dev_attr_reconnect_delay.attr,
	&dev_attr_fast_io_fail_tmo.attr,
	&dev_attr_kato.attr,
	&dev_attr_cntrltype.attr,
	&dev_attr_dctype.attr,
#ifdef CONFIG_NVME_AUTH
	&dev_attr_dhchap_secret.attr,
	&dev_attr_dhchap_ctrl_secret.attr,
#endif
	NULL
};

static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;
	if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_hostid.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
		return 0;
#ifdef CONFIG_NVME_AUTH
	if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
		return 0;
	if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
		return 0;
#endif

	return a->mode;
}

static const struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};

static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl,
		unsigned nsid)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&ctrl->subsys->lock);

	list_for_each_entry(h, &ctrl->subsys->nsheads, entry) {
		/*
		 * Private namespaces can share NSIDs under some conditions.
		 * In that case we can't use the same ns_head for namespaces
		 * with the same NSID.
		 */
		if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h))
			continue;
		if (!list_empty(&h->list) && nvme_tryget_ns_head(h))
			return h;
	}

	return NULL;
}

static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys,
		struct nvme_ns_ids *ids)
{
	bool has_uuid = !uuid_is_null(&ids->uuid);
	bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid));
	bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(h, &subsys->nsheads, entry) {
		if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid))
			return -EINVAL;
		if (has_nguid &&
		    memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0)
			return -EINVAL;
		if (has_eui64 &&
		    memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0)
			return -EINVAL;
	}

	return 0;
}

static void nvme_cdev_rel(struct device *dev)
{
	ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
}

void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
{
	cdev_device_del(cdev, cdev_device);
	put_device(cdev_device);
}

int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
		const struct file_operations *fops, struct module *owner)
{
	int minor, ret;

	minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL);
	if (minor < 0)
		return minor;
	cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
	cdev_device->class = nvme_ns_chr_class;
	cdev_device->release = nvme_cdev_rel;
	device_initialize(cdev_device);
	cdev_init(cdev, fops);
	cdev->owner = owner;
	ret = cdev_device_add(cdev, cdev_device);
	if (ret)
		put_device(cdev_device);

	return ret;
}

static int nvme_ns_chr_open(struct inode *inode, struct file *file)
{
	return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev));
}

static int nvme_ns_chr_release(struct inode *inode, struct file *file)
{
	nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev));
	return 0;
}

static const struct file_operations nvme_ns_chr_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_ns_chr_open,
	.release	= nvme_ns_chr_release,
	.unlocked_ioctl	= nvme_ns_chr_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.uring_cmd	= nvme_ns_chr_uring_cmd,
};

static int nvme_add_ns_cdev(struct nvme_ns *ns)
{
	int ret;

	ns->cdev_device.parent = ns->ctrl->device;
	ret = dev_set_name(&ns->cdev_device, "ng%dn%d",
			   ns->ctrl->instance, ns->head->instance);
	if (ret)
		return ret;

	return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
			     ns->ctrl->ops->module);
}

static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
		struct nvme_ns_info *info)
{
	struct nvme_ns_head *head;
	size_t size = sizeof(*head);
	int ret = -ENOMEM;

#ifdef CONFIG_NVME_MULTIPATH
	size += num_possible_nodes() * sizeof(struct nvme_ns *);
#endif

	head = kzalloc(size, GFP_KERNEL);
	if (!head)
		goto out;
	ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL);
	if (ret < 0)
		goto out_free_head;
	head->instance = ret;
	INIT_LIST_HEAD(&head->list);
	ret = init_srcu_struct(&head->srcu);
	if (ret)
		goto out_ida_remove;
	head->subsys = ctrl->subsys;
	head->ns_id = info->nsid;
	head->ids = info->ids;
	head->shared = info->is_shared;
	kref_init(&head->ref);

	if (head->ids.csi) {
		ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
		if (ret)
			goto out_cleanup_srcu;
	} else
		head->effects = ctrl->effects;

	ret = nvme_mpath_alloc_disk(ctrl, head);
	if (ret)
		goto out_cleanup_srcu;

	list_add_tail(&head->entry, &ctrl->subsys->nsheads);

	kref_get(&ctrl->subsys->ref);

	return head;
out_cleanup_srcu:
	cleanup_srcu_struct(&head->srcu);
out_ida_remove:
	ida_free(&ctrl->subsys->ns_ida, head->instance);
out_free_head:
	kfree(head);
out:
	if (ret > 0)
		ret = blk_status_to_errno(nvme_error_status(ret));
	return ERR_PTR(ret);
}

static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
		struct nvme_ns_ids *ids)
{
	struct nvme_subsystem *s;
	int ret = 0;

	/*
	 * Note that this check is racy as we try to avoid holding the global
	 * lock over the whole ns_head creation.  But it is only intended as
	 * a sanity check anyway.
	 */
	mutex_lock(&nvme_subsystems_lock);
	list_for_each_entry(s, &nvme_subsystems, entry) {
		if (s == this)
			continue;
		mutex_lock(&s->lock);
		ret = nvme_subsys_check_duplicate_ids(s, ids);
		mutex_unlock(&s->lock);
		if (ret)
			break;
	}
	mutex_unlock(&nvme_subsystems_lock);

	return ret;
}

static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_ns_head *head = NULL;
	int ret;

	ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
	if (ret) {
		dev_err(ctrl->device,
			"globally duplicate IDs for nsid %d\n", info->nsid);
		nvme_print_device_info(ctrl);
		return ret;
	}

	mutex_lock(&ctrl->subsys->lock);
	head = nvme_find_ns_head(ctrl, info->nsid);
	if (!head) {
		ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids);
		if (ret) {
			dev_err(ctrl->device,
				"duplicate IDs in subsystem for nsid %d\n",
				info->nsid);
			goto out_unlock;
		}
		head = nvme_alloc_ns_head(ctrl, info);
		if (IS_ERR(head)) {
			ret = PTR_ERR(head);
			goto out_unlock;
		}
	} else {
		ret = -EINVAL;
		if (!info->is_shared || !head->shared) {
			dev_err(ctrl->device,
				"Duplicate unshared namespace %d\n",
				info->nsid);
			goto out_put_ns_head;
		}
		if (!nvme_ns_ids_equal(&head->ids, &info->ids)) {
			dev_err(ctrl->device,
				"IDs don't match for shared namespace %d\n",
				info->nsid);
			goto out_put_ns_head;
		}

		if (!multipath && !list_empty(&head->list)) {
			dev_warn(ctrl->device,
				"Found shared namespace %d, but multipathing not supported.\n",
				info->nsid);
			dev_warn_once(ctrl->device,
				"Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n");
		}
	}

	list_add_tail_rcu(&ns->siblings, &head->list);
	ns->head = head;
	mutex_unlock(&ctrl->subsys->lock);
	return 0;

out_put_ns_head:
	nvme_put_ns_head(head);
out_unlock:
	mutex_unlock(&ctrl->subsys->lock);
	return ret;
}

struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns, *ret = NULL;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->ns_id == nsid) {
			if (!nvme_get_ns(ns))
				continue;
			ret = ns;
			break;
		}
		if (ns->head->ns_id > nsid)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);

/*
 * Add the namespace to the controller list while keeping the list ordered.
 */
static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
{
	struct nvme_ns *tmp;

	list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
		if (tmp->head->ns_id < ns->head->ns_id) {
			list_add(&ns->list, &tmp->list);
			return;
		}
	}
	list_add(&ns->list, &ns->ctrl->namespaces);
}

static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	int node = ctrl->numa_node;

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return;

	disk = blk_mq_alloc_disk(ctrl->tagset, ns);
	if (IS_ERR(disk))
		goto out_free_ns;
	disk->fops = &nvme_bdev_ops;
	disk->private_data = ns;

	ns->disk = disk;
	ns->queue = disk->queue;

	if (ctrl->opts && ctrl->opts->data_digest)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);

	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
	if (ctrl->ops->supports_pci_p2pdma &&
	    ctrl->ops->supports_pci_p2pdma(ctrl))
		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);

	ns->ctrl = ctrl;
	kref_init(&ns->kref);

	if (nvme_init_ns_head(ns, info))
		goto out_cleanup_disk;

	/*
	 * If multipathing is enabled, the device name for all disks and not
	 * just those that represent shared namespaces needs to be based on the
	 * subsystem instance.  Using the controller instance for private
	 * namespaces could lead to naming collisions between shared and private
	 * namespaces if they don't use a common numbering scheme.
	 *
	 * If multipathing is not enabled, disk names must use the controller
	 * instance as shared namespaces will show up as multiple block
	 * devices.
	 */
	if (ns->head->disk) {
		sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
			ctrl->instance, ns->head->instance);
		disk->flags |= GENHD_FL_HIDDEN;
	} else if (multipath) {
		sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance,
			ns->head->instance);
	} else {
		sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
			ns->head->instance);
	}

	if (nvme_update_ns_info(ns, info))
		goto out_unlink_ns;

	down_write(&ctrl->namespaces_rwsem);
	nvme_ns_add_to_ctrl_list(ns);
	up_write(&ctrl->namespaces_rwsem);
	nvme_get_ctrl(ctrl);

	if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
		goto out_cleanup_ns_from_list;

	if (!nvme_ns_head_multipath(ns->head))
		nvme_add_ns_cdev(ns);

	nvme_mpath_add_disk(ns, info->anagrpid);
	nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);

	return;

 out_cleanup_ns_from_list:
	nvme_put_ctrl(ctrl);
	down_write(&ctrl->namespaces_rwsem);
	list_del_init(&ns->list);
	up_write(&ctrl->namespaces_rwsem);
 out_unlink_ns:
	mutex_lock(&ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
	if (list_empty(&ns->head->list))
		list_del_init(&ns->head->entry);
	mutex_unlock(&ctrl->subsys->lock);
	nvme_put_ns_head(ns->head);
 out_cleanup_disk:
	put_disk(disk);
 out_free_ns:
	kfree(ns);
}

static void nvme_ns_remove(struct nvme_ns *ns)
{
	bool last_path = false;

	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	clear_bit(NVME_NS_READY, &ns->flags);
	set_capacity(ns->disk, 0);
	nvme_fault_inject_fini(&ns->fault_inject);

	/*
	 * Ensure that !NVME_NS_READY is seen by other threads to prevent
	 * this ns going back into current_path.
	 */
	synchronize_srcu(&ns->head->srcu);

	/* wait for concurrent submissions */
	if (nvme_mpath_clear_current_path(ns))
		synchronize_srcu(&ns->head->srcu);

	mutex_lock(&ns->ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
	if (list_empty(&ns->head->list)) {
		list_del_init(&ns->head->entry);
		last_path = true;
	}
	mutex_unlock(&ns->ctrl->subsys->lock);

	/* guarantee not available in head->list */
	synchronize_rcu();

	if (!nvme_ns_head_multipath(ns->head))
		nvme_cdev_del(&ns->cdev, &ns->cdev_device);
	del_gendisk(ns->disk);

	down_write(&ns->ctrl->namespaces_rwsem);
	list_del_init(&ns->list);
	up_write(&ns->ctrl->namespaces_rwsem);

	if (last_path)
		nvme_mpath_shutdown_disk(ns->head);
	nvme_put_ns(ns);
}

static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
{
	struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);

	if (ns) {
		nvme_ns_remove(ns);
		nvme_put_ns(ns);
	}
}

*ns
, struct nvme_ns_info
*info
)
4328 int ret
= NVME_SC_INVALID_NS
| NVME_SC_DNR
;
4330 if (test_bit(NVME_NS_DEAD
, &ns
->flags
))
4333 ret
= NVME_SC_INVALID_NS
| NVME_SC_DNR
;
4334 if (!nvme_ns_ids_equal(&ns
->head
->ids
, &info
->ids
)) {
4335 dev_err(ns
->ctrl
->device
,
4336 "identifiers changed for nsid %d\n", ns
->head
->ns_id
);
4340 ret
= nvme_update_ns_info(ns
, info
);
4343 * Only remove the namespace if we got a fatal error back from the
4344 * device, otherwise ignore the error and just move on.
4346 * TODO: we should probably schedule a delayed retry here.
4348 if (ret
> 0 && (ret
& NVME_SC_DNR
))
4352 static void nvme_scan_ns(struct nvme_ctrl
*ctrl
, unsigned nsid
)
4354 struct nvme_ns_info info
= { .nsid
= nsid
};
4357 if (nvme_identify_ns_descs(ctrl
, &info
))
4360 if (info
.ids
.csi
!= NVME_CSI_NVM
&& !nvme_multi_css(ctrl
)) {
4361 dev_warn(ctrl
->device
,
4362 "command set not reported for nsid: %d\n", nsid
);
4367 * If available try to use the Command Set Idependent Identify Namespace
4368 * data structure to find all the generic information that is needed to
4369 * set up a namespace. If not fall back to the legacy version.
4371 if ((ctrl
->cap
& NVME_CAP_CRMS_CRIMS
) ||
4372 (info
.ids
.csi
!= NVME_CSI_NVM
&& info
.ids
.csi
!= NVME_CSI_ZNS
)) {
4373 if (nvme_ns_info_from_id_cs_indep(ctrl
, &info
))
4376 if (nvme_ns_info_from_identify(ctrl
, &info
))
4381 * Ignore the namespace if it is not ready. We will get an AEN once it
4382 * becomes ready and restart the scan.
4387 ns
= nvme_find_get_ns(ctrl
, nsid
);
4389 nvme_validate_ns(ns
, &info
);
4392 nvme_alloc_ns(ctrl
, &info
);
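
/*
 * Remove all namespaces with an NSID greater than @nsid, along with any that
 * have been marked dead, e.g. after a rescan reported a smaller active list.
 */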
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid)
{
	struct nvme_ns *ns, *next;
	LIST_HEAD(rm_list);

	down_write(&ctrl->namespaces_rwsem);
	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
			list_move_tail(&ns->list, &rm_list);
	}
	up_write(&ctrl->namespaces_rwsem);

	list_for_each_entry_safe(ns, next, &rm_list, list)
		nvme_ns_remove(ns);
}
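
/*
 * Scan via the Identify Active Namespace ID list (CNS 0x02).  Each iteration
 * returns up to 1024 ascending NSIDs greater than @prev; NSIDs skipped by the
 * list correspond to namespaces that went away and are removed on the fly.
 */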
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
{
	const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
	__le32 *ns_list;
	u32 prev = 0;
	int ret = 0, i;

	if (nvme_ctrl_limited_cns(ctrl))
		return -EOPNOTSUPP;

	ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!ns_list)
		return -ENOMEM;

	for (;;) {
		struct nvme_command cmd = {
			.identify.opcode	= nvme_admin_identify,
			.identify.cns		= NVME_ID_CNS_NS_ACTIVE_LIST,
			.identify.nsid		= cpu_to_le32(prev),
		};

		ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
					    NVME_IDENTIFY_DATA_SIZE);
		if (ret) {
			dev_warn(ctrl->device,
				"Identify NS List failed (status=0x%x)\n", ret);
			goto free;
		}

		for (i = 0; i < nr_entries; i++) {
			u32 nsid = le32_to_cpu(ns_list[i]);

			if (!nsid)	/* end of the list? */
				goto out;
			nvme_scan_ns(ctrl, nsid);
			while (++prev < nsid)
				nvme_ns_remove_by_nsid(ctrl, prev);
		}
	}
 out:
	nvme_remove_invalid_namespaces(ctrl, prev);
 free:
	kfree(ns_list);
	return ret;
}
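
/*
 * Fallback for controllers that cannot report an active NSID list: probe
 * every NSID from 1 up to the controller's reported namespace count (NN).
 */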
static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u32 nn, i;

	if (nvme_identify_ctrl(ctrl, &id))
		return;
	nn = le32_to_cpu(id->nn);
	kfree(id);

	for (i = 1; i <= nn; i++)
		nvme_scan_ns(ctrl, i);

	nvme_remove_invalid_namespaces(ctrl, nn);
}
static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
{
	size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
	__le32 *log;
	int error;

	log = kzalloc(log_size, GFP_KERNEL);
	if (!log)
		return;

	/*
	 * We need to read the log to clear the AEN, but we don't want to rely
	 * on it for the changed namespace information as userspace could have
	 * raced with us in reading the log page, which could cause us to miss
	 * updates.
	 */
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
			NVME_CSI_NVM, log, log_size, 0);
	if (error)
		dev_warn(ctrl->device,
			"reading changed ns log failed: %d\n", error);

	kfree(log);
}
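
/* Worker for ctrl->scan_work, scheduled through nvme_queue_scan(). */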
static void nvme_scan_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);
	int ret;

	/* No tagset on a live ctrl means IO queues could not be created */
	if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
		return;

	/*
	 * Identify controller limits can change at controller reset due to
	 * new firmware download, and even though it is not common we cannot
	 * ignore such a scenario. The controller's non-MDTS limits are
	 * reported in units of logical blocks, which depend on the format of
	 * the attached namespace. Hence re-read the limits at the time of
	 * namespace allocation.
	 */
	ret = nvme_init_non_mdts_limits(ctrl);
	if (ret < 0) {
		dev_warn(ctrl->device,
			"reading non-mdts-limits failed: %d\n", ret);
		return;
	}

	if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
		dev_info(ctrl->device, "rescanning namespaces.\n");
		nvme_clear_changed_ns_log(ctrl);
	}

	mutex_lock(&ctrl->scan_lock);
	if (nvme_scan_ns_list(ctrl) != 0)
		nvme_scan_ns_sequential(ctrl);
	mutex_unlock(&ctrl->scan_lock);
}
/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;
	LIST_HEAD(ns_list);

	/*
	 * make sure to requeue I/O to all namespaces as these
	 * might result from the scan itself and must complete
	 * for the scan_work to make progress
	 */
	nvme_mpath_clear_ctrl_paths(ctrl);

	/* prevent racing with ns scanning */
	flush_work(&ctrl->scan_work);

	/*
	 * The dead state indicates the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

	/* this is a no-op when called from the controller reset handler */
	nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);

	down_write(&ctrl->namespaces_rwsem);
	list_splice_init(&ctrl->namespaces, &ns_list);
	up_write(&ctrl->namespaces_rwsem);

	list_for_each_entry_safe(ns, next, &ns_list, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ret;

	ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
	if (ret)
		return ret;

	if (opts) {
		ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_TRSVCID=%s",
				opts->trsvcid ?: "none");
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
				opts->host_traddr ?: "none");
		if (ret)
			return ret;

		ret = add_uevent_var(env, "NVME_HOST_IFACE=%s",
				opts->host_iface ?: "none");
	}
	return ret;
}
static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata)
{
	char *envp[2] = { envdata, NULL };

	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
}
static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
{
	char *envp[2] = { NULL, NULL };
	u32 aen_result = ctrl->aen_result;

	ctrl->aen_result = 0;
	if (!aen_result)
		return;

	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
	if (!envp[0])
		return;
	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
	kfree(envp[0]);
}
static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	nvme_aen_uevent(ctrl);

	/*
	 * The transport drivers must guarantee AER submission here is safe by
	 * flushing ctrl async_event_work after changing the controller state
	 * from LIVE and before freeing the admin queue.
	 */
	if (ctrl->state == NVME_CTRL_LIVE)
		ctrl->ops->submit_async_event(ctrl);
}
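
/*
 * Returns true if the controller is enabled and reports Processing Paused
 * (CSTS.PP), i.e. it stopped processing commands while activating new
 * firmware.  An all-ones CSTS read means the device is gone.
 */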
static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
{
	u32 csts;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
		return false;

	if (csts == ~0)
		return false;

	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
}
static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
	struct nvme_fw_slot_info_log *log;

	log = kmalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return;

	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
			log, sizeof(*log), 0))
		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
	kfree(log);
}
static void nvme_fw_act_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work,
				struct nvme_ctrl, fw_act_work);
	unsigned long fw_act_timeout;

	if (ctrl->mtfa)
		fw_act_timeout = jiffies +
				msecs_to_jiffies(ctrl->mtfa * 100);
	else
		fw_act_timeout = jiffies +
				msecs_to_jiffies(admin_timeout * 1000);

	nvme_stop_queues(ctrl);
	while (nvme_ctrl_pp_status(ctrl)) {
		if (time_after(jiffies, fw_act_timeout)) {
			dev_warn(ctrl->device,
				"Fw activation timeout, reset controller\n");
			nvme_try_sched_reset(ctrl);
			return;
		}
		msleep(100);
	}

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
		return;

	nvme_start_queues(ctrl);
	/* read FW slot information to clear the AER */
	nvme_get_fw_slot_info(ctrl);

	queue_work(nvme_wq, &ctrl->async_event_work);
}
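
/*
 * Completion dword 0 of an Asynchronous Event Request: bits 2:0 carry the
 * event type and bits 15:8 the event information (used here as the subtype).
 */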
static u32 nvme_aer_type(u32 result)
{
	return result & 0x7;
}

static u32 nvme_aer_subtype(u32 result)
{
	return (result & 0xff00) >> 8;
}
static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
	u32 aer_notice_type = nvme_aer_subtype(result);
	bool requeue = true;

	trace_nvme_async_event(ctrl, aer_notice_type);

	switch (aer_notice_type) {
	case NVME_AER_NOTICE_NS_CHANGED:
		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
		nvme_queue_scan(ctrl);
		break;
	case NVME_AER_NOTICE_FW_ACT_STARTING:
		/*
		 * We are (ab)using the RESETTING state to prevent subsequent
		 * recovery actions from interfering with the controller's
		 * firmware activation.
		 */
		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
			nvme_auth_stop(ctrl);
			requeue = false;
			queue_work(nvme_wq, &ctrl->fw_act_work);
		}
		break;
#ifdef CONFIG_NVME_MULTIPATH
	case NVME_AER_NOTICE_ANA:
		if (!ctrl->ana_log_buf)
			break;
		queue_work(nvme_wq, &ctrl->ana_work);
		break;
#endif
	case NVME_AER_NOTICE_DISC_CHANGED:
		ctrl->aen_result = result;
		break;
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
	return requeue;
}
static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
{
	trace_nvme_async_event(ctrl, NVME_AER_ERROR);
	dev_warn(ctrl->device, "resetting controller due to AER\n");
	nvme_reset_ctrl(ctrl);
}
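
/*
 * Called by the transport drivers when an AER completes.  A replacement AER
 * is normally queued so that one is always outstanding; firmware activation
 * and persistent internal errors defer that to their respective handlers.
 */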
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res)
{
	u32 result = le32_to_cpu(res->u32);
	u32 aer_type = nvme_aer_type(result);
	u32 aer_subtype = nvme_aer_subtype(result);
	bool requeue = true;

	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
		return;

	switch (aer_type) {
	case NVME_AER_NOTICE:
		requeue = nvme_handle_aen_notice(ctrl, result);
		break;
	case NVME_AER_ERROR:
		/*
		 * For a persistent internal error, don't run async_event_work
		 * to submit a new AER. The controller reset will do it.
		 */
		if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) {
			nvme_handle_aer_persistent_error(ctrl);
			return;
		}
		fallthrough;
	case NVME_AER_SMART:
	case NVME_AER_CSS:
	case NVME_AER_VS:
		trace_nvme_async_event(ctrl, aer_type);
		ctrl->aen_result = result;
		break;
	default:
		break;
	}

	if (requeue)
		queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_mpath_stop(ctrl);
	nvme_auth_stop(ctrl);
	nvme_stop_keep_alive(ctrl);
	nvme_stop_failfast_work(ctrl);
	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fw_act_work);
	if (ctrl->ops->stop_ctrl)
		ctrl->ops->stop_ctrl(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
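
/*
 * Counterpart of nvme_stop_ctrl(): (re)arm keep-alive and AENs, and if I/O
 * queues exist, unquiesce them and kick off a namespace scan.
 */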
void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_start_keep_alive(ctrl);

	nvme_enable_aen(ctrl);

	if (ctrl->queue_count > 1) {
		nvme_queue_scan(ctrl);
		nvme_start_queues(ctrl);
		nvme_mpath_update(ctrl);
	}

	nvme_change_uevent(ctrl, "NVME_EVENT=connected");
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_hwmon_exit(ctrl);
	nvme_fault_inject_fini(&ctrl->fault_inject);
	dev_pm_qos_hide_latency_tolerance(ctrl->device);
	cdev_device_del(&ctrl->cdev, ctrl->device);
	nvme_put_ctrl(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
static void nvme_free_cels(struct nvme_ctrl *ctrl)
{
	struct nvme_effects_log *cel;
	unsigned long i;

	xa_for_each(&ctrl->cels, i, cel) {
		xa_erase(&ctrl->cels, i);
		kfree(cel);
	}

	xa_destroy(&ctrl->cels);
}
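
/*
 * Release callback of the controller device, invoked once the last reference
 * taken via nvme_get_ctrl() is dropped.
 */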
static void nvme_free_ctrl(struct device *dev)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (!subsys || ctrl->instance != subsys->instance)
		ida_free(&nvme_instance_ida, ctrl->instance);

	nvme_free_cels(ctrl);
	nvme_mpath_uninit(ctrl);
	nvme_auth_stop(ctrl);
	nvme_auth_free(ctrl);
	__free_page(ctrl->discard_page);

	if (subsys) {
		mutex_lock(&nvme_subsystems_lock);
		list_del(&ctrl->subsys_entry);
		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
		mutex_unlock(&nvme_subsystems_lock);
	}

	ctrl->ops->free_ctrl(ctrl);

	if (subsys)
		nvme_put_subsystem(subsys);
}
/*
 * Initialize an NVMe controller structure.  This needs to be called during
 * the earliest initialization so that we have the initialized structure
 * around during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	spin_lock_init(&ctrl->lock);
	mutex_init(&ctrl->scan_lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	xa_init(&ctrl->cels);
	init_rwsem(&ctrl->namespaces_rwsem);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	ctrl->numa_node = NUMA_NO_NODE;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
	init_waitqueue_head(&ctrl->state_wq);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;

	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
			PAGE_SIZE);
	ctrl->discard_page = alloc_page(GFP_KERNEL);
	if (!ctrl->discard_page) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL);
	if (ret < 0)
		goto out;
	ctrl->instance = ret;

	device_initialize(&ctrl->ctrl_device);
	ctrl->device = &ctrl->ctrl_device;
	ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
			ctrl->instance);
	ctrl->device->class = nvme_class;
	ctrl->device->parent = ctrl->dev;
	ctrl->device->groups = nvme_dev_attr_groups;
	ctrl->device->release = nvme_free_ctrl;
	dev_set_drvdata(ctrl->device, ctrl);
	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
	if (ret)
		goto out_release_instance;

	nvme_get_ctrl(ctrl);
	cdev_init(&ctrl->cdev, &nvme_dev_fops);
	ctrl->cdev.owner = ops->module;
	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
	if (ret)
		goto out_free_name;

	/*
	 * Initialize latency tolerance controls.  The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
	nvme_mpath_init_ctrl(ctrl);
	nvme_auth_init_ctrl(ctrl);

	return 0;
out_free_name:
	nvme_put_ctrl(ctrl);
	kfree_const(ctrl->device->kobj.name);
out_release_instance:
	ida_free(&nvme_instance_ida, ctrl->instance);
out:
	if (ctrl->discard_page)
		__free_page(ctrl->discard_page);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
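
/*
 * The NVME_NS_STOPPED and NVME_CTRL_ADMIN_Q_STOPPED bits keep the blk-mq
 * quiesce/unquiesce calls below balanced when starts and stops race.
 */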
static void nvme_start_ns_queue(struct nvme_ns *ns)
{
	if (test_and_clear_bit(NVME_NS_STOPPED, &ns->flags))
		blk_mq_unquiesce_queue(ns->queue);
}

static void nvme_stop_ns_queue(struct nvme_ns *ns)
{
	if (!test_and_set_bit(NVME_NS_STOPPED, &ns->flags))
		blk_mq_quiesce_queue(ns->queue);
	else
		blk_mq_wait_quiesce_done(ns->queue);
}
/*
 * Prepare a queue for teardown.
 *
 * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
 * the capacity to 0 after that to avoid blocking dispatchers that may be
 * holding bd_mutex.  This will end buffered writers dirtying pages that can't
 * be synced.
 */
static void nvme_set_queue_dying(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
		return;

	blk_mark_disk_dead(ns->disk);
	nvme_start_ns_queue(ns);

	set_capacity_and_notify(ns->disk, 0);
}
/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);

	/* Forcibly unquiesce queues to avoid blocking dispatch */
	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
		nvme_start_admin_queue(ctrl);

	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_set_queue_dying(ns);

	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);
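
/*
 * Queue freezing, used around controller resets: nvme_start_freeze() begins
 * freezing all namespace queues, the wait variants block until no requests
 * remain in flight, and nvme_unfreeze() releases the queues again.
 */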
void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
	return timeout;
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);
void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_freeze_queue_start(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);
void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_stop_ns_queue(ns);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);

void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_start_ns_queue(ns);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
void nvme_stop_admin_queue(struct nvme_ctrl *ctrl)
{
	if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
		blk_mq_quiesce_queue(ctrl->admin_q);
	else
		blk_mq_wait_quiesce_done(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_stop_admin_queue);

void nvme_start_admin_queue(struct nvme_ctrl *ctrl)
{
	if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
		blk_mq_unquiesce_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_start_admin_queue);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_sync_io_queues);

void nvme_sync_queues(struct nvme_ctrl *ctrl)
{
	nvme_sync_io_queues(ctrl);
	if (ctrl->admin_q)
		blk_sync_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);

struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
{
	if (file->f_op != &nvme_dev_fops)
		return NULL;
	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU);
/*
 * Check we didn't inadvertently grow the command structure sizes:
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_cs_indep) !=
			NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512);
}
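
/*
 * Module init: create the shared workqueues, the nvme, nvme-subsystem and
 * nvme-generic device classes, and the chardev regions backing them, tearing
 * everything down in reverse on failure.
 */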
static int __init nvme_core_init(void)
{
	int result = -ENOMEM;

	_nvme_check_size();

	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
		goto out;

	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_reset_wq)
		goto destroy_wq;

	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_delete_wq)
		goto destroy_reset_wq;

	result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0,
			NVME_MINORS, "nvme");
	if (result < 0)
		goto destroy_delete_wq;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}
	nvme_class->dev_uevent = nvme_class_uevent;

	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
	if (IS_ERR(nvme_subsys_class)) {
		result = PTR_ERR(nvme_subsys_class);
		goto destroy_class;
	}

	result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS,
				     "nvme-generic");
	if (result < 0)
		goto destroy_subsys_class;

	nvme_ns_chr_class = class_create(THIS_MODULE, "nvme-generic");
	if (IS_ERR(nvme_ns_chr_class)) {
		result = PTR_ERR(nvme_ns_chr_class);
		goto unregister_generic_ns;
	}

	return 0;

unregister_generic_ns:
	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
destroy_subsys_class:
	class_destroy(nvme_subsys_class);
destroy_class:
	class_destroy(nvme_class);
unregister_chrdev:
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_delete_wq:
	destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
	destroy_workqueue(nvme_reset_wq);
destroy_wq:
	destroy_workqueue(nvme_wq);
out:
	return result;
}
static void __exit nvme_core_exit(void)
{
	class_destroy(nvme_ns_chr_class);
	class_destroy(nvme_subsys_class);
	class_destroy(nvme_class);
	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
	destroy_workqueue(nvme_delete_wq);
	destroy_workqueue(nvme_reset_wq);
	destroy_workqueue(nvme_wq);
	ida_destroy(&nvme_ns_chr_minor_ida);
	ida_destroy(&nvme_instance_ida);
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);