// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#include <linux/nvme-auth.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#define NVME_MINORS		(1U << MINORBITS)

struct nvme_ns_info {
	struct nvme_ns_ids ids;
	u32 nsid;
	__le32 anagrpid;
	bool is_shared;
	bool is_readonly;
	bool is_ready;
	bool is_removed;
};
unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static unsigned long apst_primary_timeout_ms = 100;
module_param(apst_primary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_primary_timeout_ms,
	"primary APST timeout in ms");

static unsigned long apst_secondary_timeout_ms = 2000;
module_param(apst_secondary_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_timeout_ms,
	"secondary APST timeout in ms");

static unsigned long apst_primary_latency_tol_us = 15000;
module_param(apst_primary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_primary_latency_tol_us,
	"primary APST latency tolerance in us");

static unsigned long apst_secondary_latency_tol_us = 100000;
module_param(apst_secondary_latency_tol_us, ulong, 0644);
MODULE_PARM_DESC(apst_secondary_latency_tol_us,
	"secondary APST latency tolerance in us");
/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive, periodic reconnects etc. nvme_reset_wq
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq hosts controller deletion
 * works, which flush reset works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_ctrl_base_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

static DEFINE_IDA(nvme_ns_chr_minor_ida);
static dev_t nvme_ns_chr_devt;
static struct class *nvme_ns_chr_class;

static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
				   struct nvme_command *cmd);
void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only queue new scan work when admin and IO queues are both alive
	 */
	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
		queue_work(nvme_wq, &ctrl->scan_work);
}
/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{
	if (ctrl->state != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
static void nvme_failfast_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, failfast_work);

	if (ctrl->state != NVME_CTRL_CONNECTING)
		return;

	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	dev_info(ctrl->device, "failfast expired\n");
	nvme_kick_requeue_lists(ctrl);
}

static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
		return;

	schedule_delayed_work(&ctrl->failfast_work,
			      ctrl->opts->fast_io_fail_tmo * HZ);
}

static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
{
	if (!ctrl->opts)
		return;

	cancel_delayed_work_sync(&ctrl->failfast_work);
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
}
int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (ctrl->state != NVME_CTRL_LIVE)
			ret = -ENETRESET;
	}

	return ret;
}
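/*
 * Unlike nvme_reset_ctrl(), which performs the transition to
 * NVME_CTRL_RESETTING itself, nvme_try_sched_reset() above expects the caller
 * to have already moved the controller into the resetting state (for example
 * via nvme_wait_reset()).
 */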
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
}

static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	/*
	 * Keep a reference until nvme_do_delete_ctrl() completes,
	 * since ->delete_ctrl can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		nvme_do_delete_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}
static blk_status_t nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
	case NVME_SC_CMD_INTERRUPTED:
	case NVME_SC_NS_NOT_READY:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_ONCS_NOT_SUPPORTED:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_RESV_CONFLICT;
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
	case NVME_SC_ZONE_TOO_MANY_ACTIVE:
		return BLK_STS_ZONE_ACTIVE_RESOURCE;
	case NVME_SC_ZONE_TOO_MANY_OPEN:
		return BLK_STS_ZONE_OPEN_RESOURCE;
	default:
		return BLK_STS_IOERR;
	}
}
static void nvme_retry_req(struct request *req)
{
	unsigned long delay = 0;
	u16 crd;

	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
	if (crd)
		delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}
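/*
 * The crdt[] values come from the Command Retry Delay Time fields of the
 * Identify Controller data and are specified in units of 100 milliseconds,
 * so the requeue delay computed above is expressed in milliseconds.
 */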
static void nvme_log_error(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	struct nvme_request *nr = nvme_req(req);

	if (ns) {
		pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
		       ns->disk ? ns->disk->disk_name : "?",
		       nvme_get_opcode_str(nr->cmd->common.opcode),
		       nr->cmd->common.opcode,
		       (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)),
		       (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift,
		       nvme_get_error_status_str(nr->status),
		       nr->status >> 8 & 7,	/* Status Code Type */
		       nr->status & 0xff,	/* Status Code */
		       nr->status & NVME_SC_MORE ? "MORE " : "",
		       nr->status & NVME_SC_DNR  ? "DNR "  : "");
		return;
	}

	pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n",
			   dev_name(nr->ctrl->device),
			   nvme_get_admin_opcode_str(nr->cmd->common.opcode),
			   nr->cmd->common.opcode,
			   nvme_get_error_status_str(nr->status),
			   nr->status >> 8 & 7,	/* Status Code Type */
			   nr->status & 0xff,	/* Status Code */
			   nr->status & NVME_SC_MORE ? "MORE " : "",
			   nr->status & NVME_SC_DNR  ? "DNR "  : "");
}
enum nvme_disposition {
	COMPLETE,
	RETRY,
	FAILOVER,
	AUTHENTICATE,
};

static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
{
	if (likely(nvme_req(req)->status == 0))
		return COMPLETE;

	if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
		return AUTHENTICATE;

	if (blk_noretry_request(req) ||
	    (nvme_req(req)->status & NVME_SC_DNR) ||
	    nvme_req(req)->retries >= nvme_max_retries)
		return COMPLETE;

	if (req->cmd_flags & REQ_NVME_MPATH) {
		if (nvme_is_path_error(nvme_req(req)->status) ||
		    blk_queue_dying(req->q))
			return FAILOVER;
	} else {
		if (blk_queue_dying(req->q))
			return COMPLETE;
	}

	return RETRY;
}
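/*
 * Summary of the disposition logic above: successful commands complete
 * immediately; NVME_SC_AUTH_REQUIRED triggers re-authentication; DNR
 * statuses, exhausted retries and noretry requests complete with the error;
 * multipath requests fail over on path errors or dying queues; everything
 * else is retried on the same path.
 */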
static inline void nvme_end_req_zoned(struct request *req)
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    req_op(req) == REQ_OP_ZONE_APPEND)
		req->__sector = nvme_lba_to_sect(req->q->queuedata,
			le64_to_cpu(nvme_req(req)->result.u64));
}

static inline void nvme_end_req(struct request *req)
{
	blk_status_t status = nvme_error_status(nvme_req(req)->status);

	if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
		nvme_log_error(req);
	nvme_end_req_zoned(req);
	nvme_trace_bio_complete(req);
	if (req->cmd_flags & REQ_NVME_MPATH)
		nvme_mpath_end_request(req);
	blk_mq_end_request(req, status);
}
void nvme_complete_rq(struct request *req)
{
	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;

	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);

	/*
	 * Completions of long-running commands should not be able to
	 * defer sending of periodic keep alives, since the controller
	 * may have completed processing such commands a long time ago
	 * (arbitrarily close to command submission time).
	 * req->deadline - req->timeout is the command submission time
	 * in jiffies.
	 */
	if (ctrl->kas &&
	    req->deadline - req->timeout >= ctrl->ka_last_check_time)
		ctrl->comp_seen = true;

	switch (nvme_decide_disposition(req)) {
	case COMPLETE:
		nvme_end_req(req);
		return;
	case RETRY:
		nvme_retry_req(req);
		return;
	case FAILOVER:
		nvme_failover_req(req);
		return;
	case AUTHENTICATE:
#ifdef CONFIG_NVME_AUTH
		queue_work(nvme_wq, &ctrl->dhchap_auth_work);
		nvme_retry_req(req);
#else
		nvme_end_req(req);
#endif
		return;
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

void nvme_complete_batch_req(struct request *req)
{
	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);
	nvme_end_req_zoned(req);
}
EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
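/*
 * nvme_complete_batch_req() intentionally skips the disposition handling:
 * transport drivers are expected to add a request to a completion batch only
 * when it needs no retry, failover or re-authentication handling.
 */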
/*
 * Called to unwind from ->queue_rq on a failed command submission so that the
 * multipathing code gets called to potentially failover to another path.
 * The caller needs to unwind all transport specific resource allocations and
 * must propagate the return value.
 */
blk_status_t nvme_host_path_error(struct request *req)
{
	nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
	blk_mq_set_request_complete(req);
	nvme_complete_rq(req);
	return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvme_host_path_error);
bool nvme_cancel_request(struct request *req, void *data)
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	/* don't abort one completed or idle request */
	if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT)
		return true;

	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
	blk_mq_complete_request(req);
	return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->tagset) {
		blk_mq_tagset_busy_iter(ctrl->tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_tagset);

void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->admin_tagset) {
		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
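/*
 * The two helpers above implement the common teardown pattern: iterate the
 * busy tags and mark every in-flight request as host aborted, then wait for
 * all of those requests to actually complete before the tag set is torn down
 * or reused.
 */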
493 bool nvme_change_ctrl_state(struct nvme_ctrl
*ctrl
,
494 enum nvme_ctrl_state new_state
)
496 enum nvme_ctrl_state old_state
;
498 bool changed
= false;
500 spin_lock_irqsave(&ctrl
->lock
, flags
);
502 old_state
= ctrl
->state
;
507 case NVME_CTRL_RESETTING
:
508 case NVME_CTRL_CONNECTING
:
515 case NVME_CTRL_RESETTING
:
525 case NVME_CTRL_CONNECTING
:
528 case NVME_CTRL_RESETTING
:
535 case NVME_CTRL_DELETING
:
538 case NVME_CTRL_RESETTING
:
539 case NVME_CTRL_CONNECTING
:
546 case NVME_CTRL_DELETING_NOIO
:
548 case NVME_CTRL_DELETING
:
558 case NVME_CTRL_DELETING
:
570 ctrl
->state
= new_state
;
571 wake_up_all(&ctrl
->state_wq
);
574 spin_unlock_irqrestore(&ctrl
->lock
, flags
);
578 if (ctrl
->state
== NVME_CTRL_LIVE
) {
579 if (old_state
== NVME_CTRL_CONNECTING
)
580 nvme_stop_failfast_work(ctrl
);
581 nvme_kick_requeue_lists(ctrl
);
582 } else if (ctrl
->state
== NVME_CTRL_CONNECTING
&&
583 old_state
== NVME_CTRL_RESETTING
) {
584 nvme_start_failfast_work(ctrl
);
588 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state
);
/*
 * Returns true for sink states that can't ever transition back to live.
 */
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return ctrl->state == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);
static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_free(&head->subsys->ns_ida, head->instance);
	cleanup_srcu_struct(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}

bool nvme_tryget_ns_head(struct nvme_ns_head *head)
{
	return kref_get_unless_zero(&head->ref);
}

void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static inline bool nvme_get_ns(struct nvme_ns *ns)
{
	return kref_get_unless_zero(&ns->kref);
}

void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
static inline void nvme_clear_nvme_request(struct request *req)
{
	nvme_req(req)->status = 0;
	nvme_req(req)->retries = 0;
	nvme_req(req)->flags = 0;
	req->rq_flags |= RQF_DONTPREP;
}

/* initialize a passthrough request */
void nvme_init_request(struct request *req, struct nvme_command *cmd)
{
	if (req->q->queuedata)
		req->timeout = NVME_IO_TIMEOUT;
	else /* no queuedata implies admin queue */
		req->timeout = NVME_ADMIN_TIMEOUT;

	/* passthru commands should let the driver set the SGL flags */
	cmd->common.flags &= ~NVME_CMD_SGL_ALL;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	if (req->mq_hctx->type == HCTX_TYPE_POLL)
		req->cmd_flags |= REQ_POLLED;
	nvme_clear_nvme_request(req);
	req->rq_flags |= RQF_QUIET;
	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
}
EXPORT_SYMBOL_GPL(nvme_init_request);
/*
 * For something we're not in a state to send to the device the default action
 * is to busy it and retry it after the controller state is recovered. However,
 * if the controller is deleting or if anything is marked for failfast or
 * nvme multipath it is immediately failed.
 *
 * Note: commands used to initialize the controller will be marked for failfast.
 * Note: nvme cli/ioctl commands are marked for failfast.
 */
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *rq)
{
	if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
	    ctrl->state != NVME_CTRL_DELETING &&
	    ctrl->state != NVME_CTRL_DEAD &&
	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
		return BLK_STS_RESOURCE;
	return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
717 bool __nvme_check_ready(struct nvme_ctrl
*ctrl
, struct request
*rq
,
720 struct nvme_request
*req
= nvme_req(rq
);
723 * currently we have a problem sending passthru commands
724 * on the admin_q if the controller is not LIVE because we can't
725 * make sure that they are going out after the admin connect,
726 * controller enable and/or other commands in the initialization
727 * sequence. until the controller will be LIVE, fail with
728 * BLK_STS_RESOURCE so that they will be rescheduled.
730 if (rq
->q
== ctrl
->admin_q
&& (req
->flags
& NVME_REQ_USERCMD
))
733 if (ctrl
->ops
->flags
& NVME_F_FABRICS
) {
 * Only allow commands on a live queue, except for the connect
 * command, which is required to set the queue live in the
 * appropriate states.
739 switch (ctrl
->state
) {
740 case NVME_CTRL_CONNECTING
:
741 if (blk_rq_is_passthrough(rq
) && nvme_is_fabrics(req
->cmd
) &&
742 (req
->cmd
->fabrics
.fctype
== nvme_fabrics_type_connect
||
743 req
->cmd
->fabrics
.fctype
== nvme_fabrics_type_auth_send
||
744 req
->cmd
->fabrics
.fctype
== nvme_fabrics_type_auth_receive
))
756 EXPORT_SYMBOL_GPL(__nvme_check_ready
);
static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}
766 static blk_status_t
nvme_setup_discard(struct nvme_ns
*ns
, struct request
*req
,
767 struct nvme_command
*cmnd
)
769 unsigned short segments
= blk_rq_nr_discard_segments(req
), n
= 0;
770 struct nvme_dsm_range
*range
;
774 * Some devices do not consider the DSM 'Number of Ranges' field when
775 * determining how much data to DMA. Always allocate memory for maximum
776 * number of segments to prevent device reading beyond end of buffer.
778 static const size_t alloc_size
= sizeof(*range
) * NVME_DSM_MAX_RANGES
;
780 range
= kzalloc(alloc_size
, GFP_ATOMIC
| __GFP_NOWARN
);
783 * If we fail allocation our range, fallback to the controller
784 * discard page. If that's also busy, it's safe to return
785 * busy, as we know we can make progress once that's freed.
787 if (test_and_set_bit_lock(0, &ns
->ctrl
->discard_page_busy
))
788 return BLK_STS_RESOURCE
;
790 range
= page_address(ns
->ctrl
->discard_page
);
793 if (queue_max_discard_segments(req
->q
) == 1) {
794 u64 slba
= nvme_sect_to_lba(ns
, blk_rq_pos(req
));
795 u32 nlb
= blk_rq_sectors(req
) >> (ns
->lba_shift
- 9);
797 range
[0].cattr
= cpu_to_le32(0);
798 range
[0].nlb
= cpu_to_le32(nlb
);
799 range
[0].slba
= cpu_to_le64(slba
);
802 __rq_for_each_bio(bio
, req
) {
803 u64 slba
= nvme_sect_to_lba(ns
, bio
->bi_iter
.bi_sector
);
804 u32 nlb
= bio
->bi_iter
.bi_size
>> ns
->lba_shift
;
807 range
[n
].cattr
= cpu_to_le32(0);
808 range
[n
].nlb
= cpu_to_le32(nlb
);
809 range
[n
].slba
= cpu_to_le64(slba
);
815 if (WARN_ON_ONCE(n
!= segments
)) {
816 if (virt_to_page(range
) == ns
->ctrl
->discard_page
)
817 clear_bit_unlock(0, &ns
->ctrl
->discard_page_busy
);
820 return BLK_STS_IOERR
;
823 memset(cmnd
, 0, sizeof(*cmnd
));
824 cmnd
->dsm
.opcode
= nvme_cmd_dsm
;
825 cmnd
->dsm
.nsid
= cpu_to_le32(ns
->head
->ns_id
);
826 cmnd
->dsm
.nr
= cpu_to_le32(segments
- 1);
827 cmnd
->dsm
.attributes
= cpu_to_le32(NVME_DSMGMT_AD
);
829 bvec_set_virt(&req
->special_vec
, range
, alloc_size
);
830 req
->rq_flags
|= RQF_SPECIAL_PAYLOAD
;
835 static void nvme_set_ref_tag(struct nvme_ns
*ns
, struct nvme_command
*cmnd
,
841 /* both rw and write zeroes share the same reftag format */
842 switch (ns
->guard_type
) {
843 case NVME_NVM_NS_16B_GUARD
:
844 cmnd
->rw
.reftag
= cpu_to_le32(t10_pi_ref_tag(req
));
846 case NVME_NVM_NS_64B_GUARD
:
847 ref48
= ext_pi_ref_tag(req
);
848 lower
= lower_32_bits(ref48
);
849 upper
= upper_32_bits(ref48
);
851 cmnd
->rw
.reftag
= cpu_to_le32(lower
);
852 cmnd
->rw
.cdw3
= cpu_to_le32(upper
);
859 static inline blk_status_t
nvme_setup_write_zeroes(struct nvme_ns
*ns
,
860 struct request
*req
, struct nvme_command
*cmnd
)
862 memset(cmnd
, 0, sizeof(*cmnd
));
864 if (ns
->ctrl
->quirks
& NVME_QUIRK_DEALLOCATE_ZEROES
)
865 return nvme_setup_discard(ns
, req
, cmnd
);
867 cmnd
->write_zeroes
.opcode
= nvme_cmd_write_zeroes
;
868 cmnd
->write_zeroes
.nsid
= cpu_to_le32(ns
->head
->ns_id
);
869 cmnd
->write_zeroes
.slba
=
870 cpu_to_le64(nvme_sect_to_lba(ns
, blk_rq_pos(req
)));
871 cmnd
->write_zeroes
.length
=
872 cpu_to_le16((blk_rq_bytes(req
) >> ns
->lba_shift
) - 1);
874 if (!(req
->cmd_flags
& REQ_NOUNMAP
) && (ns
->features
& NVME_NS_DEAC
))
875 cmnd
->write_zeroes
.control
|= cpu_to_le16(NVME_WZ_DEAC
);
877 if (nvme_ns_has_pi(ns
)) {
878 cmnd
->write_zeroes
.control
|= cpu_to_le16(NVME_RW_PRINFO_PRACT
);
880 switch (ns
->pi_type
) {
881 case NVME_NS_DPS_PI_TYPE1
:
882 case NVME_NS_DPS_PI_TYPE2
:
883 nvme_set_ref_tag(ns
, cmnd
, req
);
891 static inline blk_status_t
nvme_setup_rw(struct nvme_ns
*ns
,
892 struct request
*req
, struct nvme_command
*cmnd
,
898 if (req
->cmd_flags
& REQ_FUA
)
899 control
|= NVME_RW_FUA
;
900 if (req
->cmd_flags
& (REQ_FAILFAST_DEV
| REQ_RAHEAD
))
901 control
|= NVME_RW_LR
;
903 if (req
->cmd_flags
& REQ_RAHEAD
)
904 dsmgmt
|= NVME_RW_DSM_FREQ_PREFETCH
;
906 cmnd
->rw
.opcode
= op
;
908 cmnd
->rw
.nsid
= cpu_to_le32(ns
->head
->ns_id
);
911 cmnd
->rw
.metadata
= 0;
912 cmnd
->rw
.slba
= cpu_to_le64(nvme_sect_to_lba(ns
, blk_rq_pos(req
)));
913 cmnd
->rw
.length
= cpu_to_le16((blk_rq_bytes(req
) >> ns
->lba_shift
) - 1);
916 cmnd
->rw
.appmask
= 0;
 * If formatted with metadata, the block layer always provides a
921 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else
922 * we enable the PRACT bit for protection information or set the
923 * namespace capacity to zero to prevent any I/O.
925 if (!blk_integrity_rq(req
)) {
926 if (WARN_ON_ONCE(!nvme_ns_has_pi(ns
)))
927 return BLK_STS_NOTSUPP
;
928 control
|= NVME_RW_PRINFO_PRACT
;
931 switch (ns
->pi_type
) {
932 case NVME_NS_DPS_PI_TYPE3
:
933 control
|= NVME_RW_PRINFO_PRCHK_GUARD
;
935 case NVME_NS_DPS_PI_TYPE1
:
936 case NVME_NS_DPS_PI_TYPE2
:
937 control
|= NVME_RW_PRINFO_PRCHK_GUARD
|
938 NVME_RW_PRINFO_PRCHK_REF
;
939 if (op
== nvme_cmd_zone_append
)
940 control
|= NVME_RW_APPEND_PIREMAP
;
941 nvme_set_ref_tag(ns
, cmnd
, req
);
946 cmnd
->rw
.control
= cpu_to_le16(control
);
947 cmnd
->rw
.dsmgmt
= cpu_to_le32(dsmgmt
);
951 void nvme_cleanup_cmd(struct request
*req
)
953 if (req
->rq_flags
& RQF_SPECIAL_PAYLOAD
) {
954 struct nvme_ctrl
*ctrl
= nvme_req(req
)->ctrl
;
956 if (req
->special_vec
.bv_page
== ctrl
->discard_page
)
957 clear_bit_unlock(0, &ctrl
->discard_page_busy
);
959 kfree(bvec_virt(&req
->special_vec
));
962 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd
);
964 blk_status_t
nvme_setup_cmd(struct nvme_ns
*ns
, struct request
*req
)
966 struct nvme_command
*cmd
= nvme_req(req
)->cmd
;
967 blk_status_t ret
= BLK_STS_OK
;
969 if (!(req
->rq_flags
& RQF_DONTPREP
))
970 nvme_clear_nvme_request(req
);
972 switch (req_op(req
)) {
975 /* these are setup prior to execution in nvme_init_request() */
978 nvme_setup_flush(ns
, cmd
);
980 case REQ_OP_ZONE_RESET_ALL
:
981 case REQ_OP_ZONE_RESET
:
982 ret
= nvme_setup_zone_mgmt_send(ns
, req
, cmd
, NVME_ZONE_RESET
);
984 case REQ_OP_ZONE_OPEN
:
985 ret
= nvme_setup_zone_mgmt_send(ns
, req
, cmd
, NVME_ZONE_OPEN
);
987 case REQ_OP_ZONE_CLOSE
:
988 ret
= nvme_setup_zone_mgmt_send(ns
, req
, cmd
, NVME_ZONE_CLOSE
);
990 case REQ_OP_ZONE_FINISH
:
991 ret
= nvme_setup_zone_mgmt_send(ns
, req
, cmd
, NVME_ZONE_FINISH
);
993 case REQ_OP_WRITE_ZEROES
:
994 ret
= nvme_setup_write_zeroes(ns
, req
, cmd
);
997 ret
= nvme_setup_discard(ns
, req
, cmd
);
1000 ret
= nvme_setup_rw(ns
, req
, cmd
, nvme_cmd_read
);
1003 ret
= nvme_setup_rw(ns
, req
, cmd
, nvme_cmd_write
);
1005 case REQ_OP_ZONE_APPEND
:
1006 ret
= nvme_setup_rw(ns
, req
, cmd
, nvme_cmd_zone_append
);
1010 return BLK_STS_IOERR
;
1013 cmd
->common
.command_id
= nvme_cid(req
);
1014 trace_nvme_setup_cmd(req
, cmd
);
1017 EXPORT_SYMBOL_GPL(nvme_setup_cmd
);
1022 * >0: nvme controller's cqe status response
1023 * <0: kernel error in lieu of controller response
1025 int nvme_execute_rq(struct request
*rq
, bool at_head
)
1027 blk_status_t status
;
1029 status
= blk_execute_rq(rq
, at_head
);
1030 if (nvme_req(rq
)->flags
& NVME_REQ_CANCELLED
)
1032 if (nvme_req(rq
)->status
)
1033 return nvme_req(rq
)->status
;
1034 return blk_status_to_errno(status
);
1036 EXPORT_SYMBOL_NS_GPL(nvme_execute_rq
, NVME_TARGET_PASSTHRU
);
1039 * Returns 0 on success. If the result is negative, it's a Linux error code;
1040 * if the result is positive, it's an NVM Express status code
1042 int __nvme_submit_sync_cmd(struct request_queue
*q
, struct nvme_command
*cmd
,
1043 union nvme_result
*result
, void *buffer
, unsigned bufflen
,
1044 int qid
, int at_head
, blk_mq_req_flags_t flags
)
1046 struct request
*req
;
1049 if (qid
== NVME_QID_ANY
)
1050 req
= blk_mq_alloc_request(q
, nvme_req_op(cmd
), flags
);
1052 req
= blk_mq_alloc_request_hctx(q
, nvme_req_op(cmd
), flags
,
1056 return PTR_ERR(req
);
1057 nvme_init_request(req
, cmd
);
1059 if (buffer
&& bufflen
) {
1060 ret
= blk_rq_map_kern(q
, req
, buffer
, bufflen
, GFP_KERNEL
);
1065 ret
= nvme_execute_rq(req
, at_head
);
1066 if (result
&& ret
>= 0)
1067 *result
= nvme_req(req
)->result
;
1069 blk_mq_free_request(req
);
1072 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd
);
1074 int nvme_submit_sync_cmd(struct request_queue
*q
, struct nvme_command
*cmd
,
1075 void *buffer
, unsigned bufflen
)
1077 return __nvme_submit_sync_cmd(q
, cmd
, NULL
, buffer
, bufflen
,
1078 NVME_QID_ANY
, 0, 0);
1080 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd
);
1082 u32
nvme_command_effects(struct nvme_ctrl
*ctrl
, struct nvme_ns
*ns
, u8 opcode
)
1087 effects
= le32_to_cpu(ns
->head
->effects
->iocs
[opcode
]);
1088 if (effects
& ~(NVME_CMD_EFFECTS_CSUPP
| NVME_CMD_EFFECTS_LBCC
))
1089 dev_warn_once(ctrl
->device
,
1090 "IO command:%02x has unusual effects:%08x\n",
 * NVME_CMD_EFFECTS_CSE_MASK causes a freeze of all I/O queues,
 * which would deadlock when done on an I/O command. Note that
 * we already warn about an unusual effect above.
1098 effects
&= ~NVME_CMD_EFFECTS_CSE_MASK
;
1100 effects
= le32_to_cpu(ctrl
->effects
->acs
[opcode
]);
1105 EXPORT_SYMBOL_NS_GPL(nvme_command_effects
, NVME_TARGET_PASSTHRU
);
1107 u32
nvme_passthru_start(struct nvme_ctrl
*ctrl
, struct nvme_ns
*ns
, u8 opcode
)
1109 u32 effects
= nvme_command_effects(ctrl
, ns
, opcode
);
1112 * For simplicity, IO to all namespaces is quiesced even if the command
1113 * effects say only one namespace is affected.
1115 if (effects
& NVME_CMD_EFFECTS_CSE_MASK
) {
1116 mutex_lock(&ctrl
->scan_lock
);
1117 mutex_lock(&ctrl
->subsys
->lock
);
1118 nvme_mpath_start_freeze(ctrl
->subsys
);
1119 nvme_mpath_wait_freeze(ctrl
->subsys
);
1120 nvme_start_freeze(ctrl
);
1121 nvme_wait_freeze(ctrl
);
1125 EXPORT_SYMBOL_NS_GPL(nvme_passthru_start
, NVME_TARGET_PASSTHRU
);
1127 void nvme_passthru_end(struct nvme_ctrl
*ctrl
, struct nvme_ns
*ns
, u32 effects
,
1128 struct nvme_command
*cmd
, int status
)
1130 if (effects
& NVME_CMD_EFFECTS_CSE_MASK
) {
1131 nvme_unfreeze(ctrl
);
1132 nvme_mpath_unfreeze(ctrl
->subsys
);
1133 mutex_unlock(&ctrl
->subsys
->lock
);
1134 mutex_unlock(&ctrl
->scan_lock
);
1136 if (effects
& NVME_CMD_EFFECTS_CCC
) {
1137 if (!test_and_set_bit(NVME_CTRL_DIRTY_CAPABILITY
,
1139 dev_info(ctrl
->device
,
1140 "controller capabilities changed, reset may be required to take effect.\n");
1143 if (effects
& (NVME_CMD_EFFECTS_NIC
| NVME_CMD_EFFECTS_NCC
)) {
1144 nvme_queue_scan(ctrl
);
1145 flush_work(&ctrl
->scan_work
);
1150 switch (cmd
->common
.opcode
) {
1151 case nvme_admin_set_features
:
1152 switch (le32_to_cpu(cmd
->common
.cdw10
) & 0xFF) {
1153 case NVME_FEAT_KATO
:
1155 * Keep alive commands interval on the host should be
1156 * updated when KATO is modified by Set Features
1160 nvme_update_keep_alive(ctrl
, cmd
);
1170 EXPORT_SYMBOL_NS_GPL(nvme_passthru_end
, NVME_TARGET_PASSTHRU
);
1173 * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
1175 * The host should send Keep Alive commands at half of the Keep Alive Timeout
1176 * accounting for transport roundtrip times [..].
1178 static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl
*ctrl
)
1180 unsigned long delay
= ctrl
->kato
* HZ
/ 2;
1183 * When using Traffic Based Keep Alive, we need to run
1184 * nvme_keep_alive_work at twice the normal frequency, as one
1185 * command completion can postpone sending a keep alive command
1186 * by up to twice the delay between runs.
1188 if (ctrl
->ctratt
& NVME_CTRL_ATTR_TBKAS
)
1193 static void nvme_queue_keep_alive_work(struct nvme_ctrl
*ctrl
)
1195 queue_delayed_work(nvme_wq
, &ctrl
->ka_work
,
1196 nvme_keep_alive_work_period(ctrl
));
1199 static enum rq_end_io_ret
nvme_keep_alive_end_io(struct request
*rq
,
1200 blk_status_t status
)
1202 struct nvme_ctrl
*ctrl
= rq
->end_io_data
;
1203 unsigned long flags
;
1204 bool startka
= false;
1205 unsigned long rtt
= jiffies
- (rq
->deadline
- rq
->timeout
);
1206 unsigned long delay
= nvme_keep_alive_work_period(ctrl
);
1209 * Subtract off the keepalive RTT so nvme_keep_alive_work runs
1210 * at the desired frequency.
1215 dev_warn(ctrl
->device
, "long keepalive RTT (%u ms)\n",
1216 jiffies_to_msecs(rtt
));
1220 blk_mq_free_request(rq
);
1223 dev_err(ctrl
->device
,
1224 "failed nvme_keep_alive_end_io error=%d\n",
1226 return RQ_END_IO_NONE
;
1229 ctrl
->ka_last_check_time
= jiffies
;
1230 ctrl
->comp_seen
= false;
1231 spin_lock_irqsave(&ctrl
->lock
, flags
);
1232 if (ctrl
->state
== NVME_CTRL_LIVE
||
1233 ctrl
->state
== NVME_CTRL_CONNECTING
)
1235 spin_unlock_irqrestore(&ctrl
->lock
, flags
);
1237 queue_delayed_work(nvme_wq
, &ctrl
->ka_work
, delay
);
1238 return RQ_END_IO_NONE
;
1241 static void nvme_keep_alive_work(struct work_struct
*work
)
1243 struct nvme_ctrl
*ctrl
= container_of(to_delayed_work(work
),
1244 struct nvme_ctrl
, ka_work
);
1245 bool comp_seen
= ctrl
->comp_seen
;
1248 ctrl
->ka_last_check_time
= jiffies
;
1250 if ((ctrl
->ctratt
& NVME_CTRL_ATTR_TBKAS
) && comp_seen
) {
1251 dev_dbg(ctrl
->device
,
1252 "reschedule traffic based keep-alive timer\n");
1253 ctrl
->comp_seen
= false;
1254 nvme_queue_keep_alive_work(ctrl
);
1258 rq
= blk_mq_alloc_request(ctrl
->admin_q
, nvme_req_op(&ctrl
->ka_cmd
),
1259 BLK_MQ_REQ_RESERVED
| BLK_MQ_REQ_NOWAIT
);
1261 /* allocation failure, reset the controller */
1262 dev_err(ctrl
->device
, "keep-alive failed: %ld\n", PTR_ERR(rq
));
1263 nvme_reset_ctrl(ctrl
);
1266 nvme_init_request(rq
, &ctrl
->ka_cmd
);
1268 rq
->timeout
= ctrl
->kato
* HZ
;
1269 rq
->end_io
= nvme_keep_alive_end_io
;
1270 rq
->end_io_data
= ctrl
;
1271 blk_execute_rq_nowait(rq
, false);
1274 static void nvme_start_keep_alive(struct nvme_ctrl
*ctrl
)
1276 if (unlikely(ctrl
->kato
== 0))
1279 nvme_queue_keep_alive_work(ctrl
);
1282 void nvme_stop_keep_alive(struct nvme_ctrl
*ctrl
)
1284 if (unlikely(ctrl
->kato
== 0))
1287 cancel_delayed_work_sync(&ctrl
->ka_work
);
1289 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive
);
1291 static void nvme_update_keep_alive(struct nvme_ctrl
*ctrl
,
1292 struct nvme_command
*cmd
)
1294 unsigned int new_kato
=
1295 DIV_ROUND_UP(le32_to_cpu(cmd
->common
.cdw11
), 1000);
1297 dev_info(ctrl
->device
,
1298 "keep alive interval updated from %u ms to %u ms\n",
1299 ctrl
->kato
* 1000 / 2, new_kato
* 1000 / 2);
1301 nvme_stop_keep_alive(ctrl
);
1302 ctrl
->kato
= new_kato
;
1303 nvme_start_keep_alive(ctrl
);
1307 * In NVMe 1.0 the CNS field was just a binary controller or namespace
1308 * flag, thus sending any new CNS opcodes has a big chance of not working.
1309 * Qemu unfortunately had that bug after reporting a 1.1 version compliance
1310 * (but not for any later version).
1312 static bool nvme_ctrl_limited_cns(struct nvme_ctrl
*ctrl
)
1314 if (ctrl
->quirks
& NVME_QUIRK_IDENTIFY_CNS
)
1315 return ctrl
->vs
< NVME_VS(1, 2, 0);
1316 return ctrl
->vs
< NVME_VS(1, 1, 0);
1319 static int nvme_identify_ctrl(struct nvme_ctrl
*dev
, struct nvme_id_ctrl
**id
)
1321 struct nvme_command c
= { };
1324 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1325 c
.identify
.opcode
= nvme_admin_identify
;
1326 c
.identify
.cns
= NVME_ID_CNS_CTRL
;
1328 *id
= kmalloc(sizeof(struct nvme_id_ctrl
), GFP_KERNEL
);
1332 error
= nvme_submit_sync_cmd(dev
->admin_q
, &c
, *id
,
1333 sizeof(struct nvme_id_ctrl
));
1339 static int nvme_process_ns_desc(struct nvme_ctrl
*ctrl
, struct nvme_ns_ids
*ids
,
1340 struct nvme_ns_id_desc
*cur
, bool *csi_seen
)
1342 const char *warn_str
= "ctrl returned bogus length:";
1345 switch (cur
->nidt
) {
1346 case NVME_NIDT_EUI64
:
1347 if (cur
->nidl
!= NVME_NIDT_EUI64_LEN
) {
1348 dev_warn(ctrl
->device
, "%s %d for NVME_NIDT_EUI64\n",
1349 warn_str
, cur
->nidl
);
1352 if (ctrl
->quirks
& NVME_QUIRK_BOGUS_NID
)
1353 return NVME_NIDT_EUI64_LEN
;
1354 memcpy(ids
->eui64
, data
+ sizeof(*cur
), NVME_NIDT_EUI64_LEN
);
1355 return NVME_NIDT_EUI64_LEN
;
1356 case NVME_NIDT_NGUID
:
1357 if (cur
->nidl
!= NVME_NIDT_NGUID_LEN
) {
1358 dev_warn(ctrl
->device
, "%s %d for NVME_NIDT_NGUID\n",
1359 warn_str
, cur
->nidl
);
1362 if (ctrl
->quirks
& NVME_QUIRK_BOGUS_NID
)
1363 return NVME_NIDT_NGUID_LEN
;
1364 memcpy(ids
->nguid
, data
+ sizeof(*cur
), NVME_NIDT_NGUID_LEN
);
1365 return NVME_NIDT_NGUID_LEN
;
1366 case NVME_NIDT_UUID
:
1367 if (cur
->nidl
!= NVME_NIDT_UUID_LEN
) {
1368 dev_warn(ctrl
->device
, "%s %d for NVME_NIDT_UUID\n",
1369 warn_str
, cur
->nidl
);
1372 if (ctrl
->quirks
& NVME_QUIRK_BOGUS_NID
)
1373 return NVME_NIDT_UUID_LEN
;
1374 uuid_copy(&ids
->uuid
, data
+ sizeof(*cur
));
1375 return NVME_NIDT_UUID_LEN
;
1377 if (cur
->nidl
!= NVME_NIDT_CSI_LEN
) {
1378 dev_warn(ctrl
->device
, "%s %d for NVME_NIDT_CSI\n",
1379 warn_str
, cur
->nidl
);
1382 memcpy(&ids
->csi
, data
+ sizeof(*cur
), NVME_NIDT_CSI_LEN
);
1384 return NVME_NIDT_CSI_LEN
;
1386 /* Skip unknown types */
1391 static int nvme_identify_ns_descs(struct nvme_ctrl
*ctrl
,
1392 struct nvme_ns_info
*info
)
1394 struct nvme_command c
= { };
1395 bool csi_seen
= false;
1396 int status
, pos
, len
;
1399 if (ctrl
->vs
< NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl
))
1401 if (ctrl
->quirks
& NVME_QUIRK_NO_NS_DESC_LIST
)
1404 c
.identify
.opcode
= nvme_admin_identify
;
1405 c
.identify
.nsid
= cpu_to_le32(info
->nsid
);
1406 c
.identify
.cns
= NVME_ID_CNS_NS_DESC_LIST
;
1408 data
= kzalloc(NVME_IDENTIFY_DATA_SIZE
, GFP_KERNEL
);
1412 status
= nvme_submit_sync_cmd(ctrl
->admin_q
, &c
, data
,
1413 NVME_IDENTIFY_DATA_SIZE
);
1415 dev_warn(ctrl
->device
,
1416 "Identify Descriptors failed (nsid=%u, status=0x%x)\n",
1417 info
->nsid
, status
);
1421 for (pos
= 0; pos
< NVME_IDENTIFY_DATA_SIZE
; pos
+= len
) {
1422 struct nvme_ns_id_desc
*cur
= data
+ pos
;
1427 len
= nvme_process_ns_desc(ctrl
, &info
->ids
, cur
, &csi_seen
);
1431 len
+= sizeof(*cur
);
1434 if (nvme_multi_css(ctrl
) && !csi_seen
) {
1435 dev_warn(ctrl
->device
, "Command set not reported for nsid:%d\n",
1445 static int nvme_identify_ns(struct nvme_ctrl
*ctrl
, unsigned nsid
,
1446 struct nvme_id_ns
**id
)
1448 struct nvme_command c
= { };
1451 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1452 c
.identify
.opcode
= nvme_admin_identify
;
1453 c
.identify
.nsid
= cpu_to_le32(nsid
);
1454 c
.identify
.cns
= NVME_ID_CNS_NS
;
1456 *id
= kmalloc(sizeof(**id
), GFP_KERNEL
);
1460 error
= nvme_submit_sync_cmd(ctrl
->admin_q
, &c
, *id
, sizeof(**id
));
1462 dev_warn(ctrl
->device
, "Identify namespace failed (%d)\n", error
);
1468 static int nvme_ns_info_from_identify(struct nvme_ctrl
*ctrl
,
1469 struct nvme_ns_info
*info
)
1471 struct nvme_ns_ids
*ids
= &info
->ids
;
1472 struct nvme_id_ns
*id
;
1475 ret
= nvme_identify_ns(ctrl
, info
->nsid
, &id
);
1479 if (id
->ncap
== 0) {
1480 /* namespace not allocated or attached */
1481 info
->is_removed
= true;
1485 info
->anagrpid
= id
->anagrpid
;
1486 info
->is_shared
= id
->nmic
& NVME_NS_NMIC_SHARED
;
1487 info
->is_readonly
= id
->nsattr
& NVME_NS_ATTR_RO
;
1488 info
->is_ready
= true;
1489 if (ctrl
->quirks
& NVME_QUIRK_BOGUS_NID
) {
1490 dev_info(ctrl
->device
,
1491 "Ignoring bogus Namespace Identifiers\n");
1493 if (ctrl
->vs
>= NVME_VS(1, 1, 0) &&
1494 !memchr_inv(ids
->eui64
, 0, sizeof(ids
->eui64
)))
1495 memcpy(ids
->eui64
, id
->eui64
, sizeof(ids
->eui64
));
1496 if (ctrl
->vs
>= NVME_VS(1, 2, 0) &&
1497 !memchr_inv(ids
->nguid
, 0, sizeof(ids
->nguid
)))
1498 memcpy(ids
->nguid
, id
->nguid
, sizeof(ids
->nguid
));
1504 static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl
*ctrl
,
1505 struct nvme_ns_info
*info
)
1507 struct nvme_id_ns_cs_indep
*id
;
1508 struct nvme_command c
= {
1509 .identify
.opcode
= nvme_admin_identify
,
1510 .identify
.nsid
= cpu_to_le32(info
->nsid
),
1511 .identify
.cns
= NVME_ID_CNS_NS_CS_INDEP
,
1515 id
= kmalloc(sizeof(*id
), GFP_KERNEL
);
1519 ret
= nvme_submit_sync_cmd(ctrl
->admin_q
, &c
, id
, sizeof(*id
));
1521 info
->anagrpid
= id
->anagrpid
;
1522 info
->is_shared
= id
->nmic
& NVME_NS_NMIC_SHARED
;
1523 info
->is_readonly
= id
->nsattr
& NVME_NS_ATTR_RO
;
1524 info
->is_ready
= id
->nstat
& NVME_NSTAT_NRDY
;
1530 static int nvme_features(struct nvme_ctrl
*dev
, u8 op
, unsigned int fid
,
1531 unsigned int dword11
, void *buffer
, size_t buflen
, u32
*result
)
1533 union nvme_result res
= { 0 };
1534 struct nvme_command c
= { };
1537 c
.features
.opcode
= op
;
1538 c
.features
.fid
= cpu_to_le32(fid
);
1539 c
.features
.dword11
= cpu_to_le32(dword11
);
1541 ret
= __nvme_submit_sync_cmd(dev
->admin_q
, &c
, &res
,
1542 buffer
, buflen
, NVME_QID_ANY
, 0, 0);
1543 if (ret
>= 0 && result
)
1544 *result
= le32_to_cpu(res
.u32
);
1548 int nvme_set_features(struct nvme_ctrl
*dev
, unsigned int fid
,
1549 unsigned int dword11
, void *buffer
, size_t buflen
,
1552 return nvme_features(dev
, nvme_admin_set_features
, fid
, dword11
, buffer
,
1555 EXPORT_SYMBOL_GPL(nvme_set_features
);
1557 int nvme_get_features(struct nvme_ctrl
*dev
, unsigned int fid
,
1558 unsigned int dword11
, void *buffer
, size_t buflen
,
1561 return nvme_features(dev
, nvme_admin_get_features
, fid
, dword11
, buffer
,
1564 EXPORT_SYMBOL_GPL(nvme_get_features
);
1566 int nvme_set_queue_count(struct nvme_ctrl
*ctrl
, int *count
)
1568 u32 q_count
= (*count
- 1) | ((*count
- 1) << 16);
1570 int status
, nr_io_queues
;
1572 status
= nvme_set_features(ctrl
, NVME_FEAT_NUM_QUEUES
, q_count
, NULL
, 0,
1578 * Degraded controllers might return an error when setting the queue
1579 * count. We still want to be able to bring them online and offer
1580 * access to the admin queue, as that might be only way to fix them up.
1583 dev_err(ctrl
->device
, "Could not set queue count (%d)\n", status
);
1586 nr_io_queues
= min(result
& 0xffff, result
>> 16) + 1;
1587 *count
= min(*count
, nr_io_queues
);
1592 EXPORT_SYMBOL_GPL(nvme_set_queue_count
);
1594 #define NVME_AEN_SUPPORTED \
1595 (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
1596 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)
1598 static void nvme_enable_aen(struct nvme_ctrl
*ctrl
)
1600 u32 result
, supported_aens
= ctrl
->oaes
& NVME_AEN_SUPPORTED
;
1603 if (!supported_aens
)
1606 status
= nvme_set_features(ctrl
, NVME_FEAT_ASYNC_EVENT
, supported_aens
,
1609 dev_warn(ctrl
->device
, "Failed to configure AEN (cfg %x)\n",
1612 queue_work(nvme_wq
, &ctrl
->async_event_work
);
1615 static int nvme_ns_open(struct nvme_ns
*ns
)
1618 /* should never be called due to GENHD_FL_HIDDEN */
1619 if (WARN_ON_ONCE(nvme_ns_head_multipath(ns
->head
)))
1621 if (!nvme_get_ns(ns
))
1623 if (!try_module_get(ns
->ctrl
->ops
->module
))
1634 static void nvme_ns_release(struct nvme_ns
*ns
)
1637 module_put(ns
->ctrl
->ops
->module
);
1641 static int nvme_open(struct gendisk
*disk
, blk_mode_t mode
)
1643 return nvme_ns_open(disk
->private_data
);
1646 static void nvme_release(struct gendisk
*disk
)
1648 nvme_ns_release(disk
->private_data
);
1651 int nvme_getgeo(struct block_device
*bdev
, struct hd_geometry
*geo
)
1653 /* some standard values */
1654 geo
->heads
= 1 << 6;
1655 geo
->sectors
= 1 << 5;
1656 geo
->cylinders
= get_capacity(bdev
->bd_disk
) >> 11;
1660 #ifdef CONFIG_BLK_DEV_INTEGRITY
1661 static void nvme_init_integrity(struct gendisk
*disk
, struct nvme_ns
*ns
,
1662 u32 max_integrity_segments
)
1664 struct blk_integrity integrity
= { };
1666 switch (ns
->pi_type
) {
1667 case NVME_NS_DPS_PI_TYPE3
:
1668 switch (ns
->guard_type
) {
1669 case NVME_NVM_NS_16B_GUARD
:
1670 integrity
.profile
= &t10_pi_type3_crc
;
1671 integrity
.tag_size
= sizeof(u16
) + sizeof(u32
);
1672 integrity
.flags
|= BLK_INTEGRITY_DEVICE_CAPABLE
;
1674 case NVME_NVM_NS_64B_GUARD
:
1675 integrity
.profile
= &ext_pi_type3_crc64
;
1676 integrity
.tag_size
= sizeof(u16
) + 6;
1677 integrity
.flags
|= BLK_INTEGRITY_DEVICE_CAPABLE
;
1680 integrity
.profile
= NULL
;
1684 case NVME_NS_DPS_PI_TYPE1
:
1685 case NVME_NS_DPS_PI_TYPE2
:
1686 switch (ns
->guard_type
) {
1687 case NVME_NVM_NS_16B_GUARD
:
1688 integrity
.profile
= &t10_pi_type1_crc
;
1689 integrity
.tag_size
= sizeof(u16
);
1690 integrity
.flags
|= BLK_INTEGRITY_DEVICE_CAPABLE
;
1692 case NVME_NVM_NS_64B_GUARD
:
1693 integrity
.profile
= &ext_pi_type1_crc64
;
1694 integrity
.tag_size
= sizeof(u16
);
1695 integrity
.flags
|= BLK_INTEGRITY_DEVICE_CAPABLE
;
1698 integrity
.profile
= NULL
;
1703 integrity
.profile
= NULL
;
1707 integrity
.tuple_size
= ns
->ms
;
1708 blk_integrity_register(disk
, &integrity
);
1709 blk_queue_max_integrity_segments(disk
->queue
, max_integrity_segments
);
1712 static void nvme_init_integrity(struct gendisk
*disk
, struct nvme_ns
*ns
,
1713 u32 max_integrity_segments
)
1716 #endif /* CONFIG_BLK_DEV_INTEGRITY */
1718 static void nvme_config_discard(struct gendisk
*disk
, struct nvme_ns
*ns
)
1720 struct nvme_ctrl
*ctrl
= ns
->ctrl
;
1721 struct request_queue
*queue
= disk
->queue
;
1722 u32 size
= queue_logical_block_size(queue
);
1724 if (ctrl
->dmrsl
&& ctrl
->dmrsl
<= nvme_sect_to_lba(ns
, UINT_MAX
))
1725 ctrl
->max_discard_sectors
= nvme_lba_to_sect(ns
, ctrl
->dmrsl
);
1727 if (ctrl
->max_discard_sectors
== 0) {
1728 blk_queue_max_discard_sectors(queue
, 0);
1732 BUILD_BUG_ON(PAGE_SIZE
/ sizeof(struct nvme_dsm_range
) <
1733 NVME_DSM_MAX_RANGES
);
1735 queue
->limits
.discard_granularity
= size
;
1737 /* If discard is already enabled, don't reset queue limits */
1738 if (queue
->limits
.max_discard_sectors
)
1741 blk_queue_max_discard_sectors(queue
, ctrl
->max_discard_sectors
);
1742 blk_queue_max_discard_segments(queue
, ctrl
->max_discard_segments
);
1744 if (ctrl
->quirks
& NVME_QUIRK_DEALLOCATE_ZEROES
)
1745 blk_queue_max_write_zeroes_sectors(queue
, UINT_MAX
);
1748 static bool nvme_ns_ids_equal(struct nvme_ns_ids
*a
, struct nvme_ns_ids
*b
)
1750 return uuid_equal(&a
->uuid
, &b
->uuid
) &&
1751 memcmp(&a
->nguid
, &b
->nguid
, sizeof(a
->nguid
)) == 0 &&
1752 memcmp(&a
->eui64
, &b
->eui64
, sizeof(a
->eui64
)) == 0 &&
1756 static int nvme_init_ms(struct nvme_ns
*ns
, struct nvme_id_ns
*id
)
1758 bool first
= id
->dps
& NVME_NS_DPS_PI_FIRST
;
1759 unsigned lbaf
= nvme_lbaf_index(id
->flbas
);
1760 struct nvme_ctrl
*ctrl
= ns
->ctrl
;
1761 struct nvme_command c
= { };
1762 struct nvme_id_ns_nvm
*nvm
;
1767 ns
->ms
= le16_to_cpu(id
->lbaf
[lbaf
].ms
);
1768 if (!(ctrl
->ctratt
& NVME_CTRL_ATTR_ELBAS
)) {
1769 ns
->pi_size
= sizeof(struct t10_pi_tuple
);
1770 ns
->guard_type
= NVME_NVM_NS_16B_GUARD
;
1774 nvm
= kzalloc(sizeof(*nvm
), GFP_KERNEL
);
1778 c
.identify
.opcode
= nvme_admin_identify
;
1779 c
.identify
.nsid
= cpu_to_le32(ns
->head
->ns_id
);
1780 c
.identify
.cns
= NVME_ID_CNS_CS_NS
;
1781 c
.identify
.csi
= NVME_CSI_NVM
;
1783 ret
= nvme_submit_sync_cmd(ns
->ctrl
->admin_q
, &c
, nvm
, sizeof(*nvm
));
1787 elbaf
= le32_to_cpu(nvm
->elbaf
[lbaf
]);
1789 /* no support for storage tag formats right now */
1790 if (nvme_elbaf_sts(elbaf
))
1793 ns
->guard_type
= nvme_elbaf_guard_type(elbaf
);
1794 switch (ns
->guard_type
) {
1795 case NVME_NVM_NS_64B_GUARD
:
1796 ns
->pi_size
= sizeof(struct crc64_pi_tuple
);
1798 case NVME_NVM_NS_16B_GUARD
:
1799 ns
->pi_size
= sizeof(struct t10_pi_tuple
);
1808 if (ns
->pi_size
&& (first
|| ns
->ms
== ns
->pi_size
))
1809 ns
->pi_type
= id
->dps
& NVME_NS_DPS_PI_MASK
;
1816 static void nvme_configure_metadata(struct nvme_ns
*ns
, struct nvme_id_ns
*id
)
1818 struct nvme_ctrl
*ctrl
= ns
->ctrl
;
1820 if (nvme_init_ms(ns
, id
))
1823 ns
->features
&= ~(NVME_NS_METADATA_SUPPORTED
| NVME_NS_EXT_LBAS
);
1824 if (!ns
->ms
|| !(ctrl
->ops
->flags
& NVME_F_METADATA_SUPPORTED
))
1827 if (ctrl
->ops
->flags
& NVME_F_FABRICS
) {
1829 * The NVMe over Fabrics specification only supports metadata as
1830 * part of the extended data LBA. We rely on HCA/HBA support to
1831 * remap the separate metadata buffer from the block layer.
1833 if (WARN_ON_ONCE(!(id
->flbas
& NVME_NS_FLBAS_META_EXT
)))
1836 ns
->features
|= NVME_NS_EXT_LBAS
;
1839 * The current fabrics transport drivers support namespace
1840 * metadata formats only if nvme_ns_has_pi() returns true.
1841 * Suppress support for all other formats so the namespace will
1842 * have a 0 capacity and not be usable through the block stack.
1844 * Note, this check will need to be modified if any drivers
1845 * gain the ability to use other metadata formats.
1847 if (ctrl
->max_integrity_segments
&& nvme_ns_has_pi(ns
))
1848 ns
->features
|= NVME_NS_METADATA_SUPPORTED
;
1851 * For PCIe controllers, we can't easily remap the separate
1852 * metadata buffer from the block layer and thus require a
1853 * separate metadata buffer for block layer metadata/PI support.
1854 * We allow extended LBAs for the passthrough interface, though.
1856 if (id
->flbas
& NVME_NS_FLBAS_META_EXT
)
1857 ns
->features
|= NVME_NS_EXT_LBAS
;
1859 ns
->features
|= NVME_NS_METADATA_SUPPORTED
;
1863 static void nvme_set_queue_limits(struct nvme_ctrl
*ctrl
,
1864 struct request_queue
*q
)
1866 bool vwc
= ctrl
->vwc
& NVME_CTRL_VWC_PRESENT
;
1868 if (ctrl
->max_hw_sectors
) {
1870 (ctrl
->max_hw_sectors
/ (NVME_CTRL_PAGE_SIZE
>> 9)) + 1;
1872 max_segments
= min_not_zero(max_segments
, ctrl
->max_segments
);
1873 blk_queue_max_hw_sectors(q
, ctrl
->max_hw_sectors
);
1874 blk_queue_max_segments(q
, min_t(u32
, max_segments
, USHRT_MAX
));
1876 blk_queue_virt_boundary(q
, NVME_CTRL_PAGE_SIZE
- 1);
1877 blk_queue_dma_alignment(q
, 3);
1878 blk_queue_write_cache(q
, vwc
, vwc
);
1881 static void nvme_update_disk_info(struct gendisk
*disk
,
1882 struct nvme_ns
*ns
, struct nvme_id_ns
*id
)
1884 sector_t capacity
= nvme_lba_to_sect(ns
, le64_to_cpu(id
->nsze
));
1885 u32 bs
= 1U << ns
->lba_shift
;
1886 u32 atomic_bs
, phys_bs
, io_opt
= 0;
1889 * The block layer can't support LBA sizes larger than the page size
1890 * yet, so catch this early and don't allow block I/O.
1892 if (ns
->lba_shift
> PAGE_SHIFT
) {
1897 blk_integrity_unregister(disk
);
1899 atomic_bs
= phys_bs
= bs
;
1900 if (id
->nabo
== 0) {
1902 * Bit 1 indicates whether NAWUPF is defined for this namespace
1903 * and whether it should be used instead of AWUPF. If NAWUPF ==
1904 * 0 then AWUPF must be used instead.
1906 if (id
->nsfeat
& NVME_NS_FEAT_ATOMICS
&& id
->nawupf
)
1907 atomic_bs
= (1 + le16_to_cpu(id
->nawupf
)) * bs
;
1909 atomic_bs
= (1 + ns
->ctrl
->subsys
->awupf
) * bs
;
1912 if (id
->nsfeat
& NVME_NS_FEAT_IO_OPT
) {
1913 /* NPWG = Namespace Preferred Write Granularity */
1914 phys_bs
= bs
* (1 + le16_to_cpu(id
->npwg
));
1915 /* NOWS = Namespace Optimal Write Size */
1916 io_opt
= bs
* (1 + le16_to_cpu(id
->nows
));
1919 blk_queue_logical_block_size(disk
->queue
, bs
);
1921 * Linux filesystems assume writing a single physical block is
1922 * an atomic operation. Hence limit the physical block size to the
1923 * value of the Atomic Write Unit Power Fail parameter.
1925 blk_queue_physical_block_size(disk
->queue
, min(phys_bs
, atomic_bs
));
1926 blk_queue_io_min(disk
->queue
, phys_bs
);
1927 blk_queue_io_opt(disk
->queue
, io_opt
);
1930 * Register a metadata profile for PI, or the plain non-integrity NVMe
1931 * metadata masquerading as Type 0 if supported, otherwise reject block
1932 * I/O to namespaces with metadata except when the namespace supports
1933 * PI, as it can strip/insert in that case.
1936 if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY
) &&
1937 (ns
->features
& NVME_NS_METADATA_SUPPORTED
))
1938 nvme_init_integrity(disk
, ns
,
1939 ns
->ctrl
->max_integrity_segments
);
1940 else if (!nvme_ns_has_pi(ns
))
1944 set_capacity_and_notify(disk
, capacity
);
1946 nvme_config_discard(disk
, ns
);
1947 blk_queue_max_write_zeroes_sectors(disk
->queue
,
1948 ns
->ctrl
->max_zeroes_sectors
);
1951 static bool nvme_ns_is_readonly(struct nvme_ns
*ns
, struct nvme_ns_info
*info
)
1953 return info
->is_readonly
|| test_bit(NVME_NS_FORCE_RO
, &ns
->flags
);
1956 static inline bool nvme_first_scan(struct gendisk
*disk
)
1958 /* nvme_alloc_ns() scans the disk prior to adding it */
1959 return !disk_live(disk
);
1962 static void nvme_set_chunk_sectors(struct nvme_ns
*ns
, struct nvme_id_ns
*id
)
1964 struct nvme_ctrl
*ctrl
= ns
->ctrl
;
1967 if ((ctrl
->quirks
& NVME_QUIRK_STRIPE_SIZE
) &&
1968 is_power_of_2(ctrl
->max_hw_sectors
))
1969 iob
= ctrl
->max_hw_sectors
;
1971 iob
= nvme_lba_to_sect(ns
, le16_to_cpu(id
->noiob
));
1976 if (!is_power_of_2(iob
)) {
1977 if (nvme_first_scan(ns
->disk
))
1978 pr_warn("%s: ignoring unaligned IO boundary:%u\n",
1979 ns
->disk
->disk_name
, iob
);
1983 if (blk_queue_is_zoned(ns
->disk
->queue
)) {
1984 if (nvme_first_scan(ns
->disk
))
1985 pr_warn("%s: ignoring zoned namespace IO boundary\n",
1986 ns
->disk
->disk_name
);
1990 blk_queue_chunk_sectors(ns
->queue
, iob
);
1993 static int nvme_update_ns_info_generic(struct nvme_ns
*ns
,
1994 struct nvme_ns_info
*info
)
1996 blk_mq_freeze_queue(ns
->disk
->queue
);
1997 nvme_set_queue_limits(ns
->ctrl
, ns
->queue
);
1998 set_disk_ro(ns
->disk
, nvme_ns_is_readonly(ns
, info
));
1999 blk_mq_unfreeze_queue(ns
->disk
->queue
);
2001 if (nvme_ns_head_multipath(ns
->head
)) {
2002 blk_mq_freeze_queue(ns
->head
->disk
->queue
);
2003 set_disk_ro(ns
->head
->disk
, nvme_ns_is_readonly(ns
, info
));
2004 nvme_mpath_revalidate_paths(ns
);
2005 blk_stack_limits(&ns
->head
->disk
->queue
->limits
,
2006 &ns
->queue
->limits
, 0);
2007 ns
->head
->disk
->flags
|= GENHD_FL_HIDDEN
;
2008 blk_mq_unfreeze_queue(ns
->head
->disk
->queue
);
2011 /* Hide the block-interface for these devices */
2012 ns
->disk
->flags
|= GENHD_FL_HIDDEN
;
2013 set_bit(NVME_NS_READY
, &ns
->flags
);
2018 static int nvme_update_ns_info_block(struct nvme_ns
*ns
,
2019 struct nvme_ns_info
*info
)
2021 struct nvme_id_ns
*id
;
2025 ret
= nvme_identify_ns(ns
->ctrl
, info
->nsid
, &id
);
2029 blk_mq_freeze_queue(ns
->disk
->queue
);
2030 lbaf
= nvme_lbaf_index(id
->flbas
);
2031 ns
->lba_shift
= id
->lbaf
[lbaf
].ds
;
2032 nvme_set_queue_limits(ns
->ctrl
, ns
->queue
);
2034 nvme_configure_metadata(ns
, id
);
2035 nvme_set_chunk_sectors(ns
, id
);
2036 nvme_update_disk_info(ns
->disk
, ns
, id
);
2038 if (ns
->head
->ids
.csi
== NVME_CSI_ZNS
) {
2039 ret
= nvme_update_zone_info(ns
, lbaf
);
2041 blk_mq_unfreeze_queue(ns
->disk
->queue
);
2047 * Only set the DEAC bit if the device guarantees that reads from
2048 * deallocated data return zeroes. While the DEAC bit does not
2049 * require that, it must be a no-op if reads from deallocated data
2050 * do not return zeroes.
2052 if ((id
->dlfeat
& 0x7) == 0x1 && (id
->dlfeat
& (1 << 3)))
2053 ns
->features
|= NVME_NS_DEAC
;
2054 set_disk_ro(ns
->disk
, nvme_ns_is_readonly(ns
, info
));
2055 set_bit(NVME_NS_READY
, &ns
->flags
);
2056 blk_mq_unfreeze_queue(ns
->disk
->queue
);
2058 if (blk_queue_is_zoned(ns
->queue
)) {
2059 ret
= nvme_revalidate_zones(ns
);
2060 if (ret
&& !nvme_first_scan(ns
->disk
))
2064 if (nvme_ns_head_multipath(ns
->head
)) {
2065 blk_mq_freeze_queue(ns
->head
->disk
->queue
);
2066 nvme_update_disk_info(ns
->head
->disk
, ns
, id
);
2067 set_disk_ro(ns
->head
->disk
, nvme_ns_is_readonly(ns
, info
));
2068 nvme_mpath_revalidate_paths(ns
);
2069 blk_stack_limits(&ns
->head
->disk
->queue
->limits
,
2070 &ns
->queue
->limits
, 0);
2071 disk_update_readahead(ns
->head
->disk
);
2072 blk_mq_unfreeze_queue(ns
->head
->disk
->queue
);
 * If probing fails due to an unsupported feature, hide the block device,
 * but still allow other access.
2081 if (ret
== -ENODEV
) {
2082 ns
->disk
->flags
|= GENHD_FL_HIDDEN
;
2083 set_bit(NVME_NS_READY
, &ns
->flags
);
2090 static int nvme_update_ns_info(struct nvme_ns
*ns
, struct nvme_ns_info
*info
)
2092 switch (info
->ids
.csi
) {
2094 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED
)) {
2095 dev_info(ns
->ctrl
->device
,
2096 "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
2098 return nvme_update_ns_info_generic(ns
, info
);
2100 return nvme_update_ns_info_block(ns
, info
);
2102 return nvme_update_ns_info_block(ns
, info
);
2104 dev_info(ns
->ctrl
->device
,
2105 "block device for nsid %u not supported (csi %u)\n",
2106 info
->nsid
, info
->ids
.csi
);
2107 return nvme_update_ns_info_generic(ns
, info
);
2111 #ifdef CONFIG_BLK_SED_OPAL
2112 static int nvme_sec_submit(void *data
, u16 spsp
, u8 secp
, void *buffer
, size_t len
,
2115 struct nvme_ctrl
*ctrl
= data
;
2116 struct nvme_command cmd
= { };
2119 cmd
.common
.opcode
= nvme_admin_security_send
;
2121 cmd
.common
.opcode
= nvme_admin_security_recv
;
2122 cmd
.common
.nsid
= 0;
2123 cmd
.common
.cdw10
= cpu_to_le32(((u32
)secp
) << 24 | ((u32
)spsp
) << 8);
2124 cmd
.common
.cdw11
= cpu_to_le32(len
);
2126 return __nvme_submit_sync_cmd(ctrl
->admin_q
, &cmd
, NULL
, buffer
, len
,
2127 NVME_QID_ANY
, 1, 0);
2130 static void nvme_configure_opal(struct nvme_ctrl
*ctrl
, bool was_suspended
)
2132 if (ctrl
->oacs
& NVME_CTRL_OACS_SEC_SUPP
) {
2133 if (!ctrl
->opal_dev
)
2134 ctrl
->opal_dev
= init_opal_dev(ctrl
, &nvme_sec_submit
);
2135 else if (was_suspended
)
2136 opal_unlock_from_suspend(ctrl
->opal_dev
);
2138 free_opal_dev(ctrl
->opal_dev
);
2139 ctrl
->opal_dev
= NULL
;
2143 static void nvme_configure_opal(struct nvme_ctrl
*ctrl
, bool was_suspended
)
2146 #endif /* CONFIG_BLK_SED_OPAL */
2148 #ifdef CONFIG_BLK_DEV_ZONED
2149 static int nvme_report_zones(struct gendisk
*disk
, sector_t sector
,
2150 unsigned int nr_zones
, report_zones_cb cb
, void *data
)
2152 return nvme_ns_report_zones(disk
->private_data
, sector
, nr_zones
, cb
,
2156 #define nvme_report_zones NULL
2157 #endif /* CONFIG_BLK_DEV_ZONED */
2159 const struct block_device_operations nvme_bdev_ops
= {
2160 .owner
= THIS_MODULE
,
2161 .ioctl
= nvme_ioctl
,
2162 .compat_ioctl
= blkdev_compat_ptr_ioctl
,
2164 .release
= nvme_release
,
2165 .getgeo
= nvme_getgeo
,
2166 .report_zones
= nvme_report_zones
,
2167 .pr_ops
= &nvme_pr_ops
,
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 mask, u32 val,
                u32 timeout, const char *op)
{
        unsigned long timeout_jiffies = jiffies + timeout * HZ;
        u32 csts;
        int ret;

        while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
                if (csts == ~0)
                        return -ENODEV;
                if ((csts & mask) == val)
                        break;

                usleep_range(1000, 2000);
                if (fatal_signal_pending(current))
                        return -EINTR;
                if (time_after(jiffies, timeout_jiffies)) {
                        dev_err(ctrl->device,
                                "Device not ready; aborting %s, CSTS=0x%x\n",
                                op, csts);
                        return -ENODEV;
                }
        }

        return ret;
}

int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{
        int ret;

        ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
        if (shutdown)
                ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
        else
                ctrl->ctrl_config &= ~NVME_CC_ENABLE;

        ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
        if (ret)
                return ret;

        if (shutdown) {
                return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK,
                                       NVME_CSTS_SHST_CMPLT,
                                       ctrl->shutdown_timeout, "shutdown");
        }
        if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
                msleep(NVME_QUIRK_DELAY_AMOUNT);
        return nvme_wait_ready(ctrl, NVME_CSTS_RDY, 0,
                               (NVME_CAP_TIMEOUT(ctrl->cap) + 1) / 2, "reset");
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);

int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
{
        unsigned dev_page_min;
        u32 timeout;
        int ret;

        ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
        if (ret) {
                dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
                return ret;
        }
        dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;

        if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
                dev_err(ctrl->device,
                        "Minimum device page size %u too large for host (%u)\n",
                        1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
                return -ENODEV;
        }

        if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
                ctrl->ctrl_config = NVME_CC_CSS_CSI;
        else
                ctrl->ctrl_config = NVME_CC_CSS_NVM;

        if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
                u32 crto;

                ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
                if (ret) {
                        dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
                                ret);
                        return ret;
                }

                if (ctrl->cap & NVME_CAP_CRMS_CRIMS) {
                        ctrl->ctrl_config |= NVME_CC_CRIME;
                        timeout = NVME_CRTO_CRIMT(crto);
                } else {
                        timeout = NVME_CRTO_CRWMT(crto);
                }
        } else {
                timeout = NVME_CAP_TIMEOUT(ctrl->cap);
        }

        ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
        ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
        ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
        ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
        if (ret)
                return ret;

        /* Flush write to device (required if transport is PCI) */
        ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config);
        if (ret)
                return ret;

        ctrl->ctrl_config |= NVME_CC_ENABLE;
        ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
        if (ret)
                return ret;
        return nvme_wait_ready(ctrl, NVME_CSTS_RDY, NVME_CSTS_RDY,
                               (timeout + 1) / 2, "initialisation");
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
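
/*
 * Note on the (timeout + 1) / 2 arithmetic used by nvme_disable_ctrl() and
 * nvme_enable_ctrl() above: CAP.TO and the CRTO fields are expressed in
 * 500ms units, while nvme_wait_ready() takes a timeout in seconds, so the
 * value is halved (rounding up) before it is passed along.
 */
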
static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
{
        __le64 ts;
        int ret;

        if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
                return 0;

        ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
        ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
                        NULL);
        if (ret)
                dev_warn_once(ctrl->device,
                        "could not set timestamp (%d)\n", ret);
        return ret;
}

static int nvme_configure_host_options(struct nvme_ctrl *ctrl)
{
        struct nvme_feat_host_behavior *host;
        u8 acre = 0, lbafee = 0;
        int ret;

        /* Don't bother enabling the feature if retry delay is not reported */
        if (ctrl->crdt[0])
                acre = NVME_ENABLE_ACRE;
        if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)
                lbafee = NVME_ENABLE_LBAFEE;

        if (!acre && !lbafee)
                return 0;

        host = kzalloc(sizeof(*host), GFP_KERNEL);
        if (!host)
                return 0;

        host->acre = acre;
        host->lbafee = lbafee;
        ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
                                host, sizeof(*host), NULL);
        kfree(host);
        return ret;
}

/*
 * The function checks whether the given total (exlat + enlat) latency of
 * a power state allows the latter to be used as an APST transition target.
 * It does so by comparing the latency to the primary and secondary latency
 * tolerances defined by module params. If there's a match, the corresponding
 * timeout value is returned and the matching tolerance index (1 or 2) is
 * reported.
 */
static bool nvme_apst_get_transition_time(u64 total_latency,
                u64 *transition_time, unsigned *last_index)
{
        if (total_latency <= apst_primary_latency_tol_us) {
                if (*last_index == 1)
                        return false;
                *last_index = 1;
                *transition_time = apst_primary_timeout_ms;
                return true;
        }
        if (apst_secondary_timeout_ms &&
                total_latency <= apst_secondary_latency_tol_us) {
                if (*last_index <= 2)
                        return false;
                *last_index = 2;
                *transition_time = apst_secondary_timeout_ms;
                return true;
        }
        return false;
}

/*
 * APST (Autonomous Power State Transition) lets us program a table of power
 * state transitions that the controller will perform automatically.
 *
 * Depending on module params, one of the two supported techniques will be used:
 *
 * - If the parameters provide explicit timeouts and tolerances, they will be
 *   used to build a table with up to 2 non-operational states to transition to.
 *   The default parameter values were selected based on the values used by
 *   Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic
 *   regeneration of the APST table in the event of switching between external
 *   and battery power, the timeouts and tolerances reflect a compromise
 *   between values used by Microsoft for AC and battery scenarios.
 * - If not, we'll configure the table with a simple heuristic: we are willing
 *   to spend at most 2% of the time transitioning between power states.
 *   Therefore, when running in any given state, we will enter the next
 *   lower-power non-operational state after waiting 50 * (enlat + exlat)
 *   microseconds, as long as that state's exit latency is under the requested
 *   maximum latency.
 *
 * We will not autonomously enter any non-operational state for which the total
 * latency exceeds ps_max_latency_us.
 *
 * Users can set ps_max_latency_us to zero to turn off APST.
 */
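
/*
 * Illustration (not from the spec, just working the heuristic above with
 * made-up numbers): a non-operational state reporting enlat = 1000us and
 * exlat = 500us has a total latency of 1500us.  The default heuristic then
 * waits 50 * 1500us = 75ms before transitioning into it, which is exactly
 * the (total_latency_us + 19) / 20 millisecond rounding done below.
 */
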
static int nvme_configure_apst(struct nvme_ctrl *ctrl)
{
        struct nvme_feat_auto_pst *table;
        unsigned apste = 0;
        u64 max_lat_us = 0;
        __le64 target = 0;
        int max_ps = -1;
        int state;
        int ret;
        unsigned last_lt_index = UINT_MAX;

        /*
         * If APST isn't supported or if we haven't been initialized yet,
         * then don't do anything.
         */
        if (!ctrl->apsta)
                return 0;

        if (ctrl->npss > 31) {
                dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
                return 0;
        }

        table = kzalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
                return 0;

        if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
                /* Turn off APST. */
                dev_dbg(ctrl->device, "APST disabled\n");
                goto done;
        }

        /*
         * Walk through all states from lowest- to highest-power.
         * According to the spec, lower-numbered states use more power.  NPSS,
         * despite the name, is the index of the lowest-power state, not the
         * number of states.
         */
        for (state = (int)ctrl->npss; state >= 0; state--) {
                u64 total_latency_us, exit_latency_us, transition_ms;

                if (target)
                        table->entries[state] = target;

                /*
                 * Don't allow transitions to the deepest state if it's quirked
                 * off.
                 */
                if (state == ctrl->npss &&
                    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
                        continue;

                /*
                 * Is this state a useful non-operational state for higher-power
                 * states to autonomously transition to?
                 */
                if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE))
                        continue;

                exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
                if (exit_latency_us > ctrl->ps_max_latency_us)
                        continue;

                total_latency_us = exit_latency_us +
                        le32_to_cpu(ctrl->psd[state].entry_lat);

                /*
                 * This state is good. It can be used as the APST idle target
                 * for higher power states.
                 */
                if (apst_primary_timeout_ms && apst_primary_latency_tol_us) {
                        if (!nvme_apst_get_transition_time(total_latency_us,
                                        &transition_ms, &last_lt_index))
                                continue;
                } else {
                        transition_ms = total_latency_us + 19;
                        do_div(transition_ms, 20);
                        if (transition_ms > (1 << 24) - 1)
                                transition_ms = (1 << 24) - 1;
                }

                target = cpu_to_le64((state << 3) | (transition_ms << 8));
                if (max_ps == -1)
                        max_ps = state;
                if (total_latency_us > max_lat_us)
                        max_lat_us = total_latency_us;
        }

        if (max_ps == -1)
                dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
        else
                dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
                        max_ps, max_lat_us, (int)sizeof(*table), table);
        apste = 1;

done:
        ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
                                table, sizeof(*table), NULL);
        if (ret)
                dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
        kfree(table);
        return ret;
}
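
/*
 * Illustration of the table entry encoding used above (hypothetical
 * numbers): an idle target of power state 4 with a 75ms idle timeout is
 * encoded as (4 << 3) | (75 << 8) = 0x4b20, i.e. the idle transition power
 * state sits in bits 7:3 and the idle time prior to transition (in
 * milliseconds) in bits 31:8 of each APST table entry.
 */
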
static void nvme_set_latency_tolerance(struct device *dev, s32 val)
{
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
        u64 latency;

        switch (val) {
        case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
        case PM_QOS_LATENCY_ANY:
                latency = U64_MAX;
                break;
        default:
                latency = val;
        }

        if (ctrl->ps_max_latency_us != latency) {
                ctrl->ps_max_latency_us = latency;
                if (ctrl->state == NVME_CTRL_LIVE)
                        nvme_configure_apst(ctrl);
        }
}

struct nvme_core_quirk_entry {
        /*
         * NVMe model and firmware strings are padded with spaces.  For
         * simplicity, strings in the quirk table are padded with NULLs
         * instead.
         */
        u16 vid;
        const char *mn;
        const char *fr;
        unsigned long quirks;
};

static const struct nvme_core_quirk_entry core_quirks[] = {
        {
                /*
                 * This Toshiba device seems to die using any APST states.  See:
                 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
                 */
                .mn = "THNSF5256GPUK TOSHIBA",
                .quirks = NVME_QUIRK_NO_APST,
        },
        {
                /*
                 * This LiteON CL1-3D*-Q11 firmware version has a race
                 * condition associated with actions related to suspend to idle.
                 * LiteON has resolved the problem in future firmware.
                 */
                .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
        },
        {
                /*
                 * This Kioxia CD6-V Series / HPE PE8030 device times out and
                 * aborts I/O during any load, but more easily reproducible
                 * with discards (fstrim).
                 *
                 * The device is left in a state where it is also not possible
                 * to use "nvme set-feature" to disable APST, but booting with
                 * nvme_core.default_ps_max_latency=0 works.
                 */
                .mn = "KCD6XVUL6T40",
                .quirks = NVME_QUIRK_NO_APST,
        },
        {
                /*
                 * The external Samsung X5 SSD fails initialization without a
                 * delay before checking if it is ready and has a whole set of
                 * other problems.  To make this even more interesting, it
                 * shares the PCI ID with internal Samsung 970 Evo Plus that
                 * does not need or want these quirks.
                 */
                .mn = "Samsung Portable SSD X5",
                .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
                          NVME_QUIRK_NO_DEEPEST_PS |
                          NVME_QUIRK_IGNORE_DEV_SUBNQN,
        },
};

/* match is null-terminated but idstr is space-padded. */
static bool string_matches(const char *idstr, const char *match, size_t len)
{
        size_t matchlen;

        if (!match)
                return true;

        matchlen = strlen(match);
        WARN_ON_ONCE(matchlen > len);

        if (memcmp(idstr, match, matchlen))
                return false;

        for (; matchlen < len; matchlen++)
                if (idstr[matchlen] != ' ')
                        return false;

        return true;
}

static bool quirk_matches(const struct nvme_id_ctrl *id,
                          const struct nvme_core_quirk_entry *q)
{
        return q->vid == le16_to_cpu(id->vid) &&
                string_matches(id->mn, q->mn, sizeof(id->mn)) &&
                string_matches(id->fr, q->fr, sizeof(id->fr));
}
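
/*
 * Example (illustrative only): with len == 8, string_matches("ABC     ",
 * "ABC", 8) is true because the remaining bytes are spaces, while
 * string_matches("ABCD    ", "ABC", 8) is false.  A NULL match acts as a
 * wildcard, which is how quirk entries above can omit .mn or .fr.
 */
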
static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
                struct nvme_id_ctrl *id)
{
        size_t nqnlen;
        int off;

        if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
                nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
                if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
                        strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
                        return;
                }

                if (ctrl->vs >= NVME_VS(1, 2, 1))
                        dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
        }

        /*
         * Generate a "fake" NQN similar to the one in Section 4.5 of the NVMe
         * Base Specification 2.0.  It is slightly different from the format
         * specified there due to historic reasons, and we can't change it now.
         */
        off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
                        "nqn.2014.08.org.nvmexpress:%04x%04x",
                        le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
        memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
        off += sizeof(id->sn);
        memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
        off += sizeof(id->mn);
        memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
}
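
/*
 * Illustration (hypothetical values): a controller reporting vid 0xabcd and
 * ssvid 0x1234 gets the prefix "nqn.2014.08.org.nvmexpress:abcd1234",
 * followed by the raw, space-padded Serial Number and Model Number fields
 * copied straight out of the Identify Controller data.
 */
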
static void nvme_release_subsystem(struct device *dev)
{
        struct nvme_subsystem *subsys =
                container_of(dev, struct nvme_subsystem, dev);

        if (subsys->instance >= 0)
                ida_free(&nvme_instance_ida, subsys->instance);
        kfree(subsys);
}

static void nvme_destroy_subsystem(struct kref *ref)
{
        struct nvme_subsystem *subsys =
                container_of(ref, struct nvme_subsystem, ref);

        mutex_lock(&nvme_subsystems_lock);
        list_del(&subsys->entry);
        mutex_unlock(&nvme_subsystems_lock);

        ida_destroy(&subsys->ns_ida);
        device_del(&subsys->dev);
        put_device(&subsys->dev);
}

static void nvme_put_subsystem(struct nvme_subsystem *subsys)
{
        kref_put(&subsys->ref, nvme_destroy_subsystem);
}

static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
{
        struct nvme_subsystem *subsys;

        lockdep_assert_held(&nvme_subsystems_lock);

        /*
         * Fail matches for discovery subsystems. This results
         * in each discovery controller bound to a unique subsystem.
         * This avoids issues with validating controller values
         * that can only be true when there is a single unique subsystem.
         * There may be multiple and completely independent entities
         * that provide discovery controllers.
         */
        if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
                return NULL;

        list_for_each_entry(subsys, &nvme_subsystems, entry) {
                if (strcmp(subsys->subnqn, subsysnqn))
                        continue;
                if (!kref_get_unless_zero(&subsys->ref))
                        continue;
                return subsys;
        }

        return NULL;
}

static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
{
        return ctrl->opts && ctrl->opts->discovery_nqn;
}

static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
                struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
        struct nvme_ctrl *tmp;

        lockdep_assert_held(&nvme_subsystems_lock);

        list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
                if (nvme_state_terminal(tmp))
                        continue;

                if (tmp->cntlid == ctrl->cntlid) {
                        dev_err(ctrl->device,
                                "Duplicate cntlid %u with %s, subsys %s, rejecting\n",
                                ctrl->cntlid, dev_name(tmp->device),
                                subsys->subnqn);
                        return false;
                }

                if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
                    nvme_discovery_ctrl(ctrl))
                        continue;

                dev_err(ctrl->device,
                        "Subsystem does not support multiple controllers\n");
                return false;
        }

        return true;
}

static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
        struct nvme_subsystem *subsys, *found;
        int ret;

        subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
        if (!subsys)
                return -ENOMEM;

        subsys->instance = -1;
        mutex_init(&subsys->lock);
        kref_init(&subsys->ref);
        INIT_LIST_HEAD(&subsys->ctrls);
        INIT_LIST_HEAD(&subsys->nsheads);
        nvme_init_subnqn(subsys, ctrl, id);
        memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
        memcpy(subsys->model, id->mn, sizeof(subsys->model));
        subsys->vendor_id = le16_to_cpu(id->vid);
        subsys->cmic = id->cmic;

        /* Versions prior to 1.4 don't necessarily report a valid type */
        if (id->cntrltype == NVME_CTRL_DISC ||
            !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
                subsys->subtype = NVME_NQN_DISC;
        else
                subsys->subtype = NVME_NQN_NVME;

        if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
                dev_err(ctrl->device,
                        "Subsystem %s is not a discovery controller",
                        subsys->subnqn);
                kfree(subsys);
                return -EINVAL;
        }
        subsys->awupf = le16_to_cpu(id->awupf);
        nvme_mpath_default_iopolicy(subsys);

        subsys->dev.class = nvme_subsys_class;
        subsys->dev.release = nvme_release_subsystem;
        subsys->dev.groups = nvme_subsys_attrs_groups;
        dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
        device_initialize(&subsys->dev);

        mutex_lock(&nvme_subsystems_lock);
        found = __nvme_find_get_subsystem(subsys->subnqn);
        if (found) {
                put_device(&subsys->dev);
                subsys = found;

                if (!nvme_validate_cntlid(subsys, ctrl, id)) {
                        ret = -EINVAL;
                        goto out_put_subsystem;
                }
        } else {
                ret = device_add(&subsys->dev);
                if (ret) {
                        dev_err(ctrl->device,
                                "failed to register subsystem device.\n");
                        put_device(&subsys->dev);
                        goto out_unlock;
                }
                ida_init(&subsys->ns_ida);
                list_add_tail(&subsys->entry, &nvme_subsystems);
        }

        ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
                                dev_name(ctrl->device));
        if (ret) {
                dev_err(ctrl->device,
                        "failed to create sysfs link from subsystem.\n");
                goto out_put_subsystem;
        }

        if (!found)
                subsys->instance = ctrl->instance;
        ctrl->subsys = subsys;
        list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
        mutex_unlock(&nvme_subsystems_lock);
        return 0;

out_put_subsystem:
        nvme_put_subsystem(subsys);
out_unlock:
        mutex_unlock(&nvme_subsystems_lock);
        return ret;
}

int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
                void *log, size_t size, u64 offset)
{
        struct nvme_command c = { };
        u32 dwlen = nvme_bytes_to_numd(size);

        c.get_log_page.opcode = nvme_admin_get_log_page;
        c.get_log_page.nsid = cpu_to_le32(nsid);
        c.get_log_page.lid = log_page;
        c.get_log_page.lsp = lsp;
        c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
        c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
        c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
        c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
        c.get_log_page.csi = csi;

        return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}
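
/*
 * Note on the field packing above: the Get Log Page dword count and the
 * byte offset are each wider than a single command field, so the 32-bit
 * dword count is split into numdl (low 16 bits) and numdu (high 16 bits),
 * and the 64-bit offset into lpol/lpou, exactly as the shifts and masks
 * above show.
 */
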
static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
                                struct nvme_effects_log **log)
{
        struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi);
        int ret;

        if (cel)
                goto out;

        cel = kzalloc(sizeof(*cel), GFP_KERNEL);
        if (!cel)
                return -ENOMEM;

        ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
                        cel, sizeof(*cel), 0);
        if (ret) {
                kfree(cel);
                return ret;
        }

        xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
out:
        *log = cel;
        return 0;
}

static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units)
{
        u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val;

        if (check_shl_overflow(1U, units + page_shift - 9, &val))
                return UINT_MAX;
        return val;
}
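
/*
 * Worked example (illustrative values): with CAP.MPSMIN = 0 the minimum
 * memory page size is 4KiB (page_shift = 12).  An MDTS value of 5 then
 * yields 1 << (5 + 12 - 9) = 256 sectors of 512 bytes, i.e. 128KiB, which
 * is how max_hw_sectors is derived from id->mdts later in this file.
 */
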
2871 static int nvme_init_non_mdts_limits(struct nvme_ctrl
*ctrl
)
2873 struct nvme_command c
= { };
2874 struct nvme_id_ctrl_nvm
*id
;
2877 if (ctrl
->oncs
& NVME_CTRL_ONCS_DSM
) {
2878 ctrl
->max_discard_sectors
= UINT_MAX
;
2879 ctrl
->max_discard_segments
= NVME_DSM_MAX_RANGES
;
2881 ctrl
->max_discard_sectors
= 0;
2882 ctrl
->max_discard_segments
= 0;
2886 * Even though NVMe spec explicitly states that MDTS is not applicable
2887 * to the write-zeroes, we are cautious and limit the size to the
2888 * controllers max_hw_sectors value, which is based on the MDTS field
2889 * and possibly other limiting factors.
2891 if ((ctrl
->oncs
& NVME_CTRL_ONCS_WRITE_ZEROES
) &&
2892 !(ctrl
->quirks
& NVME_QUIRK_DISABLE_WRITE_ZEROES
))
2893 ctrl
->max_zeroes_sectors
= ctrl
->max_hw_sectors
;
2895 ctrl
->max_zeroes_sectors
= 0;
2897 if (ctrl
->subsys
->subtype
!= NVME_NQN_NVME
||
2898 nvme_ctrl_limited_cns(ctrl
) ||
2899 test_bit(NVME_CTRL_SKIP_ID_CNS_CS
, &ctrl
->flags
))
2902 id
= kzalloc(sizeof(*id
), GFP_KERNEL
);
2906 c
.identify
.opcode
= nvme_admin_identify
;
2907 c
.identify
.cns
= NVME_ID_CNS_CS_CTRL
;
2908 c
.identify
.csi
= NVME_CSI_NVM
;
2910 ret
= nvme_submit_sync_cmd(ctrl
->admin_q
, &c
, id
, sizeof(*id
));
2915 ctrl
->max_discard_segments
= id
->dmrl
;
2916 ctrl
->dmrsl
= le32_to_cpu(id
->dmrsl
);
2918 ctrl
->max_zeroes_sectors
= nvme_mps_to_sectors(ctrl
, id
->wzsl
);
2922 set_bit(NVME_CTRL_SKIP_ID_CNS_CS
, &ctrl
->flags
);
2927 static void nvme_init_known_nvm_effects(struct nvme_ctrl
*ctrl
)
2929 struct nvme_effects_log
*log
= ctrl
->effects
;
2931 log
->acs
[nvme_admin_format_nvm
] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC
|
2932 NVME_CMD_EFFECTS_NCC
|
2933 NVME_CMD_EFFECTS_CSE_MASK
);
2934 log
->acs
[nvme_admin_sanitize_nvm
] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC
|
2935 NVME_CMD_EFFECTS_CSE_MASK
);
2938 * The spec says the result of a security receive command depends on
2939 * the previous security send command. As such, many vendors log this
2940 * command as one to submitted only when no other commands to the same
2941 * namespace are outstanding. The intention is to tell the host to
2942 * prevent mixing security send and receive.
2944 * This driver can only enforce such exclusive access against IO
2945 * queues, though. We are not readily able to enforce such a rule for
2946 * two commands to the admin queue, which is the only queue that
2947 * matters for this command.
2949 * Rather than blindly freezing the IO queues for this effect that
2950 * doesn't even apply to IO, mask it off.
2952 log
->acs
[nvme_admin_security_recv
] &= cpu_to_le32(~NVME_CMD_EFFECTS_CSE_MASK
);
2954 log
->iocs
[nvme_cmd_write
] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC
);
2955 log
->iocs
[nvme_cmd_write_zeroes
] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC
);
2956 log
->iocs
[nvme_cmd_write_uncor
] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC
);
2959 static int nvme_init_effects(struct nvme_ctrl
*ctrl
, struct nvme_id_ctrl
*id
)
2966 if (id
->lpa
& NVME_CTRL_LPA_CMD_EFFECTS_LOG
) {
2967 ret
= nvme_get_effects_log(ctrl
, NVME_CSI_NVM
, &ctrl
->effects
);
2972 if (!ctrl
->effects
) {
2973 ctrl
->effects
= kzalloc(sizeof(*ctrl
->effects
), GFP_KERNEL
);
2976 xa_store(&ctrl
->cels
, NVME_CSI_NVM
, ctrl
->effects
, GFP_KERNEL
);
2979 nvme_init_known_nvm_effects(ctrl
);
2983 static int nvme_init_identify(struct nvme_ctrl
*ctrl
)
2985 struct nvme_id_ctrl
*id
;
2987 bool prev_apst_enabled
;
2990 ret
= nvme_identify_ctrl(ctrl
, &id
);
2992 dev_err(ctrl
->device
, "Identify Controller failed (%d)\n", ret
);
2996 if (!(ctrl
->ops
->flags
& NVME_F_FABRICS
))
2997 ctrl
->cntlid
= le16_to_cpu(id
->cntlid
);
2999 if (!ctrl
->identified
) {
3003 * Check for quirks. Quirk can depend on firmware version,
3004 * so, in principle, the set of quirks present can change
3005 * across a reset. As a possible future enhancement, we
3006 * could re-scan for quirks every time we reinitialize
3007 * the device, but we'd have to make sure that the driver
3008 * behaves intelligently if the quirks change.
3010 for (i
= 0; i
< ARRAY_SIZE(core_quirks
); i
++) {
3011 if (quirk_matches(id
, &core_quirks
[i
]))
3012 ctrl
->quirks
|= core_quirks
[i
].quirks
;
3015 ret
= nvme_init_subsystem(ctrl
, id
);
3019 ret
= nvme_init_effects(ctrl
, id
);
3023 memcpy(ctrl
->subsys
->firmware_rev
, id
->fr
,
3024 sizeof(ctrl
->subsys
->firmware_rev
));
3026 if (force_apst
&& (ctrl
->quirks
& NVME_QUIRK_NO_DEEPEST_PS
)) {
3027 dev_warn(ctrl
->device
, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
3028 ctrl
->quirks
&= ~NVME_QUIRK_NO_DEEPEST_PS
;
3031 ctrl
->crdt
[0] = le16_to_cpu(id
->crdt1
);
3032 ctrl
->crdt
[1] = le16_to_cpu(id
->crdt2
);
3033 ctrl
->crdt
[2] = le16_to_cpu(id
->crdt3
);
3035 ctrl
->oacs
= le16_to_cpu(id
->oacs
);
3036 ctrl
->oncs
= le16_to_cpu(id
->oncs
);
3037 ctrl
->mtfa
= le16_to_cpu(id
->mtfa
);
3038 ctrl
->oaes
= le32_to_cpu(id
->oaes
);
3039 ctrl
->wctemp
= le16_to_cpu(id
->wctemp
);
3040 ctrl
->cctemp
= le16_to_cpu(id
->cctemp
);
3042 atomic_set(&ctrl
->abort_limit
, id
->acl
+ 1);
3043 ctrl
->vwc
= id
->vwc
;
3045 max_hw_sectors
= nvme_mps_to_sectors(ctrl
, id
->mdts
);
3047 max_hw_sectors
= UINT_MAX
;
3048 ctrl
->max_hw_sectors
=
3049 min_not_zero(ctrl
->max_hw_sectors
, max_hw_sectors
);
3051 nvme_set_queue_limits(ctrl
, ctrl
->admin_q
);
3052 ctrl
->sgls
= le32_to_cpu(id
->sgls
);
3053 ctrl
->kas
= le16_to_cpu(id
->kas
);
3054 ctrl
->max_namespaces
= le32_to_cpu(id
->mnan
);
3055 ctrl
->ctratt
= le32_to_cpu(id
->ctratt
);
3057 ctrl
->cntrltype
= id
->cntrltype
;
3058 ctrl
->dctype
= id
->dctype
;
3062 u32 transition_time
= le32_to_cpu(id
->rtd3e
) / USEC_PER_SEC
;
3064 ctrl
->shutdown_timeout
= clamp_t(unsigned int, transition_time
,
3065 shutdown_timeout
, 60);
3067 if (ctrl
->shutdown_timeout
!= shutdown_timeout
)
3068 dev_info(ctrl
->device
,
3069 "Shutdown timeout set to %u seconds\n",
3070 ctrl
->shutdown_timeout
);
3072 ctrl
->shutdown_timeout
= shutdown_timeout
;
3074 ctrl
->npss
= id
->npss
;
3075 ctrl
->apsta
= id
->apsta
;
3076 prev_apst_enabled
= ctrl
->apst_enabled
;
3077 if (ctrl
->quirks
& NVME_QUIRK_NO_APST
) {
3078 if (force_apst
&& id
->apsta
) {
3079 dev_warn(ctrl
->device
, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
3080 ctrl
->apst_enabled
= true;
3082 ctrl
->apst_enabled
= false;
3085 ctrl
->apst_enabled
= id
->apsta
;
3087 memcpy(ctrl
->psd
, id
->psd
, sizeof(ctrl
->psd
));
3089 if (ctrl
->ops
->flags
& NVME_F_FABRICS
) {
3090 ctrl
->icdoff
= le16_to_cpu(id
->icdoff
);
3091 ctrl
->ioccsz
= le32_to_cpu(id
->ioccsz
);
3092 ctrl
->iorcsz
= le32_to_cpu(id
->iorcsz
);
3093 ctrl
->maxcmd
= le16_to_cpu(id
->maxcmd
);
3096 * In fabrics we need to verify the cntlid matches the
3099 if (ctrl
->cntlid
!= le16_to_cpu(id
->cntlid
)) {
3100 dev_err(ctrl
->device
,
3101 "Mismatching cntlid: Connect %u vs Identify "
3103 ctrl
->cntlid
, le16_to_cpu(id
->cntlid
));
3108 if (!nvme_discovery_ctrl(ctrl
) && !ctrl
->kas
) {
3109 dev_err(ctrl
->device
,
3110 "keep-alive support is mandatory for fabrics\n");
3115 ctrl
->hmpre
= le32_to_cpu(id
->hmpre
);
3116 ctrl
->hmmin
= le32_to_cpu(id
->hmmin
);
3117 ctrl
->hmminds
= le32_to_cpu(id
->hmminds
);
3118 ctrl
->hmmaxd
= le16_to_cpu(id
->hmmaxd
);
3121 ret
= nvme_mpath_init_identify(ctrl
, id
);
3125 if (ctrl
->apst_enabled
&& !prev_apst_enabled
)
3126 dev_pm_qos_expose_latency_tolerance(ctrl
->device
);
3127 else if (!ctrl
->apst_enabled
&& prev_apst_enabled
)
3128 dev_pm_qos_hide_latency_tolerance(ctrl
->device
);
3136 * Initialize the cached copies of the Identify data and various controller
3137 * register in our nvme_ctrl structure. This should be called as soon as
3138 * the admin queue is fully up and running.
3140 int nvme_init_ctrl_finish(struct nvme_ctrl
*ctrl
, bool was_suspended
)
3144 ret
= ctrl
->ops
->reg_read32(ctrl
, NVME_REG_VS
, &ctrl
->vs
);
3146 dev_err(ctrl
->device
, "Reading VS failed (%d)\n", ret
);
3150 ctrl
->sqsize
= min_t(u16
, NVME_CAP_MQES(ctrl
->cap
), ctrl
->sqsize
);
3152 if (ctrl
->vs
>= NVME_VS(1, 1, 0))
3153 ctrl
->subsystem
= NVME_CAP_NSSRC(ctrl
->cap
);
3155 ret
= nvme_init_identify(ctrl
);
3159 ret
= nvme_configure_apst(ctrl
);
3163 ret
= nvme_configure_timestamp(ctrl
);
3167 ret
= nvme_configure_host_options(ctrl
);
3171 nvme_configure_opal(ctrl
, was_suspended
);
3173 if (!ctrl
->identified
&& !nvme_discovery_ctrl(ctrl
)) {
3175 * Do not return errors unless we are in a controller reset,
3176 * the controller works perfectly fine without hwmon.
3178 ret
= nvme_hwmon_init(ctrl
);
3183 clear_bit(NVME_CTRL_DIRTY_CAPABILITY
, &ctrl
->flags
);
3184 ctrl
->identified
= true;
3188 EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish
);
3190 static int nvme_dev_open(struct inode
*inode
, struct file
*file
)
3192 struct nvme_ctrl
*ctrl
=
3193 container_of(inode
->i_cdev
, struct nvme_ctrl
, cdev
);
3195 switch (ctrl
->state
) {
3196 case NVME_CTRL_LIVE
:
3199 return -EWOULDBLOCK
;
3202 nvme_get_ctrl(ctrl
);
3203 if (!try_module_get(ctrl
->ops
->module
)) {
3204 nvme_put_ctrl(ctrl
);
3208 file
->private_data
= ctrl
;
3212 static int nvme_dev_release(struct inode
*inode
, struct file
*file
)
3214 struct nvme_ctrl
*ctrl
=
3215 container_of(inode
->i_cdev
, struct nvme_ctrl
, cdev
);
3217 module_put(ctrl
->ops
->module
);
3218 nvme_put_ctrl(ctrl
);
3222 static const struct file_operations nvme_dev_fops
= {
3223 .owner
= THIS_MODULE
,
3224 .open
= nvme_dev_open
,
3225 .release
= nvme_dev_release
,
3226 .unlocked_ioctl
= nvme_dev_ioctl
,
3227 .compat_ioctl
= compat_ptr_ioctl
,
3228 .uring_cmd
= nvme_dev_uring_cmd
,
3231 static struct nvme_ns_head
*nvme_find_ns_head(struct nvme_ctrl
*ctrl
,
3234 struct nvme_ns_head
*h
;
3236 lockdep_assert_held(&ctrl
->subsys
->lock
);
3238 list_for_each_entry(h
, &ctrl
->subsys
->nsheads
, entry
) {
3240 * Private namespaces can share NSIDs under some conditions.
3241 * In that case we can't use the same ns_head for namespaces
3242 * with the same NSID.
3244 if (h
->ns_id
!= nsid
|| !nvme_is_unique_nsid(ctrl
, h
))
3246 if (!list_empty(&h
->list
) && nvme_tryget_ns_head(h
))
3253 static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem
*subsys
,
3254 struct nvme_ns_ids
*ids
)
3256 bool has_uuid
= !uuid_is_null(&ids
->uuid
);
3257 bool has_nguid
= memchr_inv(ids
->nguid
, 0, sizeof(ids
->nguid
));
3258 bool has_eui64
= memchr_inv(ids
->eui64
, 0, sizeof(ids
->eui64
));
3259 struct nvme_ns_head
*h
;
3261 lockdep_assert_held(&subsys
->lock
);
3263 list_for_each_entry(h
, &subsys
->nsheads
, entry
) {
3264 if (has_uuid
&& uuid_equal(&ids
->uuid
, &h
->ids
.uuid
))
3267 memcmp(&ids
->nguid
, &h
->ids
.nguid
, sizeof(ids
->nguid
)) == 0)
3270 memcmp(&ids
->eui64
, &h
->ids
.eui64
, sizeof(ids
->eui64
)) == 0)
3277 static void nvme_cdev_rel(struct device
*dev
)
3279 ida_free(&nvme_ns_chr_minor_ida
, MINOR(dev
->devt
));
3282 void nvme_cdev_del(struct cdev
*cdev
, struct device
*cdev_device
)
3284 cdev_device_del(cdev
, cdev_device
);
3285 put_device(cdev_device
);
3288 int nvme_cdev_add(struct cdev
*cdev
, struct device
*cdev_device
,
3289 const struct file_operations
*fops
, struct module
*owner
)
3293 minor
= ida_alloc(&nvme_ns_chr_minor_ida
, GFP_KERNEL
);
3296 cdev_device
->devt
= MKDEV(MAJOR(nvme_ns_chr_devt
), minor
);
3297 cdev_device
->class = nvme_ns_chr_class
;
3298 cdev_device
->release
= nvme_cdev_rel
;
3299 device_initialize(cdev_device
);
3300 cdev_init(cdev
, fops
);
3301 cdev
->owner
= owner
;
3302 ret
= cdev_device_add(cdev
, cdev_device
);
3304 put_device(cdev_device
);
3309 static int nvme_ns_chr_open(struct inode
*inode
, struct file
*file
)
3311 return nvme_ns_open(container_of(inode
->i_cdev
, struct nvme_ns
, cdev
));
3314 static int nvme_ns_chr_release(struct inode
*inode
, struct file
*file
)
3316 nvme_ns_release(container_of(inode
->i_cdev
, struct nvme_ns
, cdev
));
3320 static const struct file_operations nvme_ns_chr_fops
= {
3321 .owner
= THIS_MODULE
,
3322 .open
= nvme_ns_chr_open
,
3323 .release
= nvme_ns_chr_release
,
3324 .unlocked_ioctl
= nvme_ns_chr_ioctl
,
3325 .compat_ioctl
= compat_ptr_ioctl
,
3326 .uring_cmd
= nvme_ns_chr_uring_cmd
,
3327 .uring_cmd_iopoll
= nvme_ns_chr_uring_cmd_iopoll
,
3330 static int nvme_add_ns_cdev(struct nvme_ns
*ns
)
3334 ns
->cdev_device
.parent
= ns
->ctrl
->device
;
3335 ret
= dev_set_name(&ns
->cdev_device
, "ng%dn%d",
3336 ns
->ctrl
->instance
, ns
->head
->instance
);
3340 return nvme_cdev_add(&ns
->cdev
, &ns
->cdev_device
, &nvme_ns_chr_fops
,
3341 ns
->ctrl
->ops
->module
);
3344 static struct nvme_ns_head
*nvme_alloc_ns_head(struct nvme_ctrl
*ctrl
,
3345 struct nvme_ns_info
*info
)
3347 struct nvme_ns_head
*head
;
3348 size_t size
= sizeof(*head
);
3351 #ifdef CONFIG_NVME_MULTIPATH
3352 size
+= num_possible_nodes() * sizeof(struct nvme_ns
*);
3355 head
= kzalloc(size
, GFP_KERNEL
);
3358 ret
= ida_alloc_min(&ctrl
->subsys
->ns_ida
, 1, GFP_KERNEL
);
3361 head
->instance
= ret
;
3362 INIT_LIST_HEAD(&head
->list
);
3363 ret
= init_srcu_struct(&head
->srcu
);
3365 goto out_ida_remove
;
3366 head
->subsys
= ctrl
->subsys
;
3367 head
->ns_id
= info
->nsid
;
3368 head
->ids
= info
->ids
;
3369 head
->shared
= info
->is_shared
;
3370 kref_init(&head
->ref
);
3372 if (head
->ids
.csi
) {
3373 ret
= nvme_get_effects_log(ctrl
, head
->ids
.csi
, &head
->effects
);
3375 goto out_cleanup_srcu
;
3377 head
->effects
= ctrl
->effects
;
3379 ret
= nvme_mpath_alloc_disk(ctrl
, head
);
3381 goto out_cleanup_srcu
;
3383 list_add_tail(&head
->entry
, &ctrl
->subsys
->nsheads
);
3385 kref_get(&ctrl
->subsys
->ref
);
3389 cleanup_srcu_struct(&head
->srcu
);
3391 ida_free(&ctrl
->subsys
->ns_ida
, head
->instance
);
3396 ret
= blk_status_to_errno(nvme_error_status(ret
));
3397 return ERR_PTR(ret
);
3400 static int nvme_global_check_duplicate_ids(struct nvme_subsystem
*this,
3401 struct nvme_ns_ids
*ids
)
3403 struct nvme_subsystem
*s
;
3407 * Note that this check is racy as we try to avoid holding the global
3408 * lock over the whole ns_head creation. But it is only intended as
3409 * a sanity check anyway.
3411 mutex_lock(&nvme_subsystems_lock
);
3412 list_for_each_entry(s
, &nvme_subsystems
, entry
) {
3415 mutex_lock(&s
->lock
);
3416 ret
= nvme_subsys_check_duplicate_ids(s
, ids
);
3417 mutex_unlock(&s
->lock
);
3421 mutex_unlock(&nvme_subsystems_lock
);
3426 static int nvme_init_ns_head(struct nvme_ns
*ns
, struct nvme_ns_info
*info
)
3428 struct nvme_ctrl
*ctrl
= ns
->ctrl
;
3429 struct nvme_ns_head
*head
= NULL
;
3432 ret
= nvme_global_check_duplicate_ids(ctrl
->subsys
, &info
->ids
);
3435 * We've found two different namespaces on two different
3436 * subsystems that report the same ID. This is pretty nasty
3437 * for anything that actually requires unique device
3438 * identification. In the kernel we need this for multipathing,
3439 * and in user space the /dev/disk/by-id/ links rely on it.
3441 * If the device also claims to be multi-path capable back off
3442 * here now and refuse the probe the second device as this is a
3443 * recipe for data corruption. If not this is probably a
3444 * cheap consumer device if on the PCIe bus, so let the user
3445 * proceed and use the shiny toy, but warn that with changing
3446 * probing order (which due to our async probing could just be
3447 * device taking longer to startup) the other device could show
3450 nvme_print_device_info(ctrl
);
3451 if ((ns
->ctrl
->ops
->flags
& NVME_F_FABRICS
) || /* !PCIe */
3452 ((ns
->ctrl
->subsys
->cmic
& NVME_CTRL_CMIC_MULTI_CTRL
) &&
3454 dev_err(ctrl
->device
,
3455 "ignoring nsid %d because of duplicate IDs\n",
3460 dev_err(ctrl
->device
,
3461 "clearing duplicate IDs for nsid %d\n", info
->nsid
);
3462 dev_err(ctrl
->device
,
3463 "use of /dev/disk/by-id/ may cause data corruption\n");
3464 memset(&info
->ids
.nguid
, 0, sizeof(info
->ids
.nguid
));
3465 memset(&info
->ids
.uuid
, 0, sizeof(info
->ids
.uuid
));
3466 memset(&info
->ids
.eui64
, 0, sizeof(info
->ids
.eui64
));
3467 ctrl
->quirks
|= NVME_QUIRK_BOGUS_NID
;
3470 mutex_lock(&ctrl
->subsys
->lock
);
3471 head
= nvme_find_ns_head(ctrl
, info
->nsid
);
3473 ret
= nvme_subsys_check_duplicate_ids(ctrl
->subsys
, &info
->ids
);
3475 dev_err(ctrl
->device
,
3476 "duplicate IDs in subsystem for nsid %d\n",
3480 head
= nvme_alloc_ns_head(ctrl
, info
);
3482 ret
= PTR_ERR(head
);
3487 if (!info
->is_shared
|| !head
->shared
) {
3488 dev_err(ctrl
->device
,
3489 "Duplicate unshared namespace %d\n",
3491 goto out_put_ns_head
;
3493 if (!nvme_ns_ids_equal(&head
->ids
, &info
->ids
)) {
3494 dev_err(ctrl
->device
,
3495 "IDs don't match for shared namespace %d\n",
3497 goto out_put_ns_head
;
3501 dev_warn(ctrl
->device
,
3502 "Found shared namespace %d, but multipathing not supported.\n",
3504 dev_warn_once(ctrl
->device
,
3505 "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
3509 list_add_tail_rcu(&ns
->siblings
, &head
->list
);
3511 mutex_unlock(&ctrl
->subsys
->lock
);
3515 nvme_put_ns_head(head
);
3517 mutex_unlock(&ctrl
->subsys
->lock
);
3521 struct nvme_ns
*nvme_find_get_ns(struct nvme_ctrl
*ctrl
, unsigned nsid
)
3523 struct nvme_ns
*ns
, *ret
= NULL
;
3525 down_read(&ctrl
->namespaces_rwsem
);
3526 list_for_each_entry(ns
, &ctrl
->namespaces
, list
) {
3527 if (ns
->head
->ns_id
== nsid
) {
3528 if (!nvme_get_ns(ns
))
3533 if (ns
->head
->ns_id
> nsid
)
3536 up_read(&ctrl
->namespaces_rwsem
);
3539 EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns
, NVME_TARGET_PASSTHRU
);
3542 * Add the namespace to the controller list while keeping the list ordered.
3544 static void nvme_ns_add_to_ctrl_list(struct nvme_ns
*ns
)
3546 struct nvme_ns
*tmp
;
3548 list_for_each_entry_reverse(tmp
, &ns
->ctrl
->namespaces
, list
) {
3549 if (tmp
->head
->ns_id
< ns
->head
->ns_id
) {
3550 list_add(&ns
->list
, &tmp
->list
);
3554 list_add(&ns
->list
, &ns
->ctrl
->namespaces
);
3557 static void nvme_alloc_ns(struct nvme_ctrl
*ctrl
, struct nvme_ns_info
*info
)
3560 struct gendisk
*disk
;
3561 int node
= ctrl
->numa_node
;
3563 ns
= kzalloc_node(sizeof(*ns
), GFP_KERNEL
, node
);
3567 disk
= blk_mq_alloc_disk(ctrl
->tagset
, ns
);
3570 disk
->fops
= &nvme_bdev_ops
;
3571 disk
->private_data
= ns
;
3574 ns
->queue
= disk
->queue
;
3576 if (ctrl
->opts
&& ctrl
->opts
->data_digest
)
3577 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES
, ns
->queue
);
3579 blk_queue_flag_set(QUEUE_FLAG_NONROT
, ns
->queue
);
3580 if (ctrl
->ops
->supports_pci_p2pdma
&&
3581 ctrl
->ops
->supports_pci_p2pdma(ctrl
))
3582 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA
, ns
->queue
);
3585 kref_init(&ns
->kref
);
3587 if (nvme_init_ns_head(ns
, info
))
3588 goto out_cleanup_disk
;
3591 * If multipathing is enabled, the device name for all disks and not
3592 * just those that represent shared namespaces needs to be based on the
3593 * subsystem instance. Using the controller instance for private
3594 * namespaces could lead to naming collisions between shared and private
3595 * namespaces if they don't use a common numbering scheme.
3597 * If multipathing is not enabled, disk names must use the controller
3598 * instance as shared namespaces will show up as multiple block
3601 if (nvme_ns_head_multipath(ns
->head
)) {
3602 sprintf(disk
->disk_name
, "nvme%dc%dn%d", ctrl
->subsys
->instance
,
3603 ctrl
->instance
, ns
->head
->instance
);
3604 disk
->flags
|= GENHD_FL_HIDDEN
;
3605 } else if (multipath
) {
3606 sprintf(disk
->disk_name
, "nvme%dn%d", ctrl
->subsys
->instance
,
3607 ns
->head
->instance
);
3609 sprintf(disk
->disk_name
, "nvme%dn%d", ctrl
->instance
,
3610 ns
->head
->instance
);
3613 if (nvme_update_ns_info(ns
, info
))
3616 down_write(&ctrl
->namespaces_rwsem
);
3617 nvme_ns_add_to_ctrl_list(ns
);
3618 up_write(&ctrl
->namespaces_rwsem
);
3619 nvme_get_ctrl(ctrl
);
3621 if (device_add_disk(ctrl
->device
, ns
->disk
, nvme_ns_id_attr_groups
))
3622 goto out_cleanup_ns_from_list
;
3624 if (!nvme_ns_head_multipath(ns
->head
))
3625 nvme_add_ns_cdev(ns
);
3627 nvme_mpath_add_disk(ns
, info
->anagrpid
);
3628 nvme_fault_inject_init(&ns
->fault_inject
, ns
->disk
->disk_name
);
3632 out_cleanup_ns_from_list
:
3633 nvme_put_ctrl(ctrl
);
3634 down_write(&ctrl
->namespaces_rwsem
);
3635 list_del_init(&ns
->list
);
3636 up_write(&ctrl
->namespaces_rwsem
);
3638 mutex_lock(&ctrl
->subsys
->lock
);
3639 list_del_rcu(&ns
->siblings
);
3640 if (list_empty(&ns
->head
->list
))
3641 list_del_init(&ns
->head
->entry
);
3642 mutex_unlock(&ctrl
->subsys
->lock
);
3643 nvme_put_ns_head(ns
->head
);
3650 static void nvme_ns_remove(struct nvme_ns
*ns
)
3652 bool last_path
= false;
3654 if (test_and_set_bit(NVME_NS_REMOVING
, &ns
->flags
))
3657 clear_bit(NVME_NS_READY
, &ns
->flags
);
3658 set_capacity(ns
->disk
, 0);
3659 nvme_fault_inject_fini(&ns
->fault_inject
);
3662 * Ensure that !NVME_NS_READY is seen by other threads to prevent
3663 * this ns going back into current_path.
3665 synchronize_srcu(&ns
->head
->srcu
);
3667 /* wait for concurrent submissions */
3668 if (nvme_mpath_clear_current_path(ns
))
3669 synchronize_srcu(&ns
->head
->srcu
);
3671 mutex_lock(&ns
->ctrl
->subsys
->lock
);
3672 list_del_rcu(&ns
->siblings
);
3673 if (list_empty(&ns
->head
->list
)) {
3674 list_del_init(&ns
->head
->entry
);
3677 mutex_unlock(&ns
->ctrl
->subsys
->lock
);
3679 /* guarantee not available in head->list */
3680 synchronize_srcu(&ns
->head
->srcu
);
3682 if (!nvme_ns_head_multipath(ns
->head
))
3683 nvme_cdev_del(&ns
->cdev
, &ns
->cdev_device
);
3684 del_gendisk(ns
->disk
);
3686 down_write(&ns
->ctrl
->namespaces_rwsem
);
3687 list_del_init(&ns
->list
);
3688 up_write(&ns
->ctrl
->namespaces_rwsem
);
3691 nvme_mpath_shutdown_disk(ns
->head
);
3695 static void nvme_ns_remove_by_nsid(struct nvme_ctrl
*ctrl
, u32 nsid
)
3697 struct nvme_ns
*ns
= nvme_find_get_ns(ctrl
, nsid
);
3705 static void nvme_validate_ns(struct nvme_ns
*ns
, struct nvme_ns_info
*info
)
3707 int ret
= NVME_SC_INVALID_NS
| NVME_SC_DNR
;
3709 if (!nvme_ns_ids_equal(&ns
->head
->ids
, &info
->ids
)) {
3710 dev_err(ns
->ctrl
->device
,
3711 "identifiers changed for nsid %d\n", ns
->head
->ns_id
);
3715 ret
= nvme_update_ns_info(ns
, info
);
3718 * Only remove the namespace if we got a fatal error back from the
3719 * device, otherwise ignore the error and just move on.
3721 * TODO: we should probably schedule a delayed retry here.
3723 if (ret
> 0 && (ret
& NVME_SC_DNR
))
3727 static void nvme_scan_ns(struct nvme_ctrl
*ctrl
, unsigned nsid
)
3729 struct nvme_ns_info info
= { .nsid
= nsid
};
3733 if (nvme_identify_ns_descs(ctrl
, &info
))
3736 if (info
.ids
.csi
!= NVME_CSI_NVM
&& !nvme_multi_css(ctrl
)) {
3737 dev_warn(ctrl
->device
,
3738 "command set not reported for nsid: %d\n", nsid
);
3743 * If available try to use the Command Set Idependent Identify Namespace
3744 * data structure to find all the generic information that is needed to
3745 * set up a namespace. If not fall back to the legacy version.
3747 if ((ctrl
->cap
& NVME_CAP_CRMS_CRIMS
) ||
3748 (info
.ids
.csi
!= NVME_CSI_NVM
&& info
.ids
.csi
!= NVME_CSI_ZNS
))
3749 ret
= nvme_ns_info_from_id_cs_indep(ctrl
, &info
);
3751 ret
= nvme_ns_info_from_identify(ctrl
, &info
);
3753 if (info
.is_removed
)
3754 nvme_ns_remove_by_nsid(ctrl
, nsid
);
3757 * Ignore the namespace if it is not ready. We will get an AEN once it
3758 * becomes ready and restart the scan.
3760 if (ret
|| !info
.is_ready
)
3763 ns
= nvme_find_get_ns(ctrl
, nsid
);
3765 nvme_validate_ns(ns
, &info
);
3768 nvme_alloc_ns(ctrl
, &info
);
3772 static void nvme_remove_invalid_namespaces(struct nvme_ctrl
*ctrl
,
3775 struct nvme_ns
*ns
, *next
;
3778 down_write(&ctrl
->namespaces_rwsem
);
3779 list_for_each_entry_safe(ns
, next
, &ctrl
->namespaces
, list
) {
3780 if (ns
->head
->ns_id
> nsid
)
3781 list_move_tail(&ns
->list
, &rm_list
);
3783 up_write(&ctrl
->namespaces_rwsem
);
3785 list_for_each_entry_safe(ns
, next
, &rm_list
, list
)
3790 static int nvme_scan_ns_list(struct nvme_ctrl
*ctrl
)
3792 const int nr_entries
= NVME_IDENTIFY_DATA_SIZE
/ sizeof(__le32
);
3797 ns_list
= kzalloc(NVME_IDENTIFY_DATA_SIZE
, GFP_KERNEL
);
3802 struct nvme_command cmd
= {
3803 .identify
.opcode
= nvme_admin_identify
,
3804 .identify
.cns
= NVME_ID_CNS_NS_ACTIVE_LIST
,
3805 .identify
.nsid
= cpu_to_le32(prev
),
3808 ret
= nvme_submit_sync_cmd(ctrl
->admin_q
, &cmd
, ns_list
,
3809 NVME_IDENTIFY_DATA_SIZE
);
3811 dev_warn(ctrl
->device
,
3812 "Identify NS List failed (status=0x%x)\n", ret
);
3816 for (i
= 0; i
< nr_entries
; i
++) {
3817 u32 nsid
= le32_to_cpu(ns_list
[i
]);
3819 if (!nsid
) /* end of the list? */
3821 nvme_scan_ns(ctrl
, nsid
);
3822 while (++prev
< nsid
)
3823 nvme_ns_remove_by_nsid(ctrl
, prev
);
3827 nvme_remove_invalid_namespaces(ctrl
, prev
);
3833 static void nvme_scan_ns_sequential(struct nvme_ctrl
*ctrl
)
3835 struct nvme_id_ctrl
*id
;
3838 if (nvme_identify_ctrl(ctrl
, &id
))
3840 nn
= le32_to_cpu(id
->nn
);
3843 for (i
= 1; i
<= nn
; i
++)
3844 nvme_scan_ns(ctrl
, i
);
3846 nvme_remove_invalid_namespaces(ctrl
, nn
);
3849 static void nvme_clear_changed_ns_log(struct nvme_ctrl
*ctrl
)
3851 size_t log_size
= NVME_MAX_CHANGED_NAMESPACES
* sizeof(__le32
);
3855 log
= kzalloc(log_size
, GFP_KERNEL
);
3860 * We need to read the log to clear the AEN, but we don't want to rely
3861 * on it for the changed namespace information as userspace could have
3862 * raced with us in reading the log page, which could cause us to miss
3865 error
= nvme_get_log(ctrl
, NVME_NSID_ALL
, NVME_LOG_CHANGED_NS
, 0,
3866 NVME_CSI_NVM
, log
, log_size
, 0);
3868 dev_warn(ctrl
->device
,
3869 "reading changed ns log failed: %d\n", error
);
3874 static void nvme_scan_work(struct work_struct
*work
)
3876 struct nvme_ctrl
*ctrl
=
3877 container_of(work
, struct nvme_ctrl
, scan_work
);
3880 /* No tagset on a live ctrl means IO queues could not created */
3881 if (ctrl
->state
!= NVME_CTRL_LIVE
|| !ctrl
->tagset
)
3885 * Identify controller limits can change at controller reset due to
3886 * new firmware download, even though it is not common we cannot ignore
3887 * such scenario. Controller's non-mdts limits are reported in the unit
3888 * of logical blocks that is dependent on the format of attached
3889 * namespace. Hence re-read the limits at the time of ns allocation.
3891 ret
= nvme_init_non_mdts_limits(ctrl
);
3893 dev_warn(ctrl
->device
,
3894 "reading non-mdts-limits failed: %d\n", ret
);
3898 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED
, &ctrl
->events
)) {
3899 dev_info(ctrl
->device
, "rescanning namespaces.\n");
3900 nvme_clear_changed_ns_log(ctrl
);
3903 mutex_lock(&ctrl
->scan_lock
);
3904 if (nvme_ctrl_limited_cns(ctrl
)) {
3905 nvme_scan_ns_sequential(ctrl
);
3908 * Fall back to sequential scan if DNR is set to handle broken
3909 * devices which should support Identify NS List (as per the VS
3910 * they report) but don't actually support it.
3912 ret
= nvme_scan_ns_list(ctrl
);
3913 if (ret
> 0 && ret
& NVME_SC_DNR
)
3914 nvme_scan_ns_sequential(ctrl
);
3916 mutex_unlock(&ctrl
->scan_lock
);
3920 * This function iterates the namespace list unlocked to allow recovery from
3921 * controller failure. It is up to the caller to ensure the namespace list is
3922 * not modified by scan work while this function is executing.
3924 void nvme_remove_namespaces(struct nvme_ctrl
*ctrl
)
3926 struct nvme_ns
*ns
, *next
;
3930 * make sure to requeue I/O to all namespaces as these
3931 * might result from the scan itself and must complete
3932 * for the scan_work to make progress
3934 nvme_mpath_clear_ctrl_paths(ctrl
);
3937 * Unquiesce io queues so any pending IO won't hang, especially
3938 * those submitted from scan work
3940 nvme_unquiesce_io_queues(ctrl
);
3942 /* prevent racing with ns scanning */
3943 flush_work(&ctrl
->scan_work
);
3946 * The dead states indicates the controller was not gracefully
3947 * disconnected. In that case, we won't be able to flush any data while
3948 * removing the namespaces' disks; fail all the queues now to avoid
3949 * potentially having to clean up the failed sync later.
3951 if (ctrl
->state
== NVME_CTRL_DEAD
)
3952 nvme_mark_namespaces_dead(ctrl
);
3954 /* this is a no-op when called from the controller reset handler */
3955 nvme_change_ctrl_state(ctrl
, NVME_CTRL_DELETING_NOIO
);
3957 down_write(&ctrl
->namespaces_rwsem
);
3958 list_splice_init(&ctrl
->namespaces
, &ns_list
);
3959 up_write(&ctrl
->namespaces_rwsem
);
3961 list_for_each_entry_safe(ns
, next
, &ns_list
, list
)
3964 EXPORT_SYMBOL_GPL(nvme_remove_namespaces
);
3966 static int nvme_class_uevent(const struct device
*dev
, struct kobj_uevent_env
*env
)
3968 const struct nvme_ctrl
*ctrl
=
3969 container_of(dev
, struct nvme_ctrl
, ctrl_device
);
3970 struct nvmf_ctrl_options
*opts
= ctrl
->opts
;
3973 ret
= add_uevent_var(env
, "NVME_TRTYPE=%s", ctrl
->ops
->name
);
3978 ret
= add_uevent_var(env
, "NVME_TRADDR=%s", opts
->traddr
);
3982 ret
= add_uevent_var(env
, "NVME_TRSVCID=%s",
3983 opts
->trsvcid
?: "none");
3987 ret
= add_uevent_var(env
, "NVME_HOST_TRADDR=%s",
3988 opts
->host_traddr
?: "none");
3992 ret
= add_uevent_var(env
, "NVME_HOST_IFACE=%s",
3993 opts
->host_iface
?: "none");
3998 static void nvme_change_uevent(struct nvme_ctrl
*ctrl
, char *envdata
)
4000 char *envp
[2] = { envdata
, NULL
};
4002 kobject_uevent_env(&ctrl
->device
->kobj
, KOBJ_CHANGE
, envp
);
4005 static void nvme_aen_uevent(struct nvme_ctrl
*ctrl
)
4007 char *envp
[2] = { NULL
, NULL
};
4008 u32 aen_result
= ctrl
->aen_result
;
4010 ctrl
->aen_result
= 0;
4014 envp
[0] = kasprintf(GFP_KERNEL
, "NVME_AEN=%#08x", aen_result
);
4017 kobject_uevent_env(&ctrl
->device
->kobj
, KOBJ_CHANGE
, envp
);
4021 static void nvme_async_event_work(struct work_struct
*work
)
4023 struct nvme_ctrl
*ctrl
=
4024 container_of(work
, struct nvme_ctrl
, async_event_work
);
4026 nvme_aen_uevent(ctrl
);
4029 * The transport drivers must guarantee AER submission here is safe by
4030 * flushing ctrl async_event_work after changing the controller state
4031 * from LIVE and before freeing the admin queue.
4033 if (ctrl
->state
== NVME_CTRL_LIVE
)
4034 ctrl
->ops
->submit_async_event(ctrl
);
4037 static bool nvme_ctrl_pp_status(struct nvme_ctrl
*ctrl
)
4042 if (ctrl
->ops
->reg_read32(ctrl
, NVME_REG_CSTS
, &csts
))
4048 return ((ctrl
->ctrl_config
& NVME_CC_ENABLE
) && (csts
& NVME_CSTS_PP
));
4051 static void nvme_get_fw_slot_info(struct nvme_ctrl
*ctrl
)
4053 struct nvme_fw_slot_info_log
*log
;
4055 log
= kmalloc(sizeof(*log
), GFP_KERNEL
);
4059 if (nvme_get_log(ctrl
, NVME_NSID_ALL
, NVME_LOG_FW_SLOT
, 0, NVME_CSI_NVM
,
4060 log
, sizeof(*log
), 0))
4061 dev_warn(ctrl
->device
, "Get FW SLOT INFO log error\n");
4065 static void nvme_fw_act_work(struct work_struct
*work
)
4067 struct nvme_ctrl
*ctrl
= container_of(work
,
4068 struct nvme_ctrl
, fw_act_work
);
4069 unsigned long fw_act_timeout
;
4072 fw_act_timeout
= jiffies
+
4073 msecs_to_jiffies(ctrl
->mtfa
* 100);
4075 fw_act_timeout
= jiffies
+
4076 msecs_to_jiffies(admin_timeout
* 1000);
4078 nvme_quiesce_io_queues(ctrl
);
4079 while (nvme_ctrl_pp_status(ctrl
)) {
4080 if (time_after(jiffies
, fw_act_timeout
)) {
4081 dev_warn(ctrl
->device
,
4082 "Fw activation timeout, reset controller\n");
4083 nvme_try_sched_reset(ctrl
);
4089 if (!nvme_change_ctrl_state(ctrl
, NVME_CTRL_LIVE
))
4092 nvme_unquiesce_io_queues(ctrl
);
4093 /* read FW slot information to clear the AER */
4094 nvme_get_fw_slot_info(ctrl
);
4096 queue_work(nvme_wq
, &ctrl
->async_event_work
);
4099 static u32
nvme_aer_type(u32 result
)
4101 return result
& 0x7;
4104 static u32
nvme_aer_subtype(u32 result
)
4106 return (result
& 0xff00) >> 8;
4109 static bool nvme_handle_aen_notice(struct nvme_ctrl
*ctrl
, u32 result
)
4111 u32 aer_notice_type
= nvme_aer_subtype(result
);
4112 bool requeue
= true;
4114 switch (aer_notice_type
) {
4115 case NVME_AER_NOTICE_NS_CHANGED
:
4116 set_bit(NVME_AER_NOTICE_NS_CHANGED
, &ctrl
->events
);
4117 nvme_queue_scan(ctrl
);
4119 case NVME_AER_NOTICE_FW_ACT_STARTING
:
4121 * We are (ab)using the RESETTING state to prevent subsequent
4122 * recovery actions from interfering with the controller's
4123 * firmware activation.
4125 if (nvme_change_ctrl_state(ctrl
, NVME_CTRL_RESETTING
)) {
4126 nvme_auth_stop(ctrl
);
4128 queue_work(nvme_wq
, &ctrl
->fw_act_work
);
4131 #ifdef CONFIG_NVME_MULTIPATH
4132 case NVME_AER_NOTICE_ANA
:
4133 if (!ctrl
->ana_log_buf
)
4135 queue_work(nvme_wq
, &ctrl
->ana_work
);
4138 case NVME_AER_NOTICE_DISC_CHANGED
:
4139 ctrl
->aen_result
= result
;
4142 dev_warn(ctrl
->device
, "async event result %08x\n", result
);
4147 static void nvme_handle_aer_persistent_error(struct nvme_ctrl
*ctrl
)
4149 dev_warn(ctrl
->device
, "resetting controller due to AER\n");
4150 nvme_reset_ctrl(ctrl
);
4153 void nvme_complete_async_event(struct nvme_ctrl
*ctrl
, __le16 status
,
4154 volatile union nvme_result
*res
)
4156 u32 result
= le32_to_cpu(res
->u32
);
4157 u32 aer_type
= nvme_aer_type(result
);
4158 u32 aer_subtype
= nvme_aer_subtype(result
);
4159 bool requeue
= true;
4161 if (le16_to_cpu(status
) >> 1 != NVME_SC_SUCCESS
)
4164 trace_nvme_async_event(ctrl
, result
);
4166 case NVME_AER_NOTICE
:
4167 requeue
= nvme_handle_aen_notice(ctrl
, result
);
4169 case NVME_AER_ERROR
:
4171 * For a persistent internal error, don't run async_event_work
4172 * to submit a new AER. The controller reset will do it.
4174 if (aer_subtype
== NVME_AER_ERROR_PERSIST_INT_ERR
) {
4175 nvme_handle_aer_persistent_error(ctrl
);
4179 case NVME_AER_SMART
:
4182 ctrl
->aen_result
= result
;
4189 queue_work(nvme_wq
, &ctrl
->async_event_work
);
4191 EXPORT_SYMBOL_GPL(nvme_complete_async_event
);
4193 int nvme_alloc_admin_tag_set(struct nvme_ctrl
*ctrl
, struct blk_mq_tag_set
*set
,
4194 const struct blk_mq_ops
*ops
, unsigned int cmd_size
)
4198 memset(set
, 0, sizeof(*set
));
4200 set
->queue_depth
= NVME_AQ_MQ_TAG_DEPTH
;
4201 if (ctrl
->ops
->flags
& NVME_F_FABRICS
)
4202 set
->reserved_tags
= NVMF_RESERVED_TAGS
;
4203 set
->numa_node
= ctrl
->numa_node
;
4204 set
->flags
= BLK_MQ_F_NO_SCHED
;
4205 if (ctrl
->ops
->flags
& NVME_F_BLOCKING
)
4206 set
->flags
|= BLK_MQ_F_BLOCKING
;
4207 set
->cmd_size
= cmd_size
;
4208 set
->driver_data
= ctrl
;
4209 set
->nr_hw_queues
= 1;
4210 set
->timeout
= NVME_ADMIN_TIMEOUT
;
4211 ret
= blk_mq_alloc_tag_set(set
);
4215 ctrl
->admin_q
= blk_mq_init_queue(set
);
4216 if (IS_ERR(ctrl
->admin_q
)) {
4217 ret
= PTR_ERR(ctrl
->admin_q
);
4218 goto out_free_tagset
;
4221 if (ctrl
->ops
->flags
& NVME_F_FABRICS
) {
4222 ctrl
->fabrics_q
= blk_mq_init_queue(set
);
4223 if (IS_ERR(ctrl
->fabrics_q
)) {
4224 ret
= PTR_ERR(ctrl
->fabrics_q
);
4225 goto out_cleanup_admin_q
;
4229 ctrl
->admin_tagset
= set
;
4232 out_cleanup_admin_q
:
4233 blk_mq_destroy_queue(ctrl
->admin_q
);
4234 blk_put_queue(ctrl
->admin_q
);
4236 blk_mq_free_tag_set(set
);
4237 ctrl
->admin_q
= NULL
;
4238 ctrl
->fabrics_q
= NULL
;
4241 EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set
);
4243 void nvme_remove_admin_tag_set(struct nvme_ctrl
*ctrl
)
4245 blk_mq_destroy_queue(ctrl
->admin_q
);
4246 blk_put_queue(ctrl
->admin_q
);
4247 if (ctrl
->ops
->flags
& NVME_F_FABRICS
) {
4248 blk_mq_destroy_queue(ctrl
->fabrics_q
);
4249 blk_put_queue(ctrl
->fabrics_q
);
4251 blk_mq_free_tag_set(ctrl
->admin_tagset
);
4253 EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set
);
4255 int nvme_alloc_io_tag_set(struct nvme_ctrl
*ctrl
, struct blk_mq_tag_set
*set
,
4256 const struct blk_mq_ops
*ops
, unsigned int nr_maps
,
4257 unsigned int cmd_size
)
4261 memset(set
, 0, sizeof(*set
));
4263 set
->queue_depth
= min_t(unsigned, ctrl
->sqsize
, BLK_MQ_MAX_DEPTH
- 1);
4265 * Some Apple controllers requires tags to be unique across admin and
4266 * the (only) I/O queue, so reserve the first 32 tags of the I/O queue.
4268 if (ctrl
->quirks
& NVME_QUIRK_SHARED_TAGS
)
4269 set
->reserved_tags
= NVME_AQ_DEPTH
;
4270 else if (ctrl
->ops
->flags
& NVME_F_FABRICS
)
4271 set
->reserved_tags
= NVMF_RESERVED_TAGS
;
4272 set
->numa_node
= ctrl
->numa_node
;
4273 set
->flags
= BLK_MQ_F_SHOULD_MERGE
;
4274 if (ctrl
->ops
->flags
& NVME_F_BLOCKING
)
4275 set
->flags
|= BLK_MQ_F_BLOCKING
;
4276 set
->cmd_size
= cmd_size
,
4277 set
->driver_data
= ctrl
;
4278 set
->nr_hw_queues
= ctrl
->queue_count
- 1;
4279 set
->timeout
= NVME_IO_TIMEOUT
;
4280 set
->nr_maps
= nr_maps
;
4281 ret
= blk_mq_alloc_tag_set(set
);
4285 if (ctrl
->ops
->flags
& NVME_F_FABRICS
) {
4286 ctrl
->connect_q
= blk_mq_init_queue(set
);
4287 if (IS_ERR(ctrl
->connect_q
)) {
4288 ret
= PTR_ERR(ctrl
->connect_q
);
4289 goto out_free_tag_set
;
4291 blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE
,
4299 blk_mq_free_tag_set(set
);
4300 ctrl
->connect_q
= NULL
;
4303 EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set
);
4305 void nvme_remove_io_tag_set(struct nvme_ctrl
*ctrl
)
4307 if (ctrl
->ops
->flags
& NVME_F_FABRICS
) {
4308 blk_mq_destroy_queue(ctrl
->connect_q
);
4309 blk_put_queue(ctrl
->connect_q
);
4311 blk_mq_free_tag_set(ctrl
->tagset
);
4313 EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set
);
4315 void nvme_stop_ctrl(struct nvme_ctrl
*ctrl
)
4317 nvme_mpath_stop(ctrl
);
4318 nvme_auth_stop(ctrl
);
4319 nvme_stop_keep_alive(ctrl
);
4320 nvme_stop_failfast_work(ctrl
);
4321 flush_work(&ctrl
->async_event_work
);
4322 cancel_work_sync(&ctrl
->fw_act_work
);
4323 if (ctrl
->ops
->stop_ctrl
)
4324 ctrl
->ops
->stop_ctrl(ctrl
);
4326 EXPORT_SYMBOL_GPL(nvme_stop_ctrl
);
4328 void nvme_start_ctrl(struct nvme_ctrl
*ctrl
)
4330 nvme_start_keep_alive(ctrl
);
4332 nvme_enable_aen(ctrl
);
4335 * persistent discovery controllers need to send indication to userspace
4336 * to re-read the discovery log page to learn about possible changes
4337 * that were missed. We identify persistent discovery controllers by
4338 * checking that they started once before, hence are reconnecting back.
4340 if (test_bit(NVME_CTRL_STARTED_ONCE
, &ctrl
->flags
) &&
4341 nvme_discovery_ctrl(ctrl
))
4342 nvme_change_uevent(ctrl
, "NVME_EVENT=rediscover");
4344 if (ctrl
->queue_count
> 1) {
4345 nvme_queue_scan(ctrl
);
4346 nvme_unquiesce_io_queues(ctrl
);
4347 nvme_mpath_update(ctrl
);
4350 nvme_change_uevent(ctrl
, "NVME_EVENT=connected");
4351 set_bit(NVME_CTRL_STARTED_ONCE
, &ctrl
->flags
);
4353 EXPORT_SYMBOL_GPL(nvme_start_ctrl
);
4355 void nvme_uninit_ctrl(struct nvme_ctrl
*ctrl
)
4357 nvme_hwmon_exit(ctrl
);
4358 nvme_fault_inject_fini(&ctrl
->fault_inject
);
4359 dev_pm_qos_hide_latency_tolerance(ctrl
->device
);
4360 cdev_device_del(&ctrl
->cdev
, ctrl
->device
);
4361 nvme_put_ctrl(ctrl
);
4363 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl
);
4365 static void nvme_free_cels(struct nvme_ctrl
*ctrl
)
4367 struct nvme_effects_log
*cel
;
4370 xa_for_each(&ctrl
->cels
, i
, cel
) {
4371 xa_erase(&ctrl
->cels
, i
);
4375 xa_destroy(&ctrl
->cels
);
static void nvme_free_ctrl(struct device *dev)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (!subsys || ctrl->instance != subsys->instance)
		ida_free(&nvme_instance_ida, ctrl->instance);

	nvme_free_cels(ctrl);
	nvme_mpath_uninit(ctrl);
	nvme_auth_stop(ctrl);
	nvme_auth_free(ctrl);
	__free_page(ctrl->discard_page);
	free_opal_dev(ctrl->opal_dev);

	if (subsys) {
		mutex_lock(&nvme_subsystems_lock);
		list_del(&ctrl->subsys_entry);
		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
		mutex_unlock(&nvme_subsystems_lock);
	}

	ctrl->ops->free_ctrl(ctrl);

	if (subsys)
		nvme_put_subsystem(subsys);
}
/*
 * Initialize an NVMe controller structure.  This needs to be called during
 * earliest initialization so that we have the initialized structures around
 * during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
	spin_lock_init(&ctrl->lock);
	mutex_init(&ctrl->scan_lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	xa_init(&ctrl->cels);
	init_rwsem(&ctrl->namespaces_rwsem);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	ctrl->numa_node = NUMA_NO_NODE;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
	init_waitqueue_head(&ctrl->state_wq);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;

	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
			PAGE_SIZE);
	ctrl->discard_page = alloc_page(GFP_KERNEL);
	if (!ctrl->discard_page) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL);
	if (ret < 0)
		goto out;
	ctrl->instance = ret;

	device_initialize(&ctrl->ctrl_device);
	ctrl->device = &ctrl->ctrl_device;
	ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
			ctrl->instance);
	ctrl->device->class = nvme_class;
	ctrl->device->parent = ctrl->dev;
	if (ops->dev_attr_groups)
		ctrl->device->groups = ops->dev_attr_groups;
	else
		ctrl->device->groups = nvme_dev_attr_groups;
	ctrl->device->release = nvme_free_ctrl;
	dev_set_drvdata(ctrl->device, ctrl);
	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
	if (ret)
		goto out_release_instance;

	nvme_get_ctrl(ctrl);
	cdev_init(&ctrl->cdev, &nvme_dev_fops);
	ctrl->cdev.owner = ops->module;
	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
	if (ret)
		goto out_free_name;

	/*
	 * Initialize latency tolerance controls.  The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
	nvme_mpath_init_ctrl(ctrl);
	ret = nvme_auth_init_ctrl(ctrl);
	if (ret)
		goto out_free_cdev;

	return 0;
out_free_cdev:
	nvme_fault_inject_fini(&ctrl->fault_inject);
	dev_pm_qos_hide_latency_tolerance(ctrl->device);
	cdev_device_del(&ctrl->cdev, ctrl->device);
out_free_name:
	nvme_put_ctrl(ctrl);
	kfree_const(ctrl->device->kobj.name);
out_release_instance:
	ida_free(&nvme_instance_ida, ctrl->instance);
out:
	if (ctrl->discard_page)
		__free_page(ctrl->discard_page);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
/* let I/O to all namespaces fail in preparation for surprise removal */
void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mark_disk_dead(ns->disk);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead);
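
/*
 * Queue freeze helpers.  blk-mq freezing is reference counted per queue:
 * nvme_start_freeze() takes a freeze reference on every namespace queue,
 * nvme_wait_freeze()/nvme_wait_freeze_timeout() wait for outstanding
 * requests to drain, and nvme_unfreeze() drops the reference again.
 * An illustrative sketch of how a reset path may use them (the exact
 * ordering is transport specific):
 *
 *	nvme_start_freeze(ctrl);
 *	nvme_quiesce_io_queues(ctrl);
 *	... reconfigure hardware queues ...
 *	nvme_unquiesce_io_queues(ctrl);
 *	nvme_wait_freeze(ctrl);
 *	nvme_unfreeze(ctrl);
 */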
void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
	return timeout;
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);
void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_freeze_queue_start(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);
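
/*
 * Quiesce/unquiesce helpers.  Unlike freezing, quiescing only stops new
 * dispatch to ->queue_rq() without draining outstanding requests.  The
 * NVME_CTRL_STOPPED and NVME_CTRL_ADMIN_Q_STOPPED flags keep repeated
 * quiesce/unquiesce calls balanced across resets; a second quiesce attempt
 * only waits for the already started quiesce to complete.
 */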
void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl)
{
	if (!ctrl->tagset)
		return;
	if (!test_and_set_bit(NVME_CTRL_STOPPED, &ctrl->flags))
		blk_mq_quiesce_tagset(ctrl->tagset);
	else
		blk_mq_wait_quiesce_done(ctrl->tagset);
}
EXPORT_SYMBOL_GPL(nvme_quiesce_io_queues);
void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl)
{
	if (!ctrl->tagset)
		return;
	if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags))
		blk_mq_unquiesce_tagset(ctrl->tagset);
}
EXPORT_SYMBOL_GPL(nvme_unquiesce_io_queues);
void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl)
{
	if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
		blk_mq_quiesce_queue(ctrl->admin_q);
	else
		blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set);
}
EXPORT_SYMBOL_GPL(nvme_quiesce_admin_queue);
void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl)
{
	if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
		blk_mq_unquiesce_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue);
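
/*
 * blk_sync_queue() cancels pending timeout work on each queue; callers use
 * these helpers to make sure no ->timeout handler is still running before
 * tearing queues down.
 */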
void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
void nvme_sync_queues(struct nvme_ctrl *ctrl)
{
	nvme_sync_io_queues(ctrl);
	if (ctrl->admin_q)
		blk_sync_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);
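
/*
 * Resolve an nvme_ctrl from an open character-device file.  Exported in the
 * NVME_TARGET_PASSTHRU namespace for the NVMe target passthru code, which
 * needs the controller behind a user-supplied /dev/nvmeX file descriptor;
 * returns NULL if the file is not an nvme controller character device.
 */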
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
{
	if (file->f_op != &nvme_dev_fops)
		return NULL;
	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU);
/*
 * Check we didn't inadvertently grow the command structure sizes:
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_cs_indep) !=
			NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512);
}
static int __init nvme_core_init(void)
{
	int result = -ENOMEM;

	_nvme_check_size();

	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
		goto out;

	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_reset_wq)
		goto destroy_wq;

	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_delete_wq)
		goto destroy_reset_wq;

	result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0,
			NVME_MINORS, "nvme");
	if (result < 0)
		goto destroy_delete_wq;

	nvme_class = class_create("nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}
	nvme_class->dev_uevent = nvme_class_uevent;

	nvme_subsys_class = class_create("nvme-subsystem");
	if (IS_ERR(nvme_subsys_class)) {
		result = PTR_ERR(nvme_subsys_class);
		goto destroy_class;
	}

	result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS,
				     "nvme-generic");
	if (result < 0)
		goto destroy_subsys_class;

	nvme_ns_chr_class = class_create("nvme-generic");
	if (IS_ERR(nvme_ns_chr_class)) {
		result = PTR_ERR(nvme_ns_chr_class);
		goto unregister_generic_ns;
	}

	result = nvme_init_auth();
	if (result)
		goto destroy_ns_chr;
	return 0;

destroy_ns_chr:
	class_destroy(nvme_ns_chr_class);
unregister_generic_ns:
	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
destroy_subsys_class:
	class_destroy(nvme_subsys_class);
destroy_class:
	class_destroy(nvme_class);
unregister_chrdev:
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_delete_wq:
	destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
	destroy_workqueue(nvme_reset_wq);
destroy_wq:
	destroy_workqueue(nvme_wq);
out:
	return result;
}
static void __exit nvme_core_exit(void)
{
	nvme_exit_auth();
	class_destroy(nvme_ns_chr_class);
	class_destroy(nvme_subsys_class);
	class_destroy(nvme_class);
	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
	unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
	destroy_workqueue(nvme_delete_wq);
	destroy_workqueue(nvme_reset_wq);
	destroy_workqueue(nvme_wq);
	ida_destroy(&nvme_ns_chr_minor_ida);
	ida_destroy(&nvme_instance_ida);
}
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);