/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "nvme.h"
#include "fabrics.h"

#define NVME_MINORS		(1U << MINORBITS)

unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");

/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq hosts controller deletion
 * works which flush reset works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

static DEFINE_IDA(nvme_subsystems_ida);
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

static void nvme_ns_remove(struct nvme_ns *ns);
static int nvme_revalidate_disk(struct gendisk *disk);
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);

static void nvme_set_queue_dying(struct nvme_ns *ns)
{
	/*
	 * Revalidating a dead namespace sets capacity to 0. This will end
	 * buffered writers dirtying pages that can't be synced.
	 */
	if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
		return;
	revalidate_disk(ns->disk);
	blk_set_queue_dying(ns->queue);
	/* Forcibly unquiesce queues to avoid blocking dispatch */
	blk_mq_unquiesce_queue(ns->queue);
}

static void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only queue new scan work when admin and IO queues are both alive
	 */
	if (ctrl->state == NVME_CTRL_LIVE)
		queue_work(nvme_wq, &ctrl->scan_work);
}

int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (ctrl->state != NVME_CTRL_LIVE &&
		    ctrl->state != NVME_CTRL_ADMIN_ONLY)
			ret = -ENETRESET;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);

static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret = 0;

	/*
	 * Keep a reference until the work is flushed since ->delete_ctrl
	 * can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	ret = nvme_delete_ctrl(ctrl);
	if (!ret)
		flush_work(&ctrl->delete_work);
	nvme_put_ctrl(ctrl);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl_sync);

static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}

static blk_status_t nvme_error_status(struct request *req)
{
	switch (nvme_req(req)->status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_ONCS_NOT_SUPPORTED:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
	default:
		return BLK_STS_IOERR;
	}
}

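/*
 * Note on the 0x7ff mask above: it keeps only the Status Code and Status
 * Code Type fields of the completion, dropping the More and Do Not Retry
 * flags, so nvme_error_status() classifies on the status code alone.
 * Whether a failed command may be retried at all is decided separately
 * below: the DNR bit and the driver's retry budget (max_retries) both
 * veto a retry.
 */
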
static inline bool nvme_req_needs_retry(struct request *req)
{
	if (blk_noretry_request(req))
		return false;
	if (nvme_req(req)->status & NVME_SC_DNR)
		return false;
	if (nvme_req(req)->retries >= nvme_max_retries)
		return false;
	return true;
}

void nvme_complete_rq(struct request *req)
{
	blk_status_t status = nvme_error_status(req);

	trace_nvme_complete_rq(req);

	if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
		if ((req->cmd_flags & REQ_NVME_MPATH) &&
		    blk_path_error(status)) {
			nvme_failover_req(req);
			return;
		}

		if (!blk_queue_dying(req->q)) {
			nvme_req(req)->retries++;
			blk_mq_requeue_request(req, true);
			return;
		}
	}
	blk_mq_end_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

void nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	nvme_req(req)->status = NVME_SC_ABORT_REQ;
	blk_mq_complete_request(req);
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

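/*
 * Controller state machine, as encoded in the transition table below (an
 * arrow means the new state is reachable from the listed old states):
 *
 *	RESETTING	<- NEW, LIVE, ADMIN_ONLY
 *	CONNECTING	<- NEW, RESETTING
 *	LIVE		<- NEW, RESETTING, CONNECTING
 *	ADMIN_ONLY	<- CONNECTING
 *	DELETING	<- LIVE, ADMIN_ONLY, RESETTING, CONNECTING
 *	DEAD		<- DELETING
 *
 * Any transition not listed is rejected and the state is left unchanged.
 */
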
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_ADMIN_ONLY:
		switch (old_state) {
		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
		case NVME_CTRL_ADMIN_ONLY:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_CONNECTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_ADMIN_ONLY:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed)
		ctrl->state = new_state;

	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (changed && ctrl->state == NVME_CTRL_LIVE)
		nvme_kick_requeue_lists(ctrl);

	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_simple_remove(&head->subsys->ns_ida, head->instance);
	list_del_init(&head->entry);
	cleanup_srcu_struct_quiesced(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}

static void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->ndev)
		nvme_nvm_unregister(ns);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}

static inline void nvme_clear_nvme_request(struct request *req)
{
	if (!(req->rq_flags & RQF_DONTPREP)) {
		nvme_req(req)->retries = 0;
		nvme_req(req)->flags = 0;
		req->rq_flags |= RQF_DONTPREP;
	}
}

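/*
 * Allocate a block layer request carrying a raw NVMe command.  Passthrough
 * commands are mapped to the "driver private" request ops so the normal
 * I/O path never interprets them; REQ_OP_DRV_OUT is used when the command
 * transfers data to the device.  qid selects an explicit hardware queue,
 * or NVME_QID_ANY to let blk-mq pick one.
 */
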
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
	struct request *req;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, op, flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, op, flags,
				qid ? qid - 1 : 0);
	}
	if (IS_ERR(req))
		return req;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	nvme_clear_nvme_request(req);
	nvme_req(req)->cmd = cmd;

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));

	c.directive.opcode = nvme_admin_directive_send;
	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
	c.directive.dtype = NVME_DIR_IDENTIFY;
	c.directive.tdtype = NVME_DIR_STREAMS;
	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
}

static int nvme_disable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, false);
}

static int nvme_enable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, true);
}

static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
				  struct streams_directive_params *s, u32 nsid)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	memset(s, 0, sizeof(*s));

	c.directive.opcode = nvme_admin_directive_recv;
	c.directive.nsid = cpu_to_le32(nsid);
	c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
	c.directive.dtype = NVME_DIR_STREAMS;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
}

static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
	struct streams_directive_params s;
	int ret;

	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
		return 0;
	if (!streams)
		return 0;

	ret = nvme_enable_streams(ctrl);
	if (ret)
		return ret;

	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
	if (ret)
		return ret;

	ctrl->nssa = le16_to_cpu(s.nssa);
	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
		dev_info(ctrl->device, "too few streams (%u) available\n",
					ctrl->nssa);
		nvme_disable_streams(ctrl);
		return 0;
	}

	ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
	return 0;
}

/*
 * Check if 'req' has a write hint associated with it. If it does, assign
 * a valid namespace stream to the write.
 */
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
				     struct request *req, u16 *control,
				     u32 *dsmgmt)
{
	enum rw_hint streamid = req->write_hint;

	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
		streamid = 0;
	else {
		streamid--;
		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
			return;

		*control |= NVME_RW_DTYPE_STREAMS;
		*dsmgmt |= streamid << 16;
	}

	if (streamid < ARRAY_SIZE(req->q->write_hints))
		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}

static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return BLK_STS_RESOURCE;

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		if (n < segments) {
			range[n].cattr = cpu_to_le32(0);
			range[n].nlb = cpu_to_le32(nlb);
			range[n].slba = cpu_to_le64(slba);
		}
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		kfree(range);
		return BLK_STS_IOERR;
	}

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}

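/*
 * The DSM range array built above is handed to the transport through
 * req->special_vec (RQF_SPECIAL_PAYLOAD) instead of a bio, and is freed
 * again in nvme_cleanup_cmd() once the request completes.
 */
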
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);

	if (ns->ms) {
		/*
		 * If formatted with metadata, the block layer always provides a
		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
		 * we enable the PRACT bit for protection information or set the
		 * namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		} else if (req_op(req) == REQ_OP_WRITE) {
			t10_pi_prepare(req, ns->pi_type);
		}

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
			break;
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return 0;
}

void nvme_cleanup_cmd(struct request *req)
{
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    nvme_req(req)->status == 0) {
		struct nvme_ns *ns = req->rq_disk->private_data;

		t10_pi_complete(req, ns->pi_type,
				blk_rq_bytes(req) >> ns->lba_shift);
	}
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		kfree(page_address(req->special_vec.bv_page) +
		      req->special_vec.bv_offset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	blk_status_t ret = BLK_STS_OK;

	nvme_clear_nvme_request(req);

	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_WRITE_ZEROES:
		/* currently only aliased to deallocate for a few ctrls: */
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	cmd->common.command_id = req->tag;
	trace_nvme_setup_cmd(req, cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, at_head);
	if (result)
		*result = nvme_req(req)->result;
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
 out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
		unsigned len, u32 seed, bool write)
{
	struct bio_integrity_payload *bip;
	int ret = -ENOMEM;
	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		goto out;

	ret = -EFAULT;
	if (write && copy_from_user(buf, ubuf, len))
		goto out_free_meta;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto out_free_meta;
	}

	bip->bip_iter.bi_size = len;
	bip->bip_iter.bi_sector = seed;
	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf));
	if (ret == len)
		return buf;
	ret = -ENOMEM;
out_free_meta:
	kfree(buf);
out:
	return ERR_PTR(ret);
}

static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, void __user *ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, u32 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
	nvme_req(req)->flags |= NVME_REQ_USERCMD;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;
		bio->bi_disk = disk;
		if (disk && meta_buffer && meta_len) {
			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
					meta_seed, write);
			if (IS_ERR(meta)) {
				ret = PTR_ERR(meta);
				goto out_unmap;
			}
			req->cmd_flags |= REQ_INTEGRITY;
		}
	}

	blk_execute_rq(req->q, disk, req, 0);
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
	if (result)
		*result = le32_to_cpu(nvme_req(req)->result.u32);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
	kfree(meta);
out_unmap:
	if (bio)
		blk_rq_unmap_user(bio);
out:
	blk_mq_free_request(req);
	return ret;
}

static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;
	unsigned long flags;
	bool startka = false;

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
				status);
		return;
	}

	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->state == NVME_CTRL_LIVE ||
	    ctrl->state == NVME_CTRL_CONNECTING)
		startka = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct request *rq;

	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
			NVME_QID_ANY);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

	return 0;
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
		nvme_reset_ctrl(ctrl);
		return;
	}
}

static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_ns_ids *ids)
{
	struct nvme_command c = { };
	int status;
	void *data;
	int pos;
	int len;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
				      NVME_IDENTIFY_DATA_SIZE);
	if (status)
		goto free_data;

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		switch (cur->nidt) {
		case NVME_NIDT_EUI64:
			if (cur->nidl != NVME_NIDT_EUI64_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_EUI64_LEN;
			memcpy(ids->eui64, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_NGUID:
			if (cur->nidl != NVME_NIDT_NGUID_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_NGUID_LEN;
			memcpy(ids->nguid, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_UUID:
			if (cur->nidl != NVME_NIDT_UUID_LEN) {
				dev_warn(ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_UUID_LEN;
			uuid_copy(&ids->uuid, data + pos + sizeof(*cur));
			break;
		default:
			/* Skip unknown types */
			len = cur->nidl;
			break;
		}

		len += sizeof(*cur);
	}
free_data:
	kfree(data);
	return status;
}

static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
				    NVME_IDENTIFY_DATA_SIZE);
}

static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl,
		unsigned nsid)
{
	struct nvme_id_ns *id;
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	id = kmalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NULL;

	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
	if (error) {
		dev_warn(ctrl->device, "Identify namespace failed\n");
		kfree(id);
		return NULL;
	}

	return id;
}

static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		      void *buffer, size_t buflen, u32 *result)
{
	struct nvme_command c;
	union nvme_result res;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

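/*
 * Note for nvme_set_queue_count(): the Number of Queues feature takes
 * 0's-based counts, hence the (*count - 1) packing of the submission and
 * completion queue counts into the low and high 16 bits of cdw11, and the
 * "+ 1" when decoding the controller's answer from the result dword.
 */
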
#define NVME_AEN_SUPPORTED \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | NVME_AEN_CFG_ANA_CHANGE)

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
	int status;

	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
			 supported_aens);
}

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, io.slba, NULL, 0);
}

static u32 nvme_known_admin_effects(u8 opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
		return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
					NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
		return NVME_CMD_EFFECTS_CSE_MASK;
	default:
		break;
	}
	return 0;
}

static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
								u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		if (ctrl->effects)
			effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
		if (effects & ~NVME_CMD_EFFECTS_CSUPP)
			dev_warn(ctrl->device,
				 "IO command:%02x has unhandled effects:%08x\n",
				 opcode, effects);
		return 0;
	}

	if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
	else
		effects = nvme_known_admin_effects(opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
		mutex_lock(&ctrl->scan_lock);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}

static void nvme_update_formats(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		if (ns->disk && nvme_revalidate_disk(ns->disk))
			nvme_set_queue_dying(ns);
	up_read(&ctrl->namespaces_rwsem);

	nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
}

static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
{
	/*
	 * Revalidate LBA changes prior to unfreezing. This is necessary to
	 * prevent memory corruption if a logical block size was changed by
	 * this command.
	 */
	if (effects & NVME_CMD_EFFECTS_LBCC)
		nvme_update_formats(ctrl);
	if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
		nvme_unfreeze(ctrl);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC)
		nvme_init_identify(ctrl);
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
		nvme_queue_scan(ctrl);
}

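/*
 * nvme_passthru_start()/nvme_passthru_end() bracket every user passthrough
 * command: commands whose effects report LBA content changes or a command
 * set entry freeze all namespace queues first, and the end side revalidates
 * and unfreezes in the reverse order, re-running identify or a namespace
 * rescan when the effects say controller or namespace state changed.
 */
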
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u32 effects;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
			0, &cmd.result, timeout);
	nvme_passthru_end(ctrl, effects);

	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

/*
 * Issue ioctl requests on the first available path.  Note that unlike normal
 * block layer requests we will not retry failed requests on another controller.
 */
static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
		struct nvme_ns_head **head, int *srcu_idx)
{
#ifdef CONFIG_NVME_MULTIPATH
	if (disk->fops == &nvme_ns_head_ops) {
		struct nvme_ns *ns;

		*head = disk->private_data;
		*srcu_idx = srcu_read_lock(&(*head)->srcu);
		ns = nvme_find_path(*head);
		if (!ns)
			srcu_read_unlock(&(*head)->srcu, *srcu_idx);
		return ns;
	}
#endif

	*head = NULL;
	*srcu_idx = -1;
	return disk->private_data;
}

static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
{
	if (head)
		srcu_read_unlock(&head->srcu, idx);
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns_head *head = NULL;
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret;

	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
	if (unlikely(!ns))
		return -EWOULDBLOCK;

	/*
	 * Handle ioctls that apply to the controller instead of the namespace
	 * separately and drop the ns SRCU reference early.  This avoids a
	 * deadlock when deleting namespaces using the passthrough interface.
	 */
	if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) {
		struct nvme_ctrl *ctrl = ns->ctrl;

		nvme_get_ctrl(ns->ctrl);
		nvme_put_ns_from_disk(head, srcu_idx);

		if (cmd == NVME_IOCTL_ADMIN_CMD)
			ret = nvme_user_cmd(ctrl, NULL, argp);
		else
			ret = sed_ioctl(ctrl->opal_dev, cmd, argp);

		nvme_put_ctrl(ctrl);
		return ret;
	}

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		ret = ns->head->ns_id;
		break;
	case NVME_IOCTL_IO_CMD:
		ret = nvme_user_cmd(ns->ctrl, ns, argp);
		break;
	case NVME_IOCTL_SUBMIT_IO:
		ret = nvme_submit_io(ns, argp);
		break;
	default:
		if (ns->ndev)
			ret = nvme_nvm_ioctl(ns, cmd, arg);
		else
			ret = -ENOTTY;
	}

	nvme_put_ns_from_disk(head, srcu_idx);
	return ret;
}

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

#ifdef CONFIG_NVME_MULTIPATH
	/* should never be called due to GENHD_FL_HIDDEN */
	if (WARN_ON_ONCE(ns->head->disk))
		goto fail;
#endif
	if (!kref_get_unless_zero(&ns->kref))
		goto fail;
	if (!try_module_get(ns->ctrl->ops->module))
		goto fail_put_ns;

	return 0;

fail_put_ns:
	nvme_put_ns(ns);
fail:
	return -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
{
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	switch (pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ms;
	blk_integrity_register(disk, &integrity);
	blk_queue_max_integrity_segments(disk->queue, 1);
}
#else
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

static void nvme_set_chunk_size(struct nvme_ns *ns)
{
	u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9));
	blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
}

static void nvme_config_discard(struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct request_queue *queue = ns->queue;
	u32 size = queue_logical_block_size(queue);

	if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
		return;
	}

	if (ctrl->nr_streams && ns->sws && ns->sgs)
		size *= ns->sws * ns->sgs;

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	queue->limits.discard_alignment = 0;
	queue->limits.discard_granularity = size;

	/* If discard is already enabled, don't reset queue limits */
	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
		return;

	blk_queue_max_discard_sectors(queue, UINT_MAX);
	blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}

static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
		struct nvme_id_ns *id, struct nvme_ns_ids *ids)
{
	memset(ids, 0, sizeof(*ids));

	if (ctrl->vs >= NVME_VS(1, 1, 0))
		memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
	if (ctrl->vs >= NVME_VS(1, 2, 0))
		memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
	if (ctrl->vs >= NVME_VS(1, 3, 0)) {
		/*
		 * Don't treat an error as fatal, as we potentially already
		 * have an NGUID or EUI-64.
		 */
		if (nvme_identify_ns_descs(ctrl, nsid, ids))
			dev_warn(ctrl->device,
				 "%s: Identify Descriptors failed\n", __func__);
	}
}

static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
{
	return !uuid_is_null(&ids->uuid) ||
		memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
		memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
}

static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
}

static void nvme_update_disk_info(struct gendisk *disk,
		struct nvme_ns *ns, struct nvme_id_ns *id)
{
	sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
	unsigned short bs = 1 << ns->lba_shift;

	if (ns->lba_shift > PAGE_SHIFT) {
		/* unsupported block size, set capacity to 0 later */
		bs = (1 << 9);
	}
	blk_mq_freeze_queue(disk->queue);
	blk_integrity_unregister(disk);

	blk_queue_logical_block_size(disk->queue, bs);
	blk_queue_physical_block_size(disk->queue, bs);
	blk_queue_io_min(disk->queue, bs);

	if (ns->ms && !ns->ext &&
	    (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
		nvme_init_integrity(disk, ns->ms, ns->pi_type);
	if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
	    ns->lba_shift > PAGE_SHIFT)
		capacity = 0;

	set_capacity(disk, capacity);
	nvme_config_discard(ns);

	if (id->nsattr & (1 << 0))
		set_disk_ro(disk, true);
	else
		set_disk_ro(disk, false);

	blk_mq_unfreeze_queue(disk->queue);
}

static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
{
	struct nvme_ns *ns = disk->private_data;

	/*
	 * If identify namespace failed, use default 512 byte block size so
	 * block layer can use before failing read/write for 0 capacity.
	 */
	ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	ns->noiob = le16_to_cpu(id->noiob);
	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
	/* the PI implementation requires metadata equal t10 pi tuple size */
	if (ns->ms == sizeof(struct t10_pi_tuple))
		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
	else
		ns->pi_type = 0;

	if (ns->noiob)
		nvme_set_chunk_size(ns);
	nvme_update_disk_info(disk, ns, id);
	if (ns->ndev)
		nvme_nvm_update_nvm_info(ns);
#ifdef CONFIG_NVME_MULTIPATH
	if (ns->head->disk) {
		nvme_update_disk_info(ns->head->disk, ns, id);
		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
	}
#endif
}

static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_id_ns *id;
	struct nvme_ns_ids ids;
	int ret = 0;

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);
		return -ENODEV;
	}

	id = nvme_identify_ns(ctrl, ns->head->ns_id);
	if (!id)
		return -ENODEV;

	if (id->ncap == 0) {
		ret = -ENODEV;
		goto out;
	}

	__nvme_revalidate_disk(disk, id);
	nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
	if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
		dev_err(ctrl->device,
			"identifiers changed for nsid %d\n", ns->head->ns_id);
		ret = -ENODEV;
	}

out:
	kfree(id);
	return ret;
}

static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}

static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_ns_head *head = NULL;
	struct nvme_ns *ns;
	struct nvme_command c;
	int srcu_idx, ret;
	u8 data[16] = { 0, };

	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
	if (unlikely(!ns))
		return -EWOULDBLOCK;

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->head->ns_id);
	c.common.cdw10[0] = cpu_to_le32(cdw10);

	ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
	nvme_put_ns_from_disk(head, srcu_idx);
	return ret;
}

static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

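/*
 * The cdw10 layouts used above follow the NVMe reservation commands:
 * bits 2:0 hold the action (e.g. acquire/preempt, register/unregister),
 * bit 3 is IEKEY ("ignore existing key"), and bits 15:8 carry the
 * reservation type translated by nvme_pr_type().
 */
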
static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

#ifdef CONFIG_BLK_SED_OPAL
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
{
	struct nvme_ctrl *ctrl = data;
	struct nvme_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	if (send)
		cmd.common.opcode = nvme_admin_security_send;
	else
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
	cmd.common.cdw10[0] = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw10[1] = cpu_to_le32(len);

	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
				      ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk= nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};

#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;

	if (!kref_get_unless_zero(&head->ref))
		return -ENXIO;
	return 0;
}

static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
{
	nvme_put_ns_head(disk->private_data);
}

const struct block_device_operations nvme_ns_head_ops = {
	.owner		= THIS_MODULE,
	.open		= nvme_ns_head_open,
	.release	= nvme_ns_head_release,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_ioctl,
	.getgeo		= nvme_getgeo,
	.pr_ops		= &nvme_pr_ops,
};
#endif /* CONFIG_NVME_MULTIPATH */

static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if (csts == ~0)
			return -ENODEV;
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
			return -ENODEV;
		}
	}

	return ret;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
		msleep(NVME_QUIRK_DELAY_AMOUNT);

	return nvme_wait_ready(ctrl, cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);

int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	/*
	 * Default to a 4K page size, with the intention to update this
	 * path in the future to accommodate architectures with differing
	 * kernel and IO page sizes.
	 */
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
	int ret;

	if (page_shift < dev_page_min) {
		dev_err(ctrl->device,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	ctrl->page_size = 1 << page_shift;

	ctrl->ctrl_config = NVME_CC_CSS_NVM;
	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);

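/*
 * On the CC programming above: the MPS field encodes the memory page size
 * as 2^(12 + MPS), so an MPS of 0 means 4K; NVME_CC_IOSQES/IOCQES set the
 * I/O submission and completion queue entry sizes to 2^6 = 64 and
 * 2^4 = 16 bytes, the sizes the NVM command set requires.
 */
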
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ);
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);

static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = false;

	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;

		max_segments = min_not_zero(max_segments, ctrl->max_segments);
		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
	    is_power_of_2(ctrl->max_hw_sectors))
		blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
	blk_queue_virt_boundary(q, ctrl->page_size - 1);
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);
}

static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
{
	__le64 ts;
	int ret;

	if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
		return 0;

	ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
	ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
			NULL);
	if (ret)
		dev_warn_once(ctrl->device,
			"could not set timestamp (%d)\n", ret);
	return ret;
}

static int nvme_configure_apst(struct nvme_ctrl *ctrl)
{
	/*
	 * APST (Autonomous Power State Transition) lets us program a
	 * table of power state transitions that the controller will
	 * perform automatically.  We configure it with a simple
	 * heuristic: we are willing to spend at most 2% of the time
	 * transitioning between power states.  Therefore, when running
	 * in any given state, we will enter the next lower-power
	 * non-operational state after waiting 50 * (enlat + exlat)
	 * microseconds, as long as that state's exit latency is under
	 * the requested maximum latency.
	 *
	 * We will not autonomously enter any non-operational state for
	 * which the total latency exceeds ps_max_latency_us.  Users
	 * can set ps_max_latency_us to zero to turn off APST.
	 */
	unsigned apste;
	struct nvme_feat_auto_pst *table;
	u64 max_lat_us = 0;
	int max_ps = -1;
	int ret;

	/*
	 * If APST isn't supported or if we haven't been initialized yet,
	 * then don't do anything.
	 */
	if (!ctrl->apsta)
		return 0;

	if (ctrl->npss > 31) {
		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
		return 0;
	}

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return 0;

	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
		/* Turn off APST. */
		apste = 0;
		dev_dbg(ctrl->device, "APST disabled\n");
	} else {
		__le64 target = cpu_to_le64(0);
		int state;

		/*
		 * Walk through all states from lowest- to highest-power.
		 * According to the spec, lower-numbered states use more
		 * power.  NPSS, despite the name, is the index of the
		 * lowest-power state, not the number of states.
		 */
		for (state = (int)ctrl->npss; state >= 0; state--) {
			u64 total_latency_us, exit_latency_us, transition_ms;

			if (target)
				table->entries[state] = target;

			/*
			 * Don't allow transitions to the deepest state
			 * if it's quirked off.
			 */
			if (state == ctrl->npss &&
			    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
				continue;

			/*
			 * Is this state a useful non-operational state for
			 * higher-power states to autonomously transition to?
			 */
			if (!(ctrl->psd[state].flags &
			      NVME_PS_FLAGS_NON_OP_STATE))
				continue;

			exit_latency_us =
				(u64)le32_to_cpu(ctrl->psd[state].exit_lat);
			if (exit_latency_us > ctrl->ps_max_latency_us)
				continue;

			total_latency_us =
				exit_latency_us +
				le32_to_cpu(ctrl->psd[state].entry_lat);

			/*
			 * This state is good.  Use it as the APST idle
			 * target for higher power states.
			 */
			transition_ms = total_latency_us + 19;
			do_div(transition_ms, 20);
			if (transition_ms > (1 << 24) - 1)
				transition_ms = (1 << 24) - 1;

			target = cpu_to_le64((state << 3) |
					     (transition_ms << 8));

			if (max_ps == -1)
				max_ps = state;

			if (total_latency_us > max_lat_us)
				max_lat_us = total_latency_us;
		}

		apste = 1;

		if (max_ps == -1)
			dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
		else
			dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
				max_ps, max_lat_us, (int)sizeof(*table), table);
	}

	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
				table, sizeof(*table), NULL);
	if (ret)
		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);

	kfree(table);
	return ret;
}

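/*
 * Worked example for the table encoding above: a non-operational state
 * with enlat + exlat = 10000us gives transition_ms = (10000 + 19) / 20,
 * i.e. 500ms (the 2% heuristic: 50 * total latency, in milliseconds).
 * Each entry packs the target state in bits 7:3 (ITPS) and the idle time
 * in milliseconds in bits 31:8 (ITPT), matching the
 * (state << 3) | (transition_ms << 8) expression.
 */
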
static void nvme_set_latency_tolerance(struct device *dev, s32 val)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	u64 latency;

	switch (val) {
	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
	case PM_QOS_LATENCY_ANY:
		latency = U64_MAX;
		break;
	default:
		latency = val;
	}

	if (ctrl->ps_max_latency_us != latency) {
		ctrl->ps_max_latency_us = latency;
		nvme_configure_apst(ctrl);
	}
}

struct nvme_core_quirk_entry {
	/*
	 * NVMe model and firmware strings are padded with spaces.  For
	 * simplicity, strings in the quirk table are padded with NULLs
	 * instead.
	 */
	u16 vid;
	const char *mn;
	const char *fr;
	unsigned long quirks;
};

static const struct nvme_core_quirk_entry core_quirks[] = {
	{
		/*
		 * This Toshiba device seems to die using any APST states.  See:
		 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
		 */
		.vid = 0x1179,
		.mn = "THNSF5256GPUK TOSHIBA",
		.quirks = NVME_QUIRK_NO_APST,
	}
};

/* match is null-terminated but idstr is space-padded. */
static bool string_matches(const char *idstr, const char *match, size_t len)
{
	size_t matchlen;

	if (!match)
		return true;

	matchlen = strlen(match);
	WARN_ON_ONCE(matchlen > len);

	if (memcmp(idstr, match, matchlen))
		return false;

	for (; matchlen < len; matchlen++)
		if (idstr[matchlen] != ' ')
			return false;

	return true;
}

static bool quirk_matches(const struct nvme_id_ctrl *id,
			  const struct nvme_core_quirk_entry *q)
{
	return q->vid == le16_to_cpu(id->vid) &&
		string_matches(id->mn, q->mn, sizeof(id->mn)) &&
		string_matches(id->fr, q->fr, sizeof(id->fr));
}

static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	size_t nqnlen;
	int off;

	nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
	if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
		strncpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
		return;
	}

	if (ctrl->vs >= NVME_VS(1, 2, 1))
		dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");

	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
	off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
			"nqn.2014.08.org.nvmexpress:%04x%04x",
			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
	memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
	off += sizeof(id->sn);
	memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
	off += sizeof(id->mn);
	memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
}

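/*
 * Illustrative (hypothetical) output of the fallback above for a
 * controller with VID/SSVID 0x8086 and serial "S123": the generated NQN
 * starts with "nqn.2014.08.org.nvmexpress:80868086S123" followed by the
 * padded model string, which keeps pre-1.2.1 controllers unique per
 * subsystem even though they never reported a SUBNQN.
 */
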
static void __nvme_release_subsystem(struct nvme_subsystem *subsys)
{
	ida_simple_remove(&nvme_subsystems_ida, subsys->instance);
	kfree(subsys);
}

static void nvme_release_subsystem(struct device *dev)
{
	__nvme_release_subsystem(container_of(dev, struct nvme_subsystem, dev));
}

static void nvme_destroy_subsystem(struct kref *ref)
{
	struct nvme_subsystem *subsys =
			container_of(ref, struct nvme_subsystem, ref);

	mutex_lock(&nvme_subsystems_lock);
	list_del(&subsys->entry);
	mutex_unlock(&nvme_subsystems_lock);

	ida_destroy(&subsys->ns_ida);
	device_del(&subsys->dev);
	put_device(&subsys->dev);
}

static void nvme_put_subsystem(struct nvme_subsystem *subsys)
{
	kref_put(&subsys->ref, nvme_destroy_subsystem);
}

static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
{
	struct nvme_subsystem *subsys;

	lockdep_assert_held(&nvme_subsystems_lock);

	list_for_each_entry(subsys, &nvme_subsystems, entry) {
		if (strcmp(subsys->subnqn, subsysnqn))
			continue;
		if (!kref_get_unless_zero(&subsys->ref))
			continue;
		return subsys;
	}

	return NULL;
}

#define SUBSYS_ATTR_RO(_name, _mode, _show) \
	struct device_attribute subsys_attr_##_name = \
		__ATTR(_name, _mode, _show, NULL)

static ssize_t nvme_subsys_show_nqn(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);

#define nvme_subsys_show_str_function(field) \
static ssize_t subsys_##field##_show(struct device *dev, \
			    struct device_attribute *attr, char *buf) \
{ \
	struct nvme_subsystem *subsys = \
		container_of(dev, struct nvme_subsystem, dev); \
	return sprintf(buf, "%.*s\n", \
		       (int)sizeof(subsys->field), subsys->field); \
} \
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);

nvme_subsys_show_str_function(model);
nvme_subsys_show_str_function(serial);
nvme_subsys_show_str_function(firmware_rev);

static struct attribute *nvme_subsys_attrs[] = {
	&subsys_attr_model.attr,
	&subsys_attr_serial.attr,
	&subsys_attr_firmware_rev.attr,
	&subsys_attr_subsysnqn.attr,
	NULL,
};

static struct attribute_group nvme_subsys_attrs_group = {
	.attrs = nvme_subsys_attrs,
};

static const struct attribute_group *nvme_subsys_attrs_groups[] = {
	&nvme_subsys_attrs_group,
	NULL,
};

static int nvme_active_ctrls(struct nvme_subsystem *subsys)
{
	int count = 0;
	struct nvme_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->state != NVME_CTRL_DELETING &&
		    ctrl->state != NVME_CTRL_DEAD)
			count++;
	}
	mutex_unlock(&subsys->lock);

	return count;
}

static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	struct nvme_subsystem *subsys, *found;
	int ret;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return -ENOMEM;
	ret = ida_simple_get(&nvme_subsystems_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(subsys);
		return ret;
	}
	subsys->instance = ret;
	mutex_init(&subsys->lock);
	kref_init(&subsys->ref);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->nsheads);
	nvme_init_subnqn(subsys, ctrl, id);
	memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
	memcpy(subsys->model, id->mn, sizeof(subsys->model));
	memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
	subsys->vendor_id = le16_to_cpu(id->vid);
	subsys->cmic = id->cmic;

	subsys->dev.class = nvme_subsys_class;
	subsys->dev.release = nvme_release_subsystem;
	subsys->dev.groups = nvme_subsys_attrs_groups;
	dev_set_name(&subsys->dev, "nvme-subsys%d", subsys->instance);
	device_initialize(&subsys->dev);

	mutex_lock(&nvme_subsystems_lock);
	found = __nvme_find_get_subsystem(subsys->subnqn);
	if (found) {
		/*
		 * Verify that the subsystem actually supports multiple
		 * controllers, else bail out.
		 */
		if (!(ctrl->opts && ctrl->opts->discovery_nqn) &&
		    nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
			dev_err(ctrl->device,
				"ignoring ctrl due to duplicate subnqn (%s).\n",
				found->subnqn);
			nvme_put_subsystem(found);
			ret = -EINVAL;
			goto out_unlock;
		}

		__nvme_release_subsystem(subsys);
		subsys = found;
	} else {
		ret = device_add(&subsys->dev);
		if (ret) {
			dev_err(ctrl->device,
				"failed to register subsystem device.\n");
			goto out_unlock;
		}
		ida_init(&subsys->ns_ida);
		list_add_tail(&subsys->entry, &nvme_subsystems);
	}

	ctrl->subsys = subsys;
	mutex_unlock(&nvme_subsystems_lock);

	if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
			dev_name(ctrl->device))) {
		dev_err(ctrl->device,
			"failed to create sysfs link from subsystem.\n");
		/* the transport driver will eventually put the subsystem */
		return -EINVAL;
	}

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&subsys->lock);

	return 0;

out_unlock:
	mutex_unlock(&nvme_subsystems_lock);
	put_device(&subsys->dev);
	return ret;
}

int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
		void *log, size_t size, u64 offset)
{
	struct nvme_command c = { };
	unsigned long dwlen = size / 4 - 1;

	c.get_log_page.opcode = nvme_admin_get_log_page;
	c.get_log_page.nsid = cpu_to_le32(nsid);
	c.get_log_page.lid = log_page;
	c.get_log_page.lsp = lsp;
	c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
	c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
	c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
	c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}

static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
{
	int ret = 0;

	if (!ctrl->effects)
		ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);

	if (!ctrl->effects)
		return 0;

	ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0,
			ctrl->effects, sizeof(*ctrl->effects), 0);
	if (ret) {
		kfree(ctrl->effects);
		ctrl->effects = NULL;
	}
	return ret;
}

/*
 * Initialize the cached copies of the Identify data and various controller
 * registers in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u64 cap;
	int ret, page_shift;
	u32 max_hw_sectors;
	bool prev_apst_enabled;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
		return ret;
	}

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	page_shift = NVME_CAP_MPSMIN(cap) + 12;

	if (ctrl->vs >= NVME_VS(1, 1, 0))
		ctrl->subsystem = NVME_CAP_NSSRC(cap);

	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
		return -EIO;
	}

	if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
		ret = nvme_get_effects_log(ctrl);
		if (ret < 0)
			goto out_free;
	}

	if (!ctrl->identified) {
		int i;

		ret = nvme_init_subsystem(ctrl, id);
		if (ret)
			goto out_free;

		/*
		 * Check for quirks.  Quirk can depend on firmware version,
		 * so, in principle, the set of quirks present can change
		 * across a reset.  As a possible future enhancement, we
		 * could re-scan for quirks every time we reinitialize
		 * the device, but we'd have to make sure that the driver
		 * behaves intelligently if the quirks change.
		 */
		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
			if (quirk_matches(id, &core_quirks[i]))
				ctrl->quirks |= core_quirks[i].quirks;
		}
	}

	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
	}

	ctrl->oacs = le16_to_cpu(id->oacs);
	ctrl->oncs = le16_to_cpup(&id->oncs);
	ctrl->oaes = le32_to_cpu(id->oaes);
	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	ctrl->cntlid = le16_to_cpup(&id->cntlid);
	if (id->mdts)
		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
	else
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

	nvme_set_queue_limits(ctrl, ctrl->admin_q);
	ctrl->sgls = le32_to_cpu(id->sgls);
	ctrl->kas = le16_to_cpu(id->kas);
	ctrl->max_namespaces = le32_to_cpu(id->mnan);

	if (id->rtd3e) {
		/* us -> s */
		u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000;

		ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
						 shutdown_timeout, 60);

		if (ctrl->shutdown_timeout != shutdown_timeout)
			dev_info(ctrl->device,
				 "Shutdown timeout set to %u seconds\n",
				 ctrl->shutdown_timeout);
	} else
		ctrl->shutdown_timeout = shutdown_timeout;

	ctrl->npss = id->npss;
	ctrl->apsta = id->apsta;
	prev_apst_enabled = ctrl->apst_enabled;
	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
		if (force_apst && id->apsta) {
			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
			ctrl->apst_enabled = true;
		} else {
			ctrl->apst_enabled = false;
		}
	} else {
		ctrl->apst_enabled = id->apsta;
	}
	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		ctrl->icdoff = le16_to_cpu(id->icdoff);
		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
		ctrl->maxcmd = le16_to_cpu(id->maxcmd);

		/*
		 * In fabrics we need to verify the cntlid matches the
		 * admin connect
		 */
		if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
			ret = -EINVAL;
			goto out_free;
		}

		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
			dev_err(ctrl->device,
				"keep-alive support is mandatory for fabrics\n");
			ret = -EINVAL;
			goto out_free;
		}
	} else {
		ctrl->cntlid = le16_to_cpu(id->cntlid);
		ctrl->hmpre = le32_to_cpu(id->hmpre);
		ctrl->hmmin = le32_to_cpu(id->hmmin);
		ctrl->hmminds = le32_to_cpu(id->hmminds);
		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
	}

	ret = nvme_mpath_init(ctrl, id);
	kfree(id);

	if (ret < 0)
		return ret;

	if (ctrl->apst_enabled && !prev_apst_enabled)
		dev_pm_qos_expose_latency_tolerance(ctrl->device);
	else if (!ctrl->apst_enabled && prev_apst_enabled)
		dev_pm_qos_hide_latency_tolerance(ctrl->device);

	ret = nvme_configure_apst(ctrl);
	if (ret < 0)
		return ret;

	ret = nvme_configure_timestamp(ctrl);
	if (ret < 0)
		return ret;

	ret = nvme_configure_directives(ctrl);
	if (ret < 0)
		return ret;

	ctrl->identified = true;

	return 0;

out_free:
	kfree(id);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);

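/*
 * Controller character device (/dev/nvmeX): open gates on controller state,
 * and the ioctls below provide admin/IO command passthrough as well as
 * reset, subsystem-reset, and rescan entry points.
 */
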
static int nvme_dev_open(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl =
		container_of(inode->i_cdev, struct nvme_ctrl, cdev);

	switch (ctrl->state) {
	case NVME_CTRL_LIVE:
	case NVME_CTRL_ADMIN_ONLY:
		break;
	default:
		return -EWOULDBLOCK;
	}

	file->private_data = ctrl;
	return 0;
}

static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	down_read(&ctrl->namespaces_rwsem);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	up_read(&ctrl->namespaces_rwsem);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	up_read(&ctrl->namespaces_rwsem);
	return ret;
}

static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ctrl, NULL, argp);
	case NVME_IOCTL_IO_CMD:
		return nvme_dev_user_cmd(ctrl, argp);
	case NVME_IOCTL_RESET:
		dev_warn(ctrl->device, "resetting controller\n");
		return nvme_reset_ctrl_sync(ctrl);
	case NVME_IOCTL_SUBSYS_RESET:
		return nvme_reset_subsystem(ctrl);
	case NVME_IOCTL_RESCAN:
		nvme_queue_scan(ctrl);
		return 0;
	default:
		return -ENOTTY;
	}
}

static const struct file_operations nvme_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_dev_open,
	.unlocked_ioctl	= nvme_dev_ioctl,
	.compat_ioctl	= nvme_dev_ioctl,
};

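/* Controller sysfs attributes, exposed under /sys/class/nvme/nvmeX. */
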
static ssize_t nvme_sysfs_reset(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = nvme_reset_ctrl_sync(ctrl);
	if (ret < 0)
		return ret;
	return count;
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);

static ssize_t nvme_sysfs_rescan(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	nvme_queue_scan(ctrl);
	return count;
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);

static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (disk->fops == &nvme_fops)
		return nvme_get_ns_from_dev(dev)->head;
	else
		return disk->private_data;
}

static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct nvme_ns_ids *ids = &head->ids;
	struct nvme_subsystem *subsys = head->subsys;
	int serial_len = sizeof(subsys->serial);
	int model_len = sizeof(subsys->model);

	if (!uuid_is_null(&ids->uuid))
		return sprintf(buf, "uuid.%pU\n", &ids->uuid);

	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		return sprintf(buf, "eui.%16phN\n", ids->nguid);

	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		return sprintf(buf, "eui.%8phN\n", ids->eui64);

	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
				  subsys->serial[serial_len - 1] == '\0'))
		serial_len--;
	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
				 subsys->model[model_len - 1] == '\0'))
		model_len--;

	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
		serial_len, subsys->serial, model_len, subsys->model,
		head->ns_id);
}
static DEVICE_ATTR_RO(wwid);

static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
static DEVICE_ATTR_RO(nguid);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	/* For backward compatibility expose the NGUID to userspace if
	 * we have no UUID set
	 */
	if (uuid_is_null(&ids->uuid)) {
		printk_ratelimited(KERN_WARNING
				   "No UUID available providing old NGUID\n");
		return sprintf(buf, "%pU\n", ids->nguid);
	}
	return sprintf(buf, "%pU\n", &ids->uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
static DEVICE_ATTR_RO(eui);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
static DEVICE_ATTR_RO(nsid);

static struct attribute *nvme_ns_id_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_eui.attr,
	&dev_attr_nsid.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&dev_attr_ana_grpid.attr,
	&dev_attr_ana_state.attr,
#endif
	NULL,
};

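/*
 * Only expose the identifier attributes the namespace actually reported:
 * an all-zeroes NGUID or EUI-64 (or a null UUID) means the field is absent
 * and its sysfs file is hidden.
 */
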
static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	if (a == &dev_attr_uuid.attr) {
		if (uuid_is_null(&ids->uuid) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			return 0;
	}
#ifdef CONFIG_NVME_MULTIPATH
	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
		if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */
			return 0;
		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
			return 0;
	}
#endif
	return a->mode;
}

const struct attribute_group nvme_ns_id_attr_group = {
	.attrs		= nvme_ns_id_attrs,
	.is_visible	= nvme_ns_id_attrs_are_visible,
};

#define nvme_show_str_function(field)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);			\
	return sprintf(buf, "%.*s\n",					\
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);\
}									\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);

#define nvme_show_int_function(field)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);			\
	return sprintf(buf, "%d\n", ctrl->field);			\
}									\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);

static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (device_remove_file_self(dev, attr))
		nvme_delete_ctrl_sync(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);

static ssize_t nvme_sysfs_show_transport(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

static ssize_t nvme_sysfs_show_state(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	static const char *const state_name[] = {
		[NVME_CTRL_NEW]		= "new",
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_ADMIN_ONLY]	= "only-admin",
		[NVME_CTRL_RESETTING]	= "resetting",
		[NVME_CTRL_CONNECTING]	= "connecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DEAD]	= "dead",
	};

	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
	    state_name[ctrl->state])
		return sprintf(buf, "%s\n", state_name[ctrl->state]);

	return sprintf(buf, "unknown state\n");
}
static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);

static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
}
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);

static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	NULL,
};

static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
		return 0;
	if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
		return 0;

	return a->mode;
}

static struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};

static const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	NULL,
};

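/*
 * Namespace heads: each NSID within a subsystem is represented by a single
 * nvme_ns_head that every per-controller path (struct nvme_ns) attaches to;
 * this is the structure native multipath support builds on.
 */
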
static struct nvme_ns_head *__nvme_find_ns_head(struct nvme_subsystem *subsys,
		unsigned nsid)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(h, &subsys->nsheads, entry) {
		if (h->ns_id == nsid && kref_get_unless_zero(&h->ref))
			return h;
	}

	return NULL;
}

static int __nvme_check_ids(struct nvme_subsystem *subsys,
		struct nvme_ns_head *new)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(h, &subsys->nsheads, entry) {
		if (nvme_ns_ids_valid(&new->ids) &&
		    !list_empty(&h->list) &&
		    nvme_ns_ids_equal(&new->ids, &h->ids))
			return -EINVAL;
	}

	return 0;
}

static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
		unsigned nsid, struct nvme_id_ns *id)
{
	struct nvme_ns_head *head;
	int ret = -ENOMEM;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		goto out;
	ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
	if (ret < 0)
		goto out_free_head;
	head->instance = ret;
	INIT_LIST_HEAD(&head->list);
	ret = init_srcu_struct(&head->srcu);
	if (ret)
		goto out_ida_remove;
	head->subsys = ctrl->subsys;
	head->ns_id = nsid;
	kref_init(&head->ref);

	nvme_report_ns_ids(ctrl, nsid, id, &head->ids);

	ret = __nvme_check_ids(ctrl->subsys, head);
	if (ret) {
		dev_err(ctrl->device,
			"duplicate IDs for nsid %d\n", nsid);
		goto out_cleanup_srcu;
	}

	ret = nvme_mpath_alloc_disk(ctrl, head);
	if (ret)
		goto out_cleanup_srcu;

	list_add_tail(&head->entry, &ctrl->subsys->nsheads);

	kref_get(&ctrl->subsys->ref);

	return head;
out_cleanup_srcu:
	cleanup_srcu_struct(&head->srcu);
out_ida_remove:
	ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
out_free_head:
	kfree(head);
out:
	return ERR_PTR(ret);
}

static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
		struct nvme_id_ns *id)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	bool is_shared = id->nmic & (1 << 0);
	struct nvme_ns_head *head = NULL;
	int ret = 0;

	mutex_lock(&ctrl->subsys->lock);
	if (is_shared)
		head = __nvme_find_ns_head(ctrl->subsys, nsid);
	if (!head) {
		head = nvme_alloc_ns_head(ctrl, nsid, id);
		if (IS_ERR(head)) {
			ret = PTR_ERR(head);
			goto out_unlock;
		}
	} else {
		struct nvme_ns_ids ids;

		nvme_report_ns_ids(ctrl, nsid, id, &ids);
		if (!nvme_ns_ids_equal(&head->ids, &ids)) {
			dev_err(ctrl->device,
				"IDs don't match for shared namespace %d\n",
					nsid);
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	list_add_tail(&ns->siblings, &head->list);
	ns->head = head;

out_unlock:
	mutex_unlock(&ctrl->subsys->lock);
	return ret;
}

static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
	struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);

	return nsa->head->ns_id - nsb->head->ns_id;
}

static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns, *ret = NULL;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->ns_id == nsid) {
			if (!kref_get_unless_zero(&ns->kref))
				continue;
			ret = ns;
			break;
		}
		if (ns->head->ns_id > nsid)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
	return ret;
}

static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns)
{
	struct streams_directive_params s;
	int ret;

	if (!ctrl->nr_streams)
		return 0;

	ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
	if (ret)
		return ret;

	ns->sws = le32_to_cpu(s.sws);
	ns->sgs = le16_to_cpu(s.sgs);

	if (ns->sws) {
		unsigned int bs = 1 << ns->lba_shift;

		blk_queue_io_min(ns->queue, bs * ns->sws);
		if (ns->sgs)
			blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs);
	}

	return 0;
}

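/*
 * nvme_alloc_ns() brings a new namespace online: allocate the request
 * queue, identify the namespace, attach it to (or create) its ns_head,
 * then register the gendisk and its sysfs groups.
 */
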
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	struct nvme_id_ns *id;
	char disk_name[DISK_NAME_LEN];
	int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT;

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return;

	ns->queue = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ns->queue))
		goto out_free_ns;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;

	kref_init(&ns->kref);
	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	nvme_set_queue_limits(ctrl, ns->queue);

	id = nvme_identify_ns(ctrl, nsid);
	if (!id)
		goto out_free_queue;

	if (id->ncap == 0)
		goto out_free_id;

	if (nvme_init_ns_head(ns, nsid, id))
		goto out_free_id;
	nvme_setup_streams_ns(ctrl, ns);
	nvme_set_disk_name(disk_name, ns, ctrl, &flags);

	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
		if (nvme_nvm_register(ns, disk_name, node)) {
			dev_warn(ctrl->device, "LightNVM init failure\n");
			goto out_unlink_ns;
		}
	}

	disk = alloc_disk_node(0, node);
	if (!disk)
		goto out_unlink_ns;

	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->flags = flags;
	memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
	ns->disk = disk;

	__nvme_revalidate_disk(disk, id);

	down_write(&ctrl->namespaces_rwsem);
	list_add_tail(&ns->list, &ctrl->namespaces);
	up_write(&ctrl->namespaces_rwsem);

	nvme_get_ctrl(ctrl);

	device_add_disk(ctrl->device, ns->disk);
	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_id_attr_group))
		pr_warn("%s: failed to create sysfs group for identification\n",
			ns->disk->disk_name);
	if (ns->ndev && nvme_nvm_register_sysfs(ns))
		pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
			ns->disk->disk_name);

	nvme_mpath_add_disk(ns, id);
	nvme_fault_inject_init(ns);
	kfree(id);

	return;
out_unlink_ns:
	mutex_lock(&ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
	mutex_unlock(&ctrl->subsys->lock);
out_free_id:
	kfree(id);
out_free_queue:
	blk_cleanup_queue(ns->queue);
out_free_ns:
	kfree(ns);
}

static void nvme_ns_remove(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	nvme_fault_inject_fini(ns);
	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_id_attr_group);
		if (ns->ndev)
			nvme_nvm_unregister_sysfs(ns);
		del_gendisk(ns->disk);
		blk_cleanup_queue(ns->queue);
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
	}

	mutex_lock(&ns->ctrl->subsys->lock);
	list_del_rcu(&ns->siblings);
	nvme_mpath_clear_current_path(ns);
	mutex_unlock(&ns->ctrl->subsys->lock);

	down_write(&ns->ctrl->namespaces_rwsem);
	list_del_init(&ns->list);
	up_write(&ns->ctrl->namespaces_rwsem);

	synchronize_srcu(&ns->head->srcu);
	nvme_mpath_check_last_path(ns);
	nvme_put_ns(ns);
}

static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	ns = nvme_find_get_ns(ctrl, nsid);
	if (ns) {
		if (ns->disk && revalidate_disk(ns->disk))
			nvme_ns_remove(ns);
		nvme_put_ns(ns);
	} else
		nvme_alloc_ns(ctrl, nsid);
}

static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					unsigned nsid)
{
	struct nvme_ns *ns, *next;
	LIST_HEAD(rm_list);

	down_write(&ctrl->namespaces_rwsem);
	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
			list_move_tail(&ns->list, &rm_list);
	}
	up_write(&ctrl->namespaces_rwsem);

	list_for_each_entry_safe(ns, next, &rm_list, list)
		nvme_ns_remove(ns);
}

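/*
 * Namespace scanning: NVMe 1.1+ controllers support the Identify
 * active-namespace-ID list, which lets us walk up to 1024 NSIDs per
 * command; older or quirked controllers fall back to probing every
 * NSID up to the controller's reported namespace count sequentially.
 */
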
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
	struct nvme_ns *ns;
	__le32 *ns_list;
	unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
	int ret = 0;

	ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!ns_list)
		return -ENOMEM;

	for (i = 0; i < num_lists; i++) {
		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
		if (ret)
			goto free;

		for (j = 0; j < min(nn, 1024U); j++) {
			nsid = le32_to_cpu(ns_list[j]);
			if (!nsid)
				goto out;

			nvme_validate_ns(ctrl, nsid);

			while (++prev < nsid) {
				ns = nvme_find_get_ns(ctrl, prev);
				if (ns) {
					nvme_ns_remove(ns);
					nvme_put_ns(ns);
				}
			}
		}
		nn -= j;
	}
out:
	nvme_remove_invalid_namespaces(ctrl, prev);
free:
	kfree(ns_list);
	return ret;
}

static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
{
	unsigned i;

	for (i = 1; i <= nn; i++)
		nvme_validate_ns(ctrl, i);

	nvme_remove_invalid_namespaces(ctrl, nn);
}

static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
{
	size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
	__le32 *log;
	int error;

	log = kzalloc(log_size, GFP_KERNEL);
	if (!log)
		return;

	/*
	 * We need to read the log to clear the AEN, but we don't want to rely
	 * on it for the changed namespace information as userspace could have
	 * raced with us in reading the log page, which could cause us to miss
	 * updates.
	 */
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log,
			log_size, 0);
	if (error)
		dev_warn(ctrl->device,
			"reading changed ns log failed: %d\n", error);

	kfree(log);
}

static void nvme_scan_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);
	struct nvme_id_ctrl *id;
	unsigned nn;

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	WARN_ON_ONCE(!ctrl->tagset);

	if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
		dev_info(ctrl->device, "rescanning namespaces.\n");
		nvme_clear_changed_ns_log(ctrl);
	}

	if (nvme_identify_ctrl(ctrl, &id))
		return;

	mutex_lock(&ctrl->scan_lock);
	nn = le32_to_cpu(id->nn);
	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		if (!nvme_scan_ns_list(ctrl, nn))
			goto out_free_id;
	}
	nvme_scan_ns_sequential(ctrl, nn);
out_free_id:
	mutex_unlock(&ctrl->scan_lock);
	kfree(id);

	down_write(&ctrl->namespaces_rwsem);
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
	up_write(&ctrl->namespaces_rwsem);
}

/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;
	LIST_HEAD(ns_list);

	/* prevent racing with ns scanning */
	flush_work(&ctrl->scan_work);

	/*
	 * The dead state indicates that the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

	down_write(&ctrl->namespaces_rwsem);
	list_splice_init(&ctrl->namespaces, &ns_list);
	up_write(&ctrl->namespaces_rwsem);

	list_for_each_entry_safe(ns, next, &ns_list, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);

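/*
 * AEN results that are not handled in the kernel are forwarded to
 * userspace as a KOBJ_CHANGE uevent carrying an NVME_AEN=<result> payload.
 */
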
static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
{
	char *envp[2] = { NULL, NULL };
	u32 aen_result = ctrl->aen_result;

	ctrl->aen_result = 0;
	if (!aen_result)
		return;

	envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
	if (!envp[0])
		return;
	kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
	kfree(envp[0]);
}

static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	nvme_aen_uevent(ctrl);
	ctrl->ops->submit_async_event(ctrl);
}

static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
{
	u32 csts;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
		return false;

	if (csts == ~0)
		return false;

	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
}

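/*
 * Firmware activation: while CSTS.PP (processing paused) is set the
 * controller cannot service commands, so the work below quiesces the I/O
 * queues until activation completes or the MTFA-derived timeout expires;
 * reading the firmware slot log afterwards clears the AER.
 */
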
static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
	struct nvme_fw_slot_info_log *log;

	log = kmalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return;

	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
			sizeof(*log), 0))
		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
	kfree(log);
}

static void nvme_fw_act_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work,
				struct nvme_ctrl, fw_act_work);
	unsigned long fw_act_timeout;

	if (ctrl->mtfa)
		fw_act_timeout = jiffies +
				msecs_to_jiffies(ctrl->mtfa * 100);
	else
		fw_act_timeout = jiffies +
				msecs_to_jiffies(admin_timeout * 1000);

	nvme_stop_queues(ctrl);
	while (nvme_ctrl_pp_status(ctrl)) {
		if (time_after(jiffies, fw_act_timeout)) {
			dev_warn(ctrl->device,
				"Fw activation timeout, reset controller\n");
			nvme_reset_ctrl(ctrl);
			break;
		}
		msleep(100);
	}

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	nvme_start_queues(ctrl);
	/* read FW slot information to clear the AER */
	nvme_get_fw_slot_info(ctrl);
}

static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
	switch ((result & 0xff00) >> 8) {
	case NVME_AER_NOTICE_NS_CHANGED:
		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
		nvme_queue_scan(ctrl);
		break;
	case NVME_AER_NOTICE_FW_ACT_STARTING:
		queue_work(nvme_wq, &ctrl->fw_act_work);
		break;
#ifdef CONFIG_NVME_MULTIPATH
	case NVME_AER_NOTICE_ANA:
		if (!ctrl->ana_log_buf)
			break;
		queue_work(nvme_wq, &ctrl->ana_work);
		break;
#endif
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res)
{
	u32 result = le32_to_cpu(res->u32);

	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
		return;

	switch (result & 0x7) {
	case NVME_AER_NOTICE:
		nvme_handle_aen_notice(ctrl, result);
		break;
	case NVME_AER_ERROR:
	case NVME_AER_SMART:
	case NVME_AER_CSS:
	case NVME_AER_VS:
		ctrl->aen_result = result;
		break;
	default:
		break;
	}
	queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);

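/*
 * nvme_stop_ctrl()/nvme_start_ctrl() bracket controller teardown and
 * (re)initialization in the transport drivers: stop quiesces keep-alive
 * and async-event work, start re-arms keep-alive, AERs, and a namespace
 * scan once I/O queues exist.
 */
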
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_mpath_stop(ctrl);
	nvme_stop_keep_alive(ctrl);
	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fw_act_work);
	if (ctrl->ops->stop_ctrl)
		ctrl->ops->stop_ctrl(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);

void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
	if (ctrl->kato)
		nvme_start_keep_alive(ctrl);

	if (ctrl->queue_count > 1) {
		nvme_queue_scan(ctrl);
		nvme_enable_aen(ctrl);
		queue_work(nvme_wq, &ctrl->async_event_work);
		nvme_start_queues(ctrl);
	}
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);

void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	dev_pm_qos_hide_latency_tolerance(ctrl->device);
	cdev_device_del(&ctrl->cdev, ctrl->device);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);

static void nvme_free_ctrl(struct device *dev)
{
	struct nvme_ctrl *ctrl =
		container_of(dev, struct nvme_ctrl, ctrl_device);
	struct nvme_subsystem *subsys = ctrl->subsys;

	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
	kfree(ctrl->effects);
	nvme_mpath_uninit(ctrl);

	if (subsys) {
		mutex_lock(&subsys->lock);
		list_del(&ctrl->subsys_entry);
		mutex_unlock(&subsys->lock);
		sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
	}

	ctrl->ops->free_ctrl(ctrl);

	if (subsys)
		nvme_put_subsystem(subsys);
}

/*
 * Initialize an NVMe controller structure.  This needs to be called during
 * the earliest initialization so that we have the initialized structure
 * around during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	spin_lock_init(&ctrl->lock);
	mutex_init(&ctrl->scan_lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	init_rwsem(&ctrl->namespaces_rwsem);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;

	ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out;
	ctrl->instance = ret;

	device_initialize(&ctrl->ctrl_device);
	ctrl->device = &ctrl->ctrl_device;
	ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance);
	ctrl->device->class = nvme_class;
	ctrl->device->parent = ctrl->dev;
	ctrl->device->groups = nvme_dev_attr_groups;
	ctrl->device->release = nvme_free_ctrl;
	dev_set_drvdata(ctrl->device, ctrl);
	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
	if (ret)
		goto out_release_instance;

	cdev_init(&ctrl->cdev, &nvme_dev_fops);
	ctrl->cdev.owner = ops->module;
	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
	if (ret)
		goto out_free_name;

	/*
	 * Initialize latency tolerance controls.  The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	return 0;
out_free_name:
	kfree_const(dev->kobj.name);
out_release_instance:
	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);

/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);

	/* Forcibly unquiesce queues to avoid blocking dispatch */
	if (ctrl->admin_q)
		blk_mq_unquiesce_queue(ctrl->admin_q);

	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_set_queue_dying(ns);

	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);

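/*
 * The freeze/unfreeze helpers below bracket controller resets:
 * nvme_start_freeze() begins draining all namespace queues,
 * nvme_wait_freeze{_timeout}() waits for outstanding requests to finish,
 * and nvme_unfreeze() releases the queues afterwards.
 */
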
void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);

void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);

void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);

void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_freeze_queue_start(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);

void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_quiesce_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);

void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unquiesce_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);

int __init nvme_core_init(void)
{
	int result = -ENOMEM;

	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
		goto out;

	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_reset_wq)
		goto destroy_wq;

	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_delete_wq)
		goto destroy_reset_wq;

	result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
	if (result < 0)
		goto destroy_delete_wq;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}

	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
	if (IS_ERR(nvme_subsys_class)) {
		result = PTR_ERR(nvme_subsys_class);
		goto destroy_class;
	}
	return 0;

destroy_class:
	class_destroy(nvme_class);
unregister_chrdev:
	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
destroy_delete_wq:
	destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
	destroy_workqueue(nvme_reset_wq);
destroy_wq:
	destroy_workqueue(nvme_wq);
out:
	return result;
}

void nvme_core_exit(void)
{
	ida_destroy(&nvme_subsystems_ida);
	class_destroy(nvme_subsys_class);
	class_destroy(nvme_class);
	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
	destroy_workqueue(nvme_delete_wq);
	destroy_workqueue(nvme_reset_wq);
	destroy_workqueue(nvme_wq);
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);
);