// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/acpi.h>
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/blk-integrity.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kstrtox.h>
#include <linux/memremap.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/once.h>
#include <linux/pci.h>
#include <linux/suspend.h>
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>

#define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)
#define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))

#define SGES_PER_PAGE	(NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))

/*
 * These can be higher, but we need to ensure that any command doesn't
 * require an sg allocation that needs more than a page of data.
 */
#define NVME_MAX_KB_SZ	8192
#define NVME_MAX_SEGS	128
#define NVME_MAX_NR_ALLOCATIONS	5
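/*
 * SQ_SIZE() shifts by ->sqes because a submission queue entry is 2^sqes
 * bytes: 6 (64 bytes) for the standard command format, larger only on
 * controllers quirked to use bigger SQEs.  Completion queue entries are
 * always sizeof(struct nvme_completion), i.e. 16 bytes.
 */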
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0444);

static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0444);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

static unsigned int max_host_mem_size_mb = 128;
module_param(max_host_mem_size_mb, uint, 0444);
MODULE_PARM_DESC(max_host_mem_size_mb,
	"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");

static unsigned int sgl_threshold = SZ_32K;
module_param(sgl_threshold, uint, 0644);
MODULE_PARM_DESC(sgl_threshold,
		"Use SGLs when average request segment size is larger or equal to "
		"this size. Use 0 to disable SGLs.");
#define NVME_PCI_MIN_QUEUE_SIZE 2
#define NVME_PCI_MAX_QUEUE_SIZE 4095
static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = {
	.set = io_queue_depth_set,
	.get = param_get_uint,
};

static unsigned int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2 and < 4096");

static int io_queue_count_set(const char *val, const struct kernel_param *kp)
{
	unsigned int n;
	int ret;

	ret = kstrtouint(val, 10, &n);
	if (ret != 0 || n > num_possible_cpus())
		return -EINVAL;
	return param_set_uint(val, kp);
}

static const struct kernel_param_ops io_queue_count_ops = {
	.set = io_queue_count_set,
	.get = param_get_uint,
};

static unsigned int write_queues;
module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644);
MODULE_PARM_DESC(write_queues,
	"Number of queues to use for writes. If not set, reads and writes "
	"will share a queue set.");

static unsigned int poll_queues;
module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");

static bool noacpi;
module_param(noacpi, bool, 0444);
MODULE_PARM_DESC(noacpi, "disable acpi bios quirks");
struct nvme_dev;
struct nvme_queue;

static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
static void nvme_delete_io_queues(struct nvme_dev *dev);
static void nvme_update_attrs(struct nvme_dev *dev);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct nvme_queue *queues;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	unsigned online_queues;
	unsigned max_qid;
	unsigned io_queues[HCTX_MAX_TYPES];
	unsigned int num_vecs;
	u32 q_depth;
	int io_sqes;
	u32 db_stride;
	void __iomem *bar;
	unsigned long bar_mapped_size;
	struct mutex shutdown_lock;
	bool subsystem;
	u64 cmb_size;
	bool cmb_use_sqes;
	u32 cmbsz;
	u32 cmbloc;
	struct nvme_ctrl ctrl;
	u32 last_ps;
	bool hmb;

	mempool_t *iod_mempool;

	/* shadow doorbell buffer support: */
	__le32 *dbbuf_dbs;
	dma_addr_t dbbuf_dbs_dma_addr;
	__le32 *dbbuf_eis;
	dma_addr_t dbbuf_eis_dma_addr;

	/* host memory buffer support: */
	u64 host_mem_size;
	u32 nr_host_mem_descs;
	dma_addr_t host_mem_descs_dma;
	struct nvme_host_mem_buf_desc *host_mem_descs;
	void **host_mem_desc_bufs;
	unsigned int nr_allocated_queues;
	unsigned int nr_write_queues;
	unsigned int nr_poll_queues;
};
static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, NVME_PCI_MIN_QUEUE_SIZE,
			NVME_PCI_MAX_QUEUE_SIZE);
}

static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
	return qid * 2 * stride;
}

static inline unsigned int cq_idx(unsigned int qid, u32 stride)
{
	return (qid * 2 + 1) * stride;
}
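/*
 * The doorbell region holds one (SQ tail, CQ head) register pair per queue,
 * each register padded out to the controller's doorbell stride.  Queue qid
 * therefore owns slot qid * 2 for its SQ doorbell and slot qid * 2 + 1 for
 * its CQ doorbell; the same indexing is reused for the shadow doorbell and
 * event-index arrays below.
 */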
static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
}
/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	spinlock_t sq_lock;
	void *sq_cmds;
	 /* only used for poll queues: */
	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
	struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u32 q_depth;
	u16 cq_vector;
	u16 sq_tail;
	u16 last_sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 sqes;
	unsigned long flags;
#define NVMEQ_ENABLED		0
#define NVMEQ_SQ_CMB		1
#define NVMEQ_DELETE_ERROR	2
#define NVMEQ_POLLED		3
	__le32 *dbbuf_sq_db;
	__le32 *dbbuf_cq_db;
	__le32 *dbbuf_sq_ei;
	__le32 *dbbuf_cq_ei;
	struct completion delete_done;
};

union nvme_descriptor {
	struct nvme_sgl_desc	*sg_list;
	__le64			*prp_list;
};

/*
 * The nvme_iod describes the data in an I/O.
 *
 * The sg pointer contains the list of PRP/SGL chunk allocations in addition
 * to the actual struct scatterlist.
 */
struct nvme_iod {
	struct nvme_request req;
	struct nvme_command cmd;
	bool aborted;
	s8 nr_allocations;	/* PRP list pool allocations. 0 means small
				   pool in use */
	unsigned int dma_len;	/* length of single DMA segment mapping */
	dma_addr_t first_dma;
	dma_addr_t meta_dma;
	struct sg_table sgt;
	union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS];
};
static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
{
	return dev->nr_allocated_queues * 8 * dev->db_stride;
}

static void nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev);

	if (!(dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP))
		return;

	if (dev->dbbuf_dbs) {
		/*
		 * Clear the dbbuf memory so the driver doesn't observe stale
		 * values from the previous instantiation.
		 */
		memset(dev->dbbuf_dbs, 0, mem_size);
		memset(dev->dbbuf_eis, 0, mem_size);
		return;
	}

	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_dbs_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_dbs)
		goto fail;
	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_eis_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_eis)
		goto fail_free_dbbuf_dbs;
	return;

fail_free_dbbuf_dbs:
	dma_free_coherent(dev->dev, mem_size, dev->dbbuf_dbs,
			  dev->dbbuf_dbs_dma_addr);
	dev->dbbuf_dbs = NULL;
fail:
	dev_warn(dev->dev, "unable to allocate dma for dbbuf\n");
}
static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev);

	if (dev->dbbuf_dbs) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
	}
	if (dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
		dev->dbbuf_eis = NULL;
	}
}
static void nvme_dbbuf_init(struct nvme_dev *dev,
			    struct nvme_queue *nvmeq, int qid)
{
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}

static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
{
	if (!nvmeq->qid)
		return;

	nvmeq->dbbuf_sq_db = NULL;
	nvmeq->dbbuf_cq_db = NULL;
	nvmeq->dbbuf_sq_ei = NULL;
	nvmeq->dbbuf_cq_ei = NULL;
}
static void nvme_dbbuf_set(struct nvme_dev *dev)
{
	struct nvme_command c = { };
	unsigned int i;

	if (!dev->dbbuf_dbs)
		return;

	c.dbbuf.opcode = nvme_admin_dbbuf;
	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
		/* Free memory and continue on */
		nvme_dbbuf_dma_free(dev);

		for (i = 1; i <= dev->online_queues; i++)
			nvme_dbbuf_free(&dev->queues[i]);
	}
}
static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}
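/*
 * The unsigned subtraction above is the usual event-index trick: it is true
 * only when event_idx falls in the half-open interval (old, new_idx] modulo
 * 2^16.  For example, with old = 0xfffe, new_idx = 0x0001 and
 * event_idx = 0xffff the doorbell must still be rung even though the
 * counters have wrapped.
 */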
/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
					      volatile __le32 *dbbuf_ei)
{
	if (dbbuf_db) {
		u16 old_value, event_idx;

		/*
		 * Ensure that the queue is written before updating
		 * the doorbell in memory
		 */
		wmb();

		old_value = le32_to_cpu(*dbbuf_db);
		*dbbuf_db = cpu_to_le32(value);

		/*
		 * Ensure that the doorbell is updated before reading the event
		 * index from memory.  The controller needs to provide similar
		 * ordering to ensure the event index is updated before reading
		 * the doorbell.
		 */
		mb();

		event_idx = le32_to_cpu(*dbbuf_ei);
		if (!nvme_dbbuf_need_event(event_idx, value, old_value))
			return false;
	}

	return true;
}
/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the request.
 */
static int nvme_pci_npages_prp(void)
{
	unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
	unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);

	return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
}
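/*
 * With the default 4 KiB NVME_CTRL_PAGE_SIZE this works out to
 * (8192 KiB + 4 KiB) / 4 KiB = 2049 PRP entries, and 8 * 2049 bytes of PRP
 * pointers spread over pages that each hold 511 entries plus a chain
 * pointer, i.e. 5 pages -- which is where NVME_MAX_NR_ALLOCATIONS comes
 * from.
 */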
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	struct nvme_dev *dev = to_nvme_dev(data);
	struct nvme_queue *nvmeq = &dev->queues[0];

	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);

	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct nvme_dev *dev = to_nvme_dev(data);
	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_pci_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_req(req)->ctrl = set->driver_data;
	nvme_req(req)->cmd = &iod->cmd;
	return 0;
}
static int queue_irq_offset(struct nvme_dev *dev)
{
	/* if we have more than 1 vec, admin queue offsets us by 1 */
	if (dev->num_vecs > 1)
		return 1;

	return 0;
}

static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_dev *dev = to_nvme_dev(set->driver_data);
	int i, qoff, offset;

	offset = queue_irq_offset(dev);
	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = dev->io_queues[i];
		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL && offset)
			blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
		else
			blk_mq_map_queues(map);
		qoff += map->nr_queues;
		offset += map->nr_queues;
	}
}
/*
 * Write sq tail if we are asked to, or if the next command would wrap.
 */
static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
{
	if (!write_sq) {
		u16 next_tail = nvmeq->sq_tail + 1;

		if (next_tail == nvmeq->q_depth)
			next_tail = 0;
		if (next_tail != nvmeq->last_sq_tail)
			return;
	}

	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
		writel(nvmeq->sq_tail, nvmeq->q_db);
	nvmeq->last_sq_tail = nvmeq->sq_tail;
}
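/*
 * Deferring the MMIO write like this lets the submission path batch several
 * commands behind a single doorbell update: only the last request of a
 * blk-mq batch (bd->last), a commit_rqs callback, or an imminent tail wrap
 * forces the write, while last_sq_tail remembers what the device has
 * already been told.
 */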
static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq,
				    struct nvme_command *cmd)
{
	memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
		absolute_pointer(cmd), sizeof(*cmd));
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
}

static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	spin_lock(&nvmeq->sq_lock);
	if (nvmeq->sq_tail != nvmeq->last_sq_tail)
		nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);
}
static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
				     int nseg)
{
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
	unsigned int avg_seg_size;

	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);

	if (!nvme_ctrl_sgl_supported(&dev->ctrl))
		return false;
	if (!nvmeq->qid)
		return false;
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		return false;
	return true;
}
static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
{
	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	dma_addr_t dma_addr = iod->first_dma;
	int i;

	for (i = 0; i < iod->nr_allocations; i++) {
		__le64 *prp_list = iod->list[i].prp_list;
		dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);

		dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
		dma_addr = next_dma_addr;
	}
}
static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	if (iod->dma_len) {
		dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
			       rq_dma_dir(req));
		return;
	}

	WARN_ON_ONCE(!iod->sgt.nents);

	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);

	if (iod->nr_allocations == 0)
		dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list,
			      iod->first_dma);
	else if (iod->nr_allocations == 1)
		dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list,
			      iod->first_dma);
	else
		nvme_free_prps(dev, req);
	mempool_free(iod->sgt.sgl, dev->iod_mempool);
}
static void nvme_print_sgl(struct scatterlist *sgl, int nents)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t phys = sg_phys(sg);
		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
			"dma_address:%pad dma_length:%d\n",
			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
			sg_dma_len(sg));
	}
}
static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	int length = blk_rq_payload_bytes(req);
	struct scatterlist *sg = iod->sgt.sgl;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
	__le64 *prp_list;
	dma_addr_t prp_dma;
	int nprps, i;

	length -= (NVME_CTRL_PAGE_SIZE - offset);
	if (length <= 0) {
		iod->first_dma = 0;
		goto done;
	}

	dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= NVME_CTRL_PAGE_SIZE) {
		iod->first_dma = dma_addr;
		goto done;
	}

	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->nr_allocations = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->nr_allocations = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->nr_allocations = -1;
		return BLK_STS_RESOURCE;
	}
	iod->list[0].prp_list = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
	for (;;) {
		if (i == NVME_CTRL_PAGE_SIZE >> 3) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				goto free_prps;
			iod->list[iod->nr_allocations++].prp_list = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= NVME_CTRL_PAGE_SIZE;
		dma_addr += NVME_CTRL_PAGE_SIZE;
		length -= NVME_CTRL_PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		if (unlikely(dma_len < 0))
			goto bad_sgl;
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}
done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
	return BLK_STS_OK;
free_prps:
	nvme_free_prps(dev, req);
	return BLK_STS_RESOURCE;
bad_sgl:
	WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
			"Invalid SGL for payload:%d nents:%d\n",
			blk_rq_payload_bytes(req), iod->sgt.nents);
	return BLK_STS_IOERR;
}
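/*
 * PRP list pages built above are chained the way the spec expects: when a
 * page fills up, its final 8-byte slot is rewritten to hold the DMA address
 * of the next list page (the displaced entry becomes slot 0 of that new
 * page), which is also the chain that nvme_free_prps() walks on teardown.
 */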
static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
		struct scatterlist *sg)
{
	sge->addr = cpu_to_le64(sg_dma_address(sg));
	sge->length = cpu_to_le32(sg_dma_len(sg));
	sge->type = NVME_SGL_FMT_DATA_DESC << 4;
}

static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
		dma_addr_t dma_addr, int entries)
{
	sge->addr = cpu_to_le64(dma_addr);
	sge->length = cpu_to_le32(entries * sizeof(*sge));
	sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
}
static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	struct nvme_sgl_desc *sg_list;
	struct scatterlist *sg = iod->sgt.sgl;
	unsigned int entries = iod->sgt.nents;
	dma_addr_t sgl_dma;
	int i = 0;

	/* setting the transfer type as SGL */
	cmd->flags = NVME_CMD_SGL_METABUF;

	if (entries == 1) {
		nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
		return BLK_STS_OK;
	}

	if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
		pool = dev->prp_small_pool;
		iod->nr_allocations = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->nr_allocations = 1;
	}

	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
	if (!sg_list) {
		iod->nr_allocations = -1;
		return BLK_STS_RESOURCE;
	}

	iod->list[0].sg_list = sg_list;
	iod->first_dma = sgl_dma;

	nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
	do {
		nvme_pci_sgl_set_data(&sg_list[i++], sg);
		sg = sg_next(sg);
	} while (--entries > 0);

	return BLK_STS_OK;
}
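/*
 * Unlike the PRP path, the SGL path never needs to chain segments:
 * NVME_MAX_SEGS (128) data descriptors at 16 bytes each fit comfortably in
 * one page-sized allocation, so a single "last segment" descriptor in the
 * command points at one flat array of data descriptors.
 */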
static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
	unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
	if (bv->bv_len > first_prp_len)
		cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
	else
		cmnd->dptr.prp2 = 0;
	return BLK_STS_OK;
}
static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->flags = NVME_CMD_SGL_METABUF;
	cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
	cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
	cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
	return BLK_STS_OK;
}
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret = BLK_STS_RESOURCE;
	int rc;

	if (blk_rq_nr_phys_segments(req) == 1) {
		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
		struct bio_vec bv = req_bvec(req);

		if (!is_pci_p2pdma_page(bv.bv_page)) {
			if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
				return nvme_setup_prp_simple(dev, req,
							     &cmnd->rw, &bv);

			if (nvmeq->qid && sgl_threshold &&
			    nvme_ctrl_sgl_supported(&dev->ctrl))
				return nvme_setup_sgl_simple(dev, req,
							     &cmnd->rw, &bv);
		}
	}

	iod->dma_len = 0;
	iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
	if (!iod->sgt.sgl)
		return BLK_STS_RESOURCE;
	sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
	iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
	if (!iod->sgt.orig_nents)
		goto out_free_sg;

	rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
			     DMA_ATTR_NO_WARN);
	if (rc) {
		if (rc == -EREMOTEIO)
			ret = BLK_STS_TARGET;
		goto out_free_sg;
	}

	if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
	else
		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
	if (ret != BLK_STS_OK)
		goto out_unmap_sg;
	return BLK_STS_OK;

out_unmap_sg:
	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
out_free_sg:
	mempool_free(iod->sgt.sgl, dev->iod_mempool);
	return ret;
}
static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
			rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->meta_dma))
		return BLK_STS_IOERR;
	cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
	return BLK_STS_OK;
}
static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	iod->aborted = false;
	iod->nr_allocations = -1;
	iod->sgt.nents = 0;

	ret = nvme_setup_cmd(req->q->queuedata, req);
	if (ret)
		return ret;

	if (blk_rq_nr_phys_segments(req)) {
		ret = nvme_map_data(dev, req, &iod->cmd);
		if (ret)
			goto out_free_cmd;
	}

	if (blk_integrity_rq(req)) {
		ret = nvme_map_metadata(dev, req, &iod->cmd);
		if (ret)
			goto out_unmap_data;
	}

	nvme_start_request(req);
	return BLK_STS_OK;
out_unmap_data:
	nvme_unmap_data(dev, req);
out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;
}
/*
 * NOTE: ns is NULL when called on the admin queue.
 */
static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nvme_queue *nvmeq = hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *req = bd->rq;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	/*
	 * We should not need to do this, but we're still using this to
	 * ensure we can drain requests on a dying queue.
	 */
	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
		return BLK_STS_IOERR;

	if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
		return nvme_fail_nonready_command(&dev->ctrl, req);

	ret = nvme_prep_rq(dev, req);
	if (unlikely(ret))
		return ret;
	spin_lock(&nvmeq->sq_lock);
	nvme_sq_copy_cmd(nvmeq, &iod->cmd);
	nvme_write_sq_db(nvmeq, bd->last);
	spin_unlock(&nvmeq->sq_lock);
	return BLK_STS_OK;
}
static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
{
	spin_lock(&nvmeq->sq_lock);
	while (!rq_list_empty(*rqlist)) {
		struct request *req = rq_list_pop(rqlist);
		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

		nvme_sq_copy_cmd(nvmeq, &iod->cmd);
	}
	nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);
}
static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
{
	/*
	 * We should not need to do this, but we're still using this to
	 * ensure we can drain requests on a dying queue.
	 */
	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
		return false;
	if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
		return false;

	return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
}
static void nvme_queue_rqs(struct request **rqlist)
{
	struct request *req, *next, *prev = NULL;
	struct request *requeue_list = NULL;

	rq_list_for_each_safe(rqlist, req, next) {
		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;

		if (!nvme_prep_rq_batch(nvmeq, req)) {
			/* detach 'req' and add to remainder list */
			rq_list_move(rqlist, &requeue_list, req, prev);

			req = prev;
			if (!req)
				continue;
		}

		if (!next || req->mq_hctx != next->mq_hctx) {
			/* detach rest of list, and submit */
			req->rq_next = NULL;
			nvme_submit_cmds(nvmeq, rqlist);
			*rqlist = next;
			prev = NULL;
		} else
			prev = req;
	}

	*rqlist = requeue_list;
}
static __always_inline void nvme_pci_unmap_rq(struct request *req)
{
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;

	if (blk_integrity_rq(req)) {
		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

		dma_unmap_page(dev->dev, iod->meta_dma,
			       rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
	}

	if (blk_rq_nr_phys_segments(req))
		nvme_unmap_data(dev, req);
}

static void nvme_pci_complete_rq(struct request *req)
{
	nvme_pci_unmap_rq(req);
	nvme_complete_rq(req);
}

static void nvme_pci_complete_batch(struct io_comp_batch *iob)
{
	nvme_complete_batch(iob, nvme_pci_unmap_rq);
}
/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{
	struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];

	return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;
}

static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
{
	u16 head = nvmeq->cq_head;

	if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
					      nvmeq->dbbuf_cq_ei))
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
}

static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
{
	if (!nvmeq->qid)
		return nvmeq->dev->admin_tagset.tags[0];
	return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
}
static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
				   struct io_comp_batch *iob, u16 idx)
{
	struct nvme_completion *cqe = &nvmeq->cqes[idx];
	__u16 command_id = READ_ONCE(cqe->command_id);
	struct request *req;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
		nvme_complete_async_event(&nvmeq->dev->ctrl,
				cqe->status, &cqe->result);
		return;
	}

	req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
	if (unlikely(!req)) {
		dev_warn(nvmeq->dev->ctrl.device,
			"invalid id %d completed on queue %d\n",
			command_id, le16_to_cpu(cqe->sq_id));
		return;
	}

	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
	if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
	    !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
					nvme_pci_complete_batch))
		nvme_pci_complete_rq(req);
}
static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
	u32 tmp = nvmeq->cq_head + 1;

	if (tmp == nvmeq->q_depth) {
		nvmeq->cq_head = 0;
		nvmeq->cq_phase ^= 1;
	} else {
		nvmeq->cq_head = tmp;
	}
}

static inline int nvme_poll_cq(struct nvme_queue *nvmeq,
			       struct io_comp_batch *iob)
{
	int found = 0;

	while (nvme_cqe_pending(nvmeq)) {
		found++;
		/*
		 * load-load control dependency between phase and the rest of
		 * the cqe requires a full read memory barrier
		 */
		dma_rmb();
		nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head);
		nvme_update_cq_head(nvmeq);
	}

	if (found)
		nvme_ring_cq_doorbell(nvmeq);
	return found;
}
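/*
 * New completions are detected purely by the phase tag: the controller
 * writes CQEs with the current phase bit, and nvme_update_cq_head() flips
 * cq_phase every time the head wraps, so a stale entry from the previous
 * pass through the ring can never match nvme_cqe_pending().
 */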
static irqreturn_t nvme_irq(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	DEFINE_IO_COMP_BATCH(iob);

	if (nvme_poll_cq(nvmeq, &iob)) {
		if (!rq_list_empty(iob.req_list))
			nvme_pci_complete_batch(&iob);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;

	if (nvme_cqe_pending(nvmeq))
		return IRQ_WAKE_THREAD;
	return IRQ_NONE;
}
/*
 * Poll for completions for any interrupt driven queue
 * Can be called from any context.
 */
static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
{
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);

	WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));

	disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
	nvme_poll_cq(nvmeq, NULL);
	enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
}

static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct nvme_queue *nvmeq = hctx->driver_data;
	bool found;

	if (!nvme_cqe_pending(nvmeq))
		return 0;

	spin_lock(&nvmeq->cq_poll_lock);
	found = nvme_poll_cq(nvmeq, iob);
	spin_unlock(&nvmeq->cq_poll_lock);

	return found;
}
static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);
	struct nvme_queue *nvmeq = &dev->queues[0];
	struct nvme_command c = { };

	c.common.opcode = nvme_admin_async_event;
	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;

	spin_lock(&nvmeq->sq_lock);
	nvme_sq_copy_cmd(nvmeq, &c);
	nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);
}
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c = { };

	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}
static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
		struct nvme_queue *nvmeq, s16 vector)
{
	struct nvme_command c = { };
	int flags = NVME_QUEUE_PHYS_CONTIG;

	if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
		flags |= NVME_CQ_IRQ_ENABLED;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(vector);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}
static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
		struct nvme_queue *nvmeq)
{
	struct nvme_ctrl *ctrl = &dev->ctrl;
	struct nvme_command c = { };
	int flags = NVME_QUEUE_PHYS_CONTIG;

	/*
	 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
	 * set. Since URGENT priority is zeroes, it makes all queues
	 * URGENT.
	 */
	if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
		flags |= NVME_SQ_PRIO_MEDIUM;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}
static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}
static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error)
{
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;

	dev_warn(nvmeq->dev->ctrl.device,
		 "Abort status: 0x%x", nvme_req(req)->status);
	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
	blk_mq_free_request(req);
	return RQ_END_IO_NONE;
}
static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
{
	/* If true, indicates loss of adapter communication, possibly by a
	 * NVMe Subsystem reset.
	 */
	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);

	/* If there is a reset/reinit ongoing, we shouldn't reset again. */
	switch (nvme_ctrl_state(&dev->ctrl)) {
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	default:
		break;
	}

	/* We shouldn't reset unless the controller is on fatal error state
	 * _or_ if we lost the communication with it.
	 */
	if (!(csts & NVME_CSTS_CFS) && !nssro)
		return false;

	return true;
}
static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
{
	/* Read a config register to help see what died. */
	u16 pci_status;
	int result;

	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
				      &pci_status);
	if (result == PCIBIOS_SUCCESSFUL)
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
			 csts, pci_status);
	else
		dev_warn(dev->ctrl.device,
			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
			 csts, result);

	if (csts != 0xffffffff)
		return;

	dev_warn(dev->ctrl.device,
		 "Does your device have a faulty power saving mode enabled?\n");
	dev_warn(dev->ctrl.device,
		 "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");
}
static enum blk_eh_timer_return nvme_timeout(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *abort_req;
	struct nvme_command cmd = { };
	u32 csts = readl(dev->bar + NVME_REG_CSTS);
	u8 opcode;

	/* If PCI error recovery process is happening, we cannot reset or
	 * the recovery mechanism will surely fail.
	 */
	mb();
	if (pci_channel_offline(to_pci_dev(dev->dev)))
		return BLK_EH_RESET_TIMER;

	/*
	 * Reset immediately if the controller is failed
	 */
	if (nvme_should_reset(dev, csts)) {
		nvme_warn_reset(dev, csts);
		goto disable;
	}

	/*
	 * Did we miss an interrupt?
	 */
	if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
		nvme_poll(req->mq_hctx, NULL);
	else
		nvme_poll_irqdisable(nvmeq);

	if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
		dev_warn(dev->ctrl.device,
			 "I/O tag %d (%04x) QID %d timeout, completion polled\n",
			 req->tag, nvme_cid(req), nvmeq->qid);
		return BLK_EH_DONE;
	}

	/*
	 * Shutdown immediately if controller times out while starting. The
	 * reset work will see the pci device disabled when it gets the forced
	 * cancellation error. All outstanding requests are completed on
	 * shutdown, so we return BLK_EH_DONE.
	 */
	switch (nvme_ctrl_state(&dev->ctrl)) {
	case NVME_CTRL_CONNECTING:
		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
		fallthrough;
	case NVME_CTRL_DELETING:
		dev_warn_ratelimited(dev->ctrl.device,
			 "I/O tag %d (%04x) QID %d timeout, disable controller\n",
			 req->tag, nvme_cid(req), nvmeq->qid);
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		nvme_dev_disable(dev, true);
		return BLK_EH_DONE;
	case NVME_CTRL_RESETTING:
		return BLK_EH_RESET_TIMER;
	default:
		break;
	}

	/*
	 * Shutdown the controller immediately and schedule a reset if the
	 * command was already aborted once before and still hasn't been
	 * returned to the driver, or if this is the admin queue.
	 */
	opcode = nvme_req(req)->cmd->common.opcode;
	if (!nvmeq->qid || iod->aborted) {
		dev_warn(dev->ctrl.device,
			 "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n",
			 req->tag, nvme_cid(req), opcode,
			 nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid);
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		goto disable;
	}

	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}
	iod->aborted = true;

	cmd.abort.opcode = nvme_admin_abort_cmd;
	cmd.abort.cid = nvme_cid(req);
	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);

	dev_warn(nvmeq->dev->ctrl.device,
		 "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, aborting req_op:%s(%u) size:%u\n",
		 req->tag, nvme_cid(req), opcode, nvme_get_opcode_str(opcode),
		 nvmeq->qid, blk_op_str(req_op(req)), req_op(req),
		 blk_rq_bytes(req));

	abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
					 BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(abort_req)) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}
	nvme_init_request(abort_req, &cmd);

	abort_req->end_io = abort_endio;
	abort_req->end_io_data = NULL;
	blk_execute_rq_nowait(abort_req, false);

	/*
	 * The aborted req will be completed on receiving the abort req.
	 * We enable the timer again. If hit twice, it'll cause a device reset,
	 * as the device then is in a faulty state.
	 */
	return BLK_EH_RESET_TIMER;

disable:
	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
		return BLK_EH_DONE;

	nvme_dev_disable(dev, false);
	if (nvme_try_sched_reset(&dev->ctrl))
		nvme_unquiesce_io_queues(&dev->ctrl);
	return BLK_EH_DONE;
}
static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	if (!nvmeq->sq_cmds)
		return;

	if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
		pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
				nvmeq->sq_cmds, SQ_SIZE(nvmeq));
	} else {
		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
				  nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	}
}
static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
		dev->ctrl.queue_count--;
		nvme_free_queue(&dev->queues[i]);
	}
}
static void nvme_suspend_queue(struct nvme_dev *dev, unsigned int qid)
{
	struct nvme_queue *nvmeq = &dev->queues[qid];

	if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags))
		return;

	/* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */
	mb();

	nvmeq->dev->online_queues--;
	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
		nvme_quiesce_admin_queue(&nvmeq->dev->ctrl);
	if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
		pci_free_irq(to_pci_dev(dev->dev), nvmeq->cq_vector, nvmeq);
}

static void nvme_suspend_io_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
		nvme_suspend_queue(dev, i);
}
/*
 * Called only on a device that has been disabled and after all other threads
 * that can check this device's completion queues have synced, except
 * nvme_poll(). This is the last chance for the driver to see a natural
 * completion before nvme_cancel_request() terminates all incomplete requests.
 */
static void nvme_reap_pending_cqes(struct nvme_dev *dev)
{
	int i;

	for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
		spin_lock(&dev->queues[i].cq_poll_lock);
		nvme_poll_cq(&dev->queues[i], NULL);
		spin_unlock(&dev->queues[i].cq_poll_lock);
	}
}
static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
			   int entry_size)
{
	int q_depth = dev->q_depth;
	unsigned q_size_aligned = roundup(q_depth * entry_size,
					  NVME_CTRL_PAGE_SIZE);

	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);

		mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE);
		q_depth = div_u64(mem_per_q, entry_size);

		/*
		 * Ensure the reduced q_depth is above some threshold where it
		 * would be better to map queues in system memory with the
		 * original depth
		 */
		if (q_depth < 64)
			return -ENOMEM;
	}

	return q_depth;
}
static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
				int qid)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
		if (nvmeq->sq_cmds) {
			nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
							nvmeq->sq_cmds);
			if (nvmeq->sq_dma_addr) {
				set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
				return 0;
			}

			pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
		}
	}

	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
				&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		return -ENOMEM;
	return 0;
}
static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
{
	struct nvme_queue *nvmeq = &dev->queues[qid];

	if (dev->ctrl.queue_count > qid)
		return 0;

	nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES;
	nvmeq->q_depth = depth;
	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
					 &nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;

	if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
		goto free_cqdma;

	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->sq_lock);
	spin_lock_init(&nvmeq->cq_poll_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->qid = qid;
	dev->ctrl.queue_count++;

	return 0;

 free_cqdma:
	dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
			  nvmeq->cq_dma_addr);
 free_nvmeq:
	return -ENOMEM;
}
static int queue_request_irq(struct nvme_queue *nvmeq)
{
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
	int nr = nvmeq->dev->ctrl.instance;

	if (use_threaded_interrupts) {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
				nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	} else {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
				NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	}
}
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->last_sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
	nvme_dbbuf_init(dev, nvmeq, qid);
	dev->online_queues++;
	wmb(); /* ensure the first interrupt sees the initialization */
}
/*
 * Try getting shutdown_lock while setting up IO queues.
 */
static int nvme_setup_io_queues_trylock(struct nvme_dev *dev)
{
	/*
	 * Give up if the lock is being held by nvme_dev_disable.
	 */
	if (!mutex_trylock(&dev->shutdown_lock))
		return -ENODEV;

	/*
	 * Controller is in wrong state, fail early.
	 */
	if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_CONNECTING) {
		mutex_unlock(&dev->shutdown_lock);
		return -ENODEV;
	}

	return 0;
}
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;
	u16 vector = 0;

	clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);

	/*
	 * A queue's vector matches the queue identifier unless the controller
	 * has only one vector available.
	 */
	if (!polled)
		vector = dev->num_vecs == 1 ? 0 : qid;
	else
		set_bit(NVMEQ_POLLED, &nvmeq->flags);

	result = adapter_alloc_cq(dev, qid, nvmeq, vector);
	if (result)
		return result;

	result = adapter_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		return result;
	if (result)
		goto release_cq;

	nvmeq->cq_vector = vector;

	result = nvme_setup_io_queues_trylock(dev);
	if (result)
		return result;
	nvme_init_queue(nvmeq, qid);
	if (!polled) {
		result = queue_request_irq(nvmeq);
		if (result < 0)
			goto release_sq;
	}

	set_bit(NVMEQ_ENABLED, &nvmeq->flags);
	mutex_unlock(&dev->shutdown_lock);
	return result;

release_sq:
	dev->online_queues--;
	mutex_unlock(&dev->shutdown_lock);
	adapter_delete_sq(dev, qid);
release_cq:
	adapter_delete_cq(dev, qid);
	return result;
}
static const struct blk_mq_ops nvme_mq_admin_ops = {
	.queue_rq	= nvme_queue_rq,
	.complete	= nvme_pci_complete_rq,
	.init_hctx	= nvme_admin_init_hctx,
	.init_request	= nvme_pci_init_request,
	.timeout	= nvme_timeout,
};

static const struct blk_mq_ops nvme_mq_ops = {
	.queue_rq	= nvme_queue_rq,
	.queue_rqs	= nvme_queue_rqs,
	.complete	= nvme_pci_complete_rq,
	.commit_rqs	= nvme_commit_rqs,
	.init_hctx	= nvme_init_hctx,
	.init_request	= nvme_pci_init_request,
	.map_queues	= nvme_pci_map_queues,
	.timeout	= nvme_timeout,
	.poll		= nvme_poll,
};
static void nvme_dev_remove_admin(struct nvme_dev *dev)
{
	if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
		/*
		 * If the controller was reset during removal, it's possible
		 * user requests may be waiting on a stopped queue. Start the
		 * queue to flush these to completion.
		 */
		nvme_unquiesce_admin_queue(&dev->ctrl);
		nvme_remove_admin_tag_set(&dev->ctrl);
	}
}
static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
	return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
}

static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (size <= dev->bar_mapped_size)
		return 0;
	if (size > pci_resource_len(pdev, 0))
		return -ENOMEM;
	if (dev->bar)
		iounmap(dev->bar);
	dev->bar = ioremap(pci_resource_start(pdev, 0), size);
	if (!dev->bar) {
		dev->bar_mapped_size = 0;
		return -ENOMEM;
	}
	dev->bar_mapped_size = size;
	dev->dbs = dev->bar + NVME_REG_DBS;

	return 0;
}
static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	struct nvme_queue *nvmeq;

	result = nvme_remap_bar(dev, db_bar_size(dev, 0));
	if (result < 0)
		return result;

	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
				NVME_CAP_NSSRC(dev->ctrl.cap) : 0;

	if (dev->subsystem &&
	    (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
		writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);

	/*
	 * If the device has been passed off to us in an enabled state, just
	 * clear the enabled bit.  The spec says we should set the 'shutdown
	 * notification bits', but doing so may cause the device to complete
	 * commands to the admin queue ... and we don't know what memory that
	 * might be pointing at!
	 */
	result = nvme_disable_ctrl(&dev->ctrl, false);
	if (result < 0)
		return result;

	result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
	if (result)
		return result;

	dev->ctrl.numa_node = dev_to_node(dev->dev);

	nvmeq = &dev->queues[0];
	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	writel(aqa, dev->bar + NVME_REG_AQA);
	lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
	lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);

	result = nvme_enable_ctrl(&dev->ctrl);
	if (result)
		return result;

	nvmeq->cq_vector = 0;
	nvme_init_queue(nvmeq, 0);
	result = queue_request_irq(nvmeq);
	if (result) {
		dev->online_queues--;
		return result;
	}

	set_bit(NVMEQ_ENABLED, &nvmeq->flags);
	return result;
}
static int nvme_create_io_queues(struct nvme_dev *dev)
{
	unsigned i, max, rw_queues;
	int ret = 0;

	for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
		if (nvme_alloc_queue(dev, i, dev->q_depth)) {
			ret = -ENOMEM;
			break;
		}
	}

	max = min(dev->max_qid, dev->ctrl.queue_count - 1);
	if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) {
		rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] +
				dev->io_queues[HCTX_TYPE_READ];
	} else {
		rw_queues = max;
	}

	for (i = dev->online_queues; i <= max; i++) {
		bool polled = i > rw_queues;

		ret = nvme_create_queue(&dev->queues[i], i, polled);
		if (ret)
			break;
	}

	/*
	 * Ignore failing Create SQ/CQ commands, we can continue with less
	 * than the desired amount of queues, and even a controller without
	 * I/O queues can still be used to issue admin commands.  This might
	 * be useful to upgrade a buggy firmware for example.
	 */
	return ret >= 0 ? 0 : ret;
}
static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
{
	u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;

	return 1ULL << (12 + 4 * szu);
}

static u32 nvme_cmb_size(struct nvme_dev *dev)
{
	return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
}
static void nvme_map_cmb(struct nvme_dev *dev)
{
	u64 size, offset;
	resource_size_t bar_size;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int bar;

	if (dev->cmb_size)
		return;

	if (NVME_CAP_CMBS(dev->ctrl.cap))
		writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);

	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
	if (!dev->cmbsz)
		return;
	dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);

	size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
	offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
	bar = NVME_CMB_BIR(dev->cmbloc);
	bar_size = pci_resource_len(pdev, bar);

	if (offset > bar_size)
		return;

	/*
	 * Tell the controller about the host side address mapping the CMB,
	 * and enable CMB decoding for the NVMe 1.4+ scheme:
	 */
	if (NVME_CAP_CMBS(dev->ctrl.cap)) {
		hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
			     (pci_bus_address(pdev, bar) + offset),
			     dev->bar + NVME_REG_CMBMSC);
	}

	/*
	 * Controllers may support a CMB size larger than their BAR,
	 * for example, due to being behind a bridge. Reduce the CMB to
	 * the reported size of the BAR
	 */
	if (size > bar_size - offset)
		size = bar_size - offset;

	if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
		dev_warn(dev->ctrl.device,
			 "failed to register the CMB\n");
		return;
	}

	dev->cmb_size = size;
	dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS);

	if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
			(NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
		pci_p2pmem_publish(pdev, true);

	nvme_update_attrs(dev);
}
static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
{
	u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT;
	u64 dma_addr = dev->host_mem_descs_dma;
	struct nvme_command c = { };
	int ret;

	c.features.opcode	= nvme_admin_set_features;
	c.features.fid		= cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
	c.features.dword11	= cpu_to_le32(bits);
	c.features.dword12	= cpu_to_le32(host_mem_size);
	c.features.dword13	= cpu_to_le32(lower_32_bits(dma_addr));
	c.features.dword14	= cpu_to_le32(upper_32_bits(dma_addr));
	c.features.dword15	= cpu_to_le32(dev->nr_host_mem_descs);

	ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
	if (ret) {
		dev_warn(dev->ctrl.device,
			 "failed to set host mem (err %d, flags %#x).\n",
			 ret, bits);
	} else
		dev->hmb = bits & NVME_HOST_MEM_ENABLE;

	return ret;
}
static void nvme_free_host_mem(struct nvme_dev *dev)
{
	int i;

	for (i = 0; i < dev->nr_host_mem_descs; i++) {
		struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
		size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE;

		dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
			       le64_to_cpu(desc->addr),
			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
	}

	kfree(dev->host_mem_desc_bufs);
	dev->host_mem_desc_bufs = NULL;
	dma_free_coherent(dev->dev,
			dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
			dev->host_mem_descs, dev->host_mem_descs_dma);
	dev->host_mem_descs = NULL;
	dev->nr_host_mem_descs = 0;
}
static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
		u32 chunk_size)
{
	struct nvme_host_mem_buf_desc *descs;
	u32 max_entries, len;
	dma_addr_t descs_dma;
	int i = 0;
	void **bufs;
	u64 size = 0, tmp;

	tmp = (preferred + chunk_size - 1);
	do_div(tmp, chunk_size);
	max_entries = tmp;

	if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
		max_entries = dev->ctrl.hmmaxd;

	descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs),
				   &descs_dma, GFP_KERNEL);
	if (!descs)
		goto out;

	bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL);
	if (!bufs)
		goto out_free_descs;

	for (size = 0; size < preferred && i < max_entries; size += len) {
		dma_addr_t dma_addr;

		len = min_t(u64, chunk_size, preferred - size);
		bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
				DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
		if (!bufs[i])
			break;

		descs[i].addr = cpu_to_le64(dma_addr);
		descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE);
		i++;
	}

	if (!size)
		goto out_free_bufs;

	dev->nr_host_mem_descs = i;
	dev->host_mem_size = size;
	dev->host_mem_descs = descs;
	dev->host_mem_descs_dma = descs_dma;
	dev->host_mem_desc_bufs = bufs;
	return 0;

out_free_bufs:
	while (--i >= 0) {
		size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE;

		dma_free_attrs(dev->dev, size, bufs[i],
			       le64_to_cpu(descs[i].addr),
			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
	}

	kfree(bufs);
out_free_descs:
	dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
			descs_dma);
out:
	dev->host_mem_descs = NULL;
	return -ENOMEM;
}
static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
{
	u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
	u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
	u64 chunk_size;

	/* start big and work our way down */
	for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) {
		if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
			if (!min || dev->host_mem_size >= min)
				return 0;
			nvme_free_host_mem(dev);
		}
	}

	return -ENOMEM;
}
*dev
)
2021 u64 max
= (u64
)max_host_mem_size_mb
* SZ_1M
;
2022 u64 preferred
= (u64
)dev
->ctrl
.hmpre
* 4096;
2023 u64 min
= (u64
)dev
->ctrl
.hmmin
* 4096;
2024 u32 enable_bits
= NVME_HOST_MEM_ENABLE
;
2027 if (!dev
->ctrl
.hmpre
)
2030 preferred
= min(preferred
, max
);
2032 dev_warn(dev
->ctrl
.device
,
2033 "min host memory (%lld MiB) above limit (%d MiB).\n",
2034 min
>> ilog2(SZ_1M
), max_host_mem_size_mb
);
2035 nvme_free_host_mem(dev
);
2040 * If we already have a buffer allocated check if we can reuse it.
2042 if (dev
->host_mem_descs
) {
2043 if (dev
->host_mem_size
>= min
)
2044 enable_bits
|= NVME_HOST_MEM_RETURN
;
2046 nvme_free_host_mem(dev
);
2049 if (!dev
->host_mem_descs
) {
2050 if (nvme_alloc_host_mem(dev
, min
, preferred
)) {
2051 dev_warn(dev
->ctrl
.device
,
2052 "failed to allocate host memory buffer.\n");
2053 return 0; /* controller must work without HMB */
2056 dev_info(dev
->ctrl
.device
,
2057 "allocated %lld MiB host memory buffer.\n",
2058 dev
->host_mem_size
>> ilog2(SZ_1M
));
2061 ret
= nvme_set_host_mem(dev
, enable_bits
);
2063 nvme_free_host_mem(dev
);
2067 static ssize_t
cmb_show(struct device
*dev
, struct device_attribute
*attr
,
2070 struct nvme_dev
*ndev
= to_nvme_dev(dev_get_drvdata(dev
));
2072 return sysfs_emit(buf
, "cmbloc : x%08x\ncmbsz : x%08x\n",
2073 ndev
->cmbloc
, ndev
->cmbsz
);
2075 static DEVICE_ATTR_RO(cmb
);
2077 static ssize_t
cmbloc_show(struct device
*dev
, struct device_attribute
*attr
,
2080 struct nvme_dev
*ndev
= to_nvme_dev(dev_get_drvdata(dev
));
2082 return sysfs_emit(buf
, "%u\n", ndev
->cmbloc
);
2084 static DEVICE_ATTR_RO(cmbloc
);
2086 static ssize_t
cmbsz_show(struct device
*dev
, struct device_attribute
*attr
,
2089 struct nvme_dev
*ndev
= to_nvme_dev(dev_get_drvdata(dev
));
2091 return sysfs_emit(buf
, "%u\n", ndev
->cmbsz
);
2093 static DEVICE_ATTR_RO(cmbsz
);
static ssize_t hmb_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));

	return sysfs_emit(buf, "%d\n", ndev->hmb);
}

static ssize_t hmb_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
	bool new;
	int ret;

	if (kstrtobool(buf, &new) < 0)
		return -EINVAL;

	if (new == ndev->hmb)
		return count;

	if (new) {
		ret = nvme_setup_host_mem(ndev);
	} else {
		ret = nvme_set_host_mem(ndev, 0);
		if (!ret)
			nvme_free_host_mem(ndev);
	}

	if (ret < 0)
		return ret;

	return count;
}
static DEVICE_ATTR_RW(hmb);
static umode_t nvme_pci_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct nvme_ctrl *ctrl =
		dev_get_drvdata(container_of(kobj, struct device, kobj));
	struct nvme_dev *dev = to_nvme_dev(ctrl);

	if (a == &dev_attr_cmb.attr ||
	    a == &dev_attr_cmbloc.attr ||
	    a == &dev_attr_cmbsz.attr) {
		if (!dev->cmbsz)
			return 0;
	}
	if (a == &dev_attr_hmb.attr && !ctrl->hmpre)
		return 0;

	return a->mode;
}

static struct attribute *nvme_pci_attrs[] = {
	&dev_attr_cmb.attr,
	&dev_attr_cmbloc.attr,
	&dev_attr_cmbsz.attr,
	&dev_attr_hmb.attr,
	NULL,
};

static const struct attribute_group nvme_pci_dev_attrs_group = {
	.attrs		= nvme_pci_attrs,
	.is_visible	= nvme_pci_attrs_are_visible,
};

static const struct attribute_group *nvme_pci_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
	&nvme_pci_dev_attrs_group,
	NULL,
};

static void nvme_update_attrs(struct nvme_dev *dev)
{
	sysfs_update_group(&dev->ctrl.device->kobj, &nvme_pci_dev_attrs_group);
}
/*
 * nirqs is the number of interrupts available for write and read
 * queues. The core already reserved an interrupt for the admin queue.
 */
static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
{
	struct nvme_dev *dev = affd->priv;
	unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;

	/*
	 * If there is no interrupt available for queues, ensure that
	 * the default queue is set to 1. The affinity set size is
	 * also set to one, but the irq core ignores it for this case.
	 *
	 * If only one interrupt is available or 'write_queue' == 0, combine
	 * write and read queues.
	 *
	 * If 'write_queues' > 0, ensure it leaves room for at least one read
	 * queue.
	 */
	if (!nrirqs) {
		nrirqs = 1;
		nr_read_queues = 0;
	} else if (nrirqs == 1 || !nr_write_queues) {
		nr_read_queues = 0;
	} else if (nr_write_queues >= nrirqs) {
		nr_read_queues = 1;
	} else {
		nr_read_queues = nrirqs - nr_write_queues;
	}

	dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
	affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
	dev->io_queues[HCTX_TYPE_READ] = nr_read_queues;
	affd->set_size[HCTX_TYPE_READ] = nr_read_queues;
	affd->nr_sets = nr_read_queues ? 2 : 1;
}
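/*
 * Worked example: with nr_write_queues = 2 and nrirqs = 8 the logic above
 * ends up with 6 read queues and 2 default (write) queues, i.e. two
 * affinity sets; with nr_write_queues = 0 all 8 vectors go to the default
 * set and only one set is used.
 */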
static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct irq_affinity affd = {
		.pre_vectors	= 1,
		.calc_sets	= nvme_calc_irq_sets,
		.priv		= dev,
	};
	unsigned int irq_queues, poll_queues;

	/*
	 * Poll queues don't need interrupts, but we need at least one I/O queue
	 * left over for non-polled I/O.
	 */
	poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1);
	dev->io_queues[HCTX_TYPE_POLL] = poll_queues;

	/*
	 * Initialize for the single interrupt case, will be updated in
	 * nvme_calc_irq_sets().
	 */
	dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
	dev->io_queues[HCTX_TYPE_READ] = 0;

	/*
	 * We need interrupts for the admin queue and each non-polled I/O queue,
	 * but some Apple controllers require all queues to use the first
	 * vector.
	 */
	irq_queues = 1;
	if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
		irq_queues += (nr_io_queues - poll_queues);
	return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
			      PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
}
static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
{
	/*
	 * If tags are shared with admin queue (Apple bug), then
	 * make sure we only use one IO queue.
	 */
	if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
		return 1;
	return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues;
}

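/*
 * Illustrative example (not part of the original driver source): on a 16-CPU
 * system booted with write_queues=2 and poll_queues=2 this returns
 * 16 + 2 + 2 = 20, so nvme_pci_alloc_dev() sizes dev->queues for 20 I/O
 * queues plus the admin queue.  With NVME_QUIRK_SHARED_TAGS the result is
 * clamped to a single I/O queue regardless of CPU count.
 */
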
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	struct nvme_queue *adminq = &dev->queues[0];
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	unsigned int nr_io_queues;
	unsigned long size;
	int result;

	/*
	 * Sample the module parameters once at reset time so that we have
	 * stable values to work with.
	 */
	dev->nr_write_queues = write_queues;
	dev->nr_poll_queues = poll_queues;

	nr_io_queues = dev->nr_allocated_queues - 1;
	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
	if (result < 0)
		return result;

	if (nr_io_queues == 0)
		return 0;

	/*
	 * Free IRQ resources as soon as NVMEQ_ENABLED bit transitions
	 * from set to unset. If there is a window to it is truly freed,
	 * pci_free_irq_vectors() jumping into this window will crash.
	 * And take lock to avoid racing with pci_free_irq_vectors() in
	 * nvme_dev_disable() path.
	 */
	result = nvme_setup_io_queues_trylock(dev);
	if (result)
		return result;
	if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
		pci_free_irq(pdev, 0, adminq);

	if (dev->cmb_use_sqes) {
		result = nvme_cmb_qdepth(dev, nr_io_queues,
				sizeof(struct nvme_command));
		if (result > 0) {
			dev->q_depth = result;
			dev->ctrl.sqsize = result - 1;
		} else {
			dev->cmb_use_sqes = false;
		}
	}

	do {
		size = db_bar_size(dev, nr_io_queues);
		result = nvme_remap_bar(dev, size);
		if (!result)
			break;
		if (!--nr_io_queues) {
			result = -ENOMEM;
			goto out_unlock;
		}
	} while (1);
	adminq->q_db = dev->dbs;

 retry:
	/* Deregister the admin queue's interrupt */
	if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
		pci_free_irq(pdev, 0, adminq);

	/*
	 * If we enable msix early due to not intx, disable it again before
	 * setting up the full range we need.
	 */
	pci_free_irq_vectors(pdev);

	result = nvme_setup_irqs(dev, nr_io_queues);
	if (result <= 0) {
		result = -EIO;
		goto out_unlock;
	}

	dev->num_vecs = result;
	result = max(result - 1, 1);
	dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];

	/*
	 * Should investigate if there's a performance win from allocating
	 * more queues than interrupt vectors; it might allow the submission
	 * path to scale better, even if the receive path is limited by the
	 * number of interrupts.
	 */
	result = queue_request_irq(adminq);
	if (result)
		goto out_unlock;
	set_bit(NVMEQ_ENABLED, &adminq->flags);
	mutex_unlock(&dev->shutdown_lock);

	result = nvme_create_io_queues(dev);
	if (result || dev->online_queues < 2)
		return result;

	if (dev->online_queues - 1 < dev->max_qid) {
		nr_io_queues = dev->online_queues - 1;
		nvme_delete_io_queues(dev);
		result = nvme_setup_io_queues_trylock(dev);
		if (result)
			return result;
		nvme_suspend_io_queues(dev);
		goto retry;
	}
	dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
					dev->io_queues[HCTX_TYPE_DEFAULT],
					dev->io_queues[HCTX_TYPE_READ],
					dev->io_queues[HCTX_TYPE_POLL]);
	return 0;
out_unlock:
	mutex_unlock(&dev->shutdown_lock);
	return result;
}

static enum rq_end_io_ret nvme_del_queue_end(struct request *req,
					     blk_status_t error)
{
	struct nvme_queue *nvmeq = req->end_io_data;

	blk_mq_free_request(req);
	complete(&nvmeq->delete_done);
	return RQ_END_IO_NONE;
}

static enum rq_end_io_ret nvme_del_cq_end(struct request *req,
					  blk_status_t error)
{
	struct nvme_queue *nvmeq = req->end_io_data;

	if (error)
		set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);

	return nvme_del_queue_end(req, error);
}

static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
{
	struct request_queue *q = nvmeq->dev->ctrl.admin_q;
	struct request *req;
	struct nvme_command cmd = { };

	cmd.delete_queue.opcode = opcode;
	cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);

	req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(req))
		return PTR_ERR(req);
	nvme_init_request(req, &cmd);

	if (opcode == nvme_admin_delete_cq)
		req->end_io = nvme_del_cq_end;
	else
		req->end_io = nvme_del_queue_end;
	req->end_io_data = nvmeq;

	init_completion(&nvmeq->delete_done);
	blk_execute_rq_nowait(req, false);
	return 0;
}

static bool __nvme_delete_io_queues(struct nvme_dev *dev, u8 opcode)
{
	int nr_queues = dev->online_queues - 1, sent = 0;
	unsigned long timeout;

 retry:
	timeout = NVME_ADMIN_TIMEOUT;
	while (nr_queues > 0) {
		if (nvme_delete_queue(&dev->queues[nr_queues], opcode))
			break;
		nr_queues--;
		sent++;
	}
	while (sent) {
		struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent];

		timeout = wait_for_completion_io_timeout(&nvmeq->delete_done,
				timeout);
		if (timeout == 0)
			return false;

		sent--;
		if (nr_queues)
			goto retry;
	}
	return true;
}

static void nvme_delete_io_queues(struct nvme_dev *dev)
{
	if (__nvme_delete_io_queues(dev, nvme_admin_delete_sq))
		__nvme_delete_io_queues(dev, nvme_admin_delete_cq);
}

static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev)
{
	if (dev->io_queues[HCTX_TYPE_POLL])
		return 3;
	if (dev->io_queues[HCTX_TYPE_READ])
		return 2;
	return 1;
}

static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
{
	blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
	/* free previously allocated queues that are no longer usable */
	nvme_free_queues(dev, dev->online_queues);
}

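/*
 * Illustrative note (not part of the original driver source): the map count
 * returned by nvme_pci_nr_maps() feeds nvme_alloc_io_tag_set() in
 * nvme_probe().  A controller configured with both read and poll queues gets
 * three blk-mq queue maps (default/read/poll); one that only uses the shared
 * default set gets a single map.
 */
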
static int nvme_pci_enable(struct nvme_dev *dev)
{
	int result = -ENOMEM;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pci_enable_device_mem(pdev))
		return result;

	pci_set_master(pdev);

	if (readl(dev->bar + NVME_REG_CSTS) == -1) {
		result = -ENODEV;
		goto disable;
	}

	/*
	 * Some devices and/or platforms don't advertise or work with INTx
	 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
	 * adjust this later.
	 */
	result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (result < 0)
		goto disable;

	dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);

	dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
				io_queue_depth);
	dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
	dev->dbs = dev->bar + 4096;

	/*
	 * Some Apple controllers require a non-standard SQE size.
	 * Interestingly they also seem to ignore the CC:IOSQES register
	 * so we don't bother updating it here.
	 */
	if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES)
		dev->io_sqes = 7;
	else
		dev->io_sqes = NVME_NVM_IOSQES;

	/*
	 * Temporary fix for the Apple controller found in the MacBook8,1 and
	 * some MacBook7,1 to avoid controller resets and data loss.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
		dev->q_depth = 2;
		dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
			"set queue depth=%u to work around controller resets\n",
			dev->q_depth);
	} else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG &&
		   (pdev->device == 0xa821 || pdev->device == 0xa822) &&
		   NVME_CAP_MQES(dev->ctrl.cap) == 0) {
		dev->q_depth = 64;
		dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, "
			"set queue depth=%u\n", dev->q_depth);
	}

	/*
	 * Controllers with the shared tags quirk need the IO queue to be
	 * big enough so that we get 32 tags for the admin queue
	 */
	if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) &&
	    (dev->q_depth < (NVME_AQ_DEPTH + 2))) {
		dev->q_depth = NVME_AQ_DEPTH + 2;
		dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n",
			 dev->q_depth);
	}
	dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */

	nvme_map_cmb(dev);

	pci_save_state(pdev);

	result = nvme_pci_configure_admin_queue(dev);
	if (result)
		goto free_irq;
	return result;

 free_irq:
	pci_free_irq_vectors(pdev);
 disable:
	pci_disable_device(pdev);
	return result;
}

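/*
 * Worked example (illustrative, not part of the original driver source): a
 * controller reporting CAP.MQES = 1023 yields
 * dev->q_depth = min(1024, io_queue_depth), i.e. 1024 with the default
 * io_queue_depth module parameter, and dev->ctrl.sqsize = 1023 because
 * SQSIZE and CAP.MQES are zero's based values.
 */
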
static void nvme_dev_unmap(struct nvme_dev *dev)
{
	if (dev->bar)
		iounmap(dev->bar);
	pci_release_mem_regions(to_pci_dev(dev->dev));
}

static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	u32 csts;

	if (!pci_is_enabled(pdev) || !pci_device_is_present(pdev))
		return true;
	if (pdev->error_state != pci_channel_io_normal)
		return true;

	csts = readl(dev->bar + NVME_REG_CSTS);
	return (csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY);
}

static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
	enum nvme_ctrl_state state = nvme_ctrl_state(&dev->ctrl);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	bool dead;

	mutex_lock(&dev->shutdown_lock);
	dead = nvme_pci_ctrl_is_dead(dev);
	if (state == NVME_CTRL_LIVE || state == NVME_CTRL_RESETTING) {
		if (pci_is_enabled(pdev))
			nvme_start_freeze(&dev->ctrl);
		/*
		 * Give the controller a chance to complete all entered requests
		 * if doing a safe shutdown.
		 */
		if (!dead && shutdown)
			nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
	}

	nvme_quiesce_io_queues(&dev->ctrl);

	if (!dead && dev->ctrl.queue_count > 0) {
		nvme_delete_io_queues(dev);
		nvme_disable_ctrl(&dev->ctrl, shutdown);
		nvme_poll_irqdisable(&dev->queues[0]);
	}
	nvme_suspend_io_queues(dev);
	nvme_suspend_queue(dev, 0);
	pci_free_irq_vectors(pdev);
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	nvme_reap_pending_cqes(dev);

	nvme_cancel_tagset(&dev->ctrl);
	nvme_cancel_admin_tagset(&dev->ctrl);

	/*
	 * The driver will not be starting up queues again if shutting down so
	 * must flush all entered requests to their failed completion to avoid
	 * deadlocking blk-mq hot-cpu notifier.
	 */
	if (shutdown) {
		nvme_unquiesce_io_queues(&dev->ctrl);
		if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
			nvme_unquiesce_admin_queue(&dev->ctrl);
	}
	mutex_unlock(&dev->shutdown_lock);
}

static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
{
	if (!nvme_wait_reset(&dev->ctrl))
		return -EBUSY;
	nvme_dev_disable(dev, shutdown);
	return 0;
}

static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
	dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
						NVME_CTRL_PAGE_SIZE,
						NVME_CTRL_PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
	return 0;
}

static void nvme_release_prp_pools(struct nvme_dev *dev)
{
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);
}

static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
{
	size_t alloc_size = sizeof(struct scatterlist) * NVME_MAX_SEGS;

	dev->iod_mempool = mempool_create_node(1,
			mempool_kmalloc, mempool_kfree,
			(void *)alloc_size, GFP_KERNEL,
			dev_to_node(dev->dev));
	if (!dev->iod_mempool)
		return -ENOMEM;
	return 0;
}

static void nvme_free_tagset(struct nvme_dev *dev)
{
	if (dev->tagset.tags)
		nvme_remove_io_tag_set(&dev->ctrl);
	dev->ctrl.tagset = NULL;
}

/* pairs with nvme_pci_alloc_dev */
static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);

	nvme_free_tagset(dev);
	put_device(dev->dev);
	kfree(dev->queues);
	kfree(dev);
}

static void nvme_reset_work(struct work_struct *work)
{
	struct nvme_dev *dev =
		container_of(work, struct nvme_dev, ctrl.reset_work);
	bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
	int result;

	if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_RESETTING) {
		dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
			 dev->ctrl.state);
		result = -ENODEV;
		goto out;
	}

	/*
	 * If we're called to reset a live controller first shut it down before
	 * moving on.
	 */
	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
		nvme_dev_disable(dev, false);
	nvme_sync_queues(&dev->ctrl);

	mutex_lock(&dev->shutdown_lock);
	result = nvme_pci_enable(dev);
	if (result)
		goto out_unlock;
	nvme_unquiesce_admin_queue(&dev->ctrl);
	mutex_unlock(&dev->shutdown_lock);

	/*
	 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
	 * initializing procedure here.
	 */
	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
		dev_warn(dev->ctrl.device,
			"failed to mark controller CONNECTING\n");
		result = -EBUSY;
		goto out;
	}

	result = nvme_init_ctrl_finish(&dev->ctrl, was_suspend);
	if (result)
		goto out;

	nvme_dbbuf_dma_alloc(dev);

	result = nvme_setup_host_mem(dev);
	if (result < 0)
		goto out;

	result = nvme_setup_io_queues(dev);
	if (result)
		goto out;

	/*
	 * Freeze and update the number of I/O queues as those might have
	 * changed. If there are no I/O queues left after this reset, keep the
	 * controller around but remove all namespaces.
	 */
	if (dev->online_queues > 1) {
		nvme_dbbuf_set(dev);
		nvme_unquiesce_io_queues(&dev->ctrl);
		nvme_wait_freeze(&dev->ctrl);
		nvme_pci_update_nr_queues(dev);
		nvme_unfreeze(&dev->ctrl);
	} else {
		dev_warn(dev->ctrl.device, "IO queues lost\n");
		nvme_mark_namespaces_dead(&dev->ctrl);
		nvme_unquiesce_io_queues(&dev->ctrl);
		nvme_remove_namespaces(&dev->ctrl);
		nvme_free_tagset(dev);
	}

	/*
	 * If only admin queue live, keep it to do further investigation or
	 * recovery.
	 */
	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
		dev_warn(dev->ctrl.device,
			"failed to mark controller live state\n");
		result = -ENODEV;
		goto out;
	}

	nvme_start_ctrl(&dev->ctrl);
	return;

 out_unlock:
	mutex_unlock(&dev->shutdown_lock);
 out:
	/*
	 * Set state to deleting now to avoid blocking nvme_wait_reset(), which
	 * may be holding this pci_dev's device lock.
	 */
	dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n",
		 result);
	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
	nvme_dev_disable(dev, true);
	nvme_sync_queues(&dev->ctrl);
	nvme_mark_namespaces_dead(&dev->ctrl);
	nvme_unquiesce_io_queues(&dev->ctrl);
	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
}

static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	*val = readl(to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	writel(val, to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	*val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off);
	return 0;
}

static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);

	return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
}

static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
{
	struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
	struct nvme_subsystem *subsys = ctrl->subsys;

	dev_err(ctrl->device,
		"VID:DID %04x:%04x model:%.*s firmware:%.*s\n",
		pdev->vendor, pdev->device,
		nvme_strlen(subsys->model, sizeof(subsys->model)),
		subsys->model, nvme_strlen(subsys->firmware_rev,
					   sizeof(subsys->firmware_rev)),
		subsys->firmware_rev);
}

static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);

	return dma_pci_p2pdma_supported(dev->dev);
}

static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
	.name			= "pcie",
	.module			= THIS_MODULE,
	.flags			= NVME_F_METADATA_SUPPORTED,
	.dev_attr_groups	= nvme_pci_dev_attr_groups,
	.reg_read32		= nvme_pci_reg_read32,
	.reg_write32		= nvme_pci_reg_write32,
	.reg_read64		= nvme_pci_reg_read64,
	.free_ctrl		= nvme_pci_free_ctrl,
	.submit_async_event	= nvme_pci_submit_async_event,
	.get_address		= nvme_pci_get_address,
	.print_device_info	= nvme_pci_print_device_info,
	.supports_pci_p2pdma	= nvme_pci_supports_pci_p2pdma,
};

static int nvme_dev_map(struct nvme_dev *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pci_request_mem_regions(pdev, "nvme"))
		return -ENODEV;

	if (nvme_remap_bar(dev, NVME_REG_DBS + 4096))
		goto release;

	return 0;
  release:
	pci_release_mem_regions(pdev);
	return -ENODEV;
}

static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
{
	if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
		/*
		 * Several Samsung devices seem to drop off the PCIe bus
		 * randomly when APST is on and uses the deepest sleep state.
		 * This has been observed on a Samsung "SM951 NVMe SAMSUNG
		 * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
		 * 950 PRO 256GB", but it seems to be restricted to two Dell
		 * laptops.
		 */
		if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
		    (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
		     dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
			return NVME_QUIRK_NO_DEEPEST_PS;
	} else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
		/*
		 * Samsung SSD 960 EVO drops off the PCIe bus after system
		 * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as
		 * within few minutes after bootup on a Coffee Lake board -
		 * ASUS PRIME Z370-A
		 */
		if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
		    (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
		     dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
			return NVME_QUIRK_NO_APST;
	} else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 ||
		    pdev->device == 0xa808 || pdev->device == 0xa809)) ||
		   (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) {
		/*
		 * Forcing to use host managed nvme power settings for
		 * lowest idle power with quick resume latency on
		 * Samsung and Toshiba SSDs based on suspend behavior
		 * on Coffee Lake board for LENOVO C640
		 */
		if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) &&
		     dmi_match(DMI_BOARD_NAME, "LNVNB161216"))
			return NVME_QUIRK_SIMPLE_SUSPEND;
	} else if (pdev->vendor == 0x2646 && (pdev->device == 0x2263 ||
		   pdev->device == 0x500f)) {
		/*
		 * Exclude some Kingston NV1 and A2000 devices from
		 * NVME_QUIRK_SIMPLE_SUSPEND. Do a full suspend to save a
		 * lot of energy with s2idle sleep on some TUXEDO platforms.
		 */
		if (dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
		    dmi_match(DMI_BOARD_NAME, "NS5x_7xAU") ||
		    dmi_match(DMI_BOARD_NAME, "NS5x_7xPU") ||
		    dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1"))
			return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
	}

	return 0;
}

static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
		const struct pci_device_id *id)
{
	unsigned long quirks = id->driver_data;
	int node = dev_to_node(&pdev->dev);
	struct nvme_dev *dev;
	int ret = -ENOMEM;

	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
	if (!dev)
		return ERR_PTR(-ENOMEM);
	INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
	mutex_init(&dev->shutdown_lock);

	dev->nr_write_queues = write_queues;
	dev->nr_poll_queues = poll_queues;
	dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1;
	dev->queues = kcalloc_node(dev->nr_allocated_queues,
			sizeof(struct nvme_queue), GFP_KERNEL, node);
	if (!dev->queues)
		goto out_free_dev;

	dev->dev = get_device(&pdev->dev);

	quirks |= check_vendor_combination_bug(pdev);
	if (!noacpi &&
	    !(quirks & NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND) &&
	    acpi_storage_d3(&pdev->dev)) {
		/*
		 * Some systems use a bios work around to ask for D3 on
		 * platforms that support kernel managed suspend.
		 */
		dev_info(&pdev->dev,
			 "platform quirk: setting simple suspend\n");
		quirks |= NVME_QUIRK_SIMPLE_SUSPEND;
	}
	ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
			     quirks);
	if (ret)
		goto out_put_device;

	if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	else
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	dma_set_min_align_mask(&pdev->dev, NVME_CTRL_PAGE_SIZE - 1);
	dma_set_max_seg_size(&pdev->dev, 0xffffffff);

	/*
	 * Limit the max command size to prevent iod->sg allocations going
	 * over a single page.
	 */
	dev->ctrl.max_hw_sectors = min_t(u32,
		NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9);
	dev->ctrl.max_segments = NVME_MAX_SEGS;

	/*
	 * There is no support for SGLs for metadata (yet), so we are limited to
	 * a single integrity segment for the separate metadata pointer.
	 */
	dev->ctrl.max_integrity_segments = 1;
	return dev;

out_put_device:
	put_device(dev->dev);
	kfree(dev->queues);
out_free_dev:
	kfree(dev);
	return ERR_PTR(ret);
}

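/*
 * Worked example (illustrative, not part of the original driver source):
 * with NVME_MAX_KB_SZ = 8192, the cap computed above is 8192 << 1 = 16384
 * 512-byte sectors, i.e. 8 MiB per command, further reduced if
 * dma_opt_mapping_size() reports a smaller optimal mapping size for the
 * device.
 */
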
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct nvme_dev *dev;
	int result = -ENOMEM;

	dev = nvme_pci_alloc_dev(pdev, id);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	result = nvme_dev_map(dev);
	if (result)
		goto out_uninit_ctrl;

	result = nvme_setup_prp_pools(dev);
	if (result)
		goto out_dev_unmap;

	result = nvme_pci_alloc_iod_mempool(dev);
	if (result)
		goto out_release_prp_pools;

	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));

	result = nvme_pci_enable(dev);
	if (result)
		goto out_release_iod_mempool;

	result = nvme_alloc_admin_tag_set(&dev->ctrl, &dev->admin_tagset,
				&nvme_mq_admin_ops, sizeof(struct nvme_iod));
	if (result)
		goto out_disable;

	/*
	 * Mark the controller as connecting before sending admin commands to
	 * allow the timeout handler to do the right thing.
	 */
	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
		dev_warn(dev->ctrl.device,
			"failed to mark controller CONNECTING\n");
		result = -EBUSY;
		goto out_disable;
	}

	result = nvme_init_ctrl_finish(&dev->ctrl, false);
	if (result)
		goto out_disable;

	nvme_dbbuf_dma_alloc(dev);

	result = nvme_setup_host_mem(dev);
	if (result < 0)
		goto out_disable;

	result = nvme_setup_io_queues(dev);
	if (result)
		goto out_disable;

	if (dev->online_queues > 1) {
		nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
				nvme_pci_nr_maps(dev), sizeof(struct nvme_iod));
		nvme_dbbuf_set(dev);
	}

	if (!dev->ctrl.tagset)
		dev_warn(dev->ctrl.device, "IO queues not created\n");

	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
		dev_warn(dev->ctrl.device,
			"failed to mark controller live state\n");
		result = -ENODEV;
		goto out_disable;
	}

	pci_set_drvdata(pdev, dev);

	nvme_start_ctrl(&dev->ctrl);
	nvme_put_ctrl(&dev->ctrl);
	flush_work(&dev->ctrl.scan_work);
	return 0;

out_disable:
	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
	nvme_dev_disable(dev, true);
	nvme_free_host_mem(dev);
	nvme_dev_remove_admin(dev);
	nvme_dbbuf_dma_free(dev);
	nvme_free_queues(dev, 0);
out_release_iod_mempool:
	mempool_destroy(dev->iod_mempool);
out_release_prp_pools:
	nvme_release_prp_pools(dev);
out_dev_unmap:
	nvme_dev_unmap(dev);
out_uninit_ctrl:
	nvme_uninit_ctrl(&dev->ctrl);
	nvme_put_ctrl(&dev->ctrl);
	return result;
}

static void nvme_reset_prepare(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	/*
	 * We don't need to check the return value from waiting for the reset
	 * state as pci_dev device lock is held, making it impossible to race
	 * with ->remove().
	 */
	nvme_disable_prepare_reset(dev, false);
	nvme_sync_queues(&dev->ctrl);
}

static void nvme_reset_done(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	if (!nvme_try_sched_reset(&dev->ctrl))
		flush_work(&dev->ctrl.reset_work);
}

static void nvme_shutdown(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	nvme_disable_prepare_reset(dev, true);
}

/*
 * The driver's remove may be called on a device in a partially initialized
 * state. This function must not have any dependencies on the device state in
 * order to proceed.
 */
static void nvme_remove(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
	pci_set_drvdata(pdev, NULL);

	if (!pci_device_is_present(pdev)) {
		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
		nvme_dev_disable(dev, true);
	}

	flush_work(&dev->ctrl.reset_work);
	nvme_stop_ctrl(&dev->ctrl);
	nvme_remove_namespaces(&dev->ctrl);
	nvme_dev_disable(dev, true);
	nvme_free_host_mem(dev);
	nvme_dev_remove_admin(dev);
	nvme_dbbuf_dma_free(dev);
	nvme_free_queues(dev, 0);
	mempool_destroy(dev->iod_mempool);
	nvme_release_prp_pools(dev);
	nvme_dev_unmap(dev);
	nvme_uninit_ctrl(&dev->ctrl);
}

#ifdef CONFIG_PM_SLEEP
static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps)
{
	return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps);
}

static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps)
{
	return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL);
}

static int nvme_resume(struct device *dev)
{
	struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
	struct nvme_ctrl *ctrl = &ndev->ctrl;

	if (ndev->last_ps == U32_MAX ||
	    nvme_set_power_state(ctrl, ndev->last_ps) != 0)
		goto reset;
	if (ctrl->hmpre && nvme_setup_host_mem(ndev))
		goto reset;

	return 0;
reset:
	return nvme_try_sched_reset(ctrl);
}

static int nvme_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);
	struct nvme_ctrl *ctrl = &ndev->ctrl;
	int ret = -EBUSY;

	ndev->last_ps = U32_MAX;

	/*
	 * The platform does not remove power for a kernel managed suspend so
	 * use host managed nvme power settings for lowest idle power if
	 * possible. This should have quicker resume latency than a full device
	 * shutdown.  But if the firmware is involved after the suspend or the
	 * device does not support any non-default power states, shut down the
	 * device fully.
	 *
	 * If ASPM is not enabled for the device, shut down the device and allow
	 * the PCI bus layer to put it into D3 in order to take the PCIe link
	 * down, so as to allow the platform to achieve its minimum low-power
	 * state (which may not be possible if the link is up).
	 */
	if (pm_suspend_via_firmware() || !ctrl->npss ||
	    !pcie_aspm_enabled(pdev) ||
	    (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
		return nvme_disable_prepare_reset(ndev, true);

	nvme_start_freeze(ctrl);
	nvme_wait_freeze(ctrl);
	nvme_sync_queues(ctrl);

	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
		goto unfreeze;

	/*
	 * Host memory access may not be successful in a system suspend state,
	 * but the specification allows the controller to access memory in a
	 * non-operational power state.
	 */
	if (ndev->hmb) {
		ret = nvme_set_host_mem(ndev, 0);
		if (ret < 0)
			goto unfreeze;
	}

	ret = nvme_get_power_state(ctrl, &ndev->last_ps);
	if (ret < 0)
		goto unfreeze;

	/*
	 * A saved state prevents pci pm from generically controlling the
	 * device's power. If we're using protocol specific settings, we don't
	 * want pci interfering.
	 */
	pci_save_state(pdev);

	ret = nvme_set_power_state(ctrl, ctrl->npss);
	if (ret < 0)
		goto unfreeze;

	if (ret) {
		/* discard the saved state */
		pci_load_saved_state(pdev, NULL);

		/*
		 * Clearing npss forces a controller reset on resume. The
		 * correct value will be rediscovered then.
		 */
		ret = nvme_disable_prepare_reset(ndev, true);
		ctrl->npss = 0;
	}
unfreeze:
	nvme_unfreeze(ctrl);
	return ret;
}

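/*
 * Summary of nvme_suspend()'s policy above (illustrative, not part of the
 * original driver source): a firmware-mediated suspend, a controller without
 * non-default power states (npss == 0), disabled ASPM, or the
 * NVME_QUIRK_SIMPLE_SUSPEND quirk all force the full shutdown path;
 * otherwise the driver saves the current power state and programs the
 * deepest state (ctrl->npss) so resume only has to restore it instead of
 * re-initializing the controller.
 */
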
static int nvme_simple_suspend(struct device *dev)
{
	struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));

	return nvme_disable_prepare_reset(ndev, true);
}

static int nvme_simple_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

	return nvme_try_sched_reset(&ndev->ctrl);
}

static const struct dev_pm_ops nvme_dev_pm_ops = {
	.suspend	= nvme_suspend,
	.resume		= nvme_resume,
	.freeze		= nvme_simple_suspend,
	.thaw		= nvme_simple_resume,
	.poweroff	= nvme_simple_suspend,
	.restore	= nvme_simple_resume,
};
#endif /* CONFIG_PM_SLEEP */

static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	/*
	 * A frozen channel requires a reset. When detected, this method will
	 * shutdown the controller to quiesce. The controller will be restarted
	 * after the slot reset through driver's slot_reset callback.
	 */
	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		dev_warn(dev->ctrl.device,
			"frozen state error detected, reset controller\n");
		if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
			nvme_dev_disable(dev, true);
			return PCI_ERS_RESULT_DISCONNECT;
		}
		nvme_dev_disable(dev, false);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_warn(dev->ctrl.device,
			"failure state error detected, request disconnect\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	dev_info(dev->ctrl.device, "restart after slot reset\n");
	pci_restore_state(pdev);
	if (!nvme_try_sched_reset(&dev->ctrl))
		nvme_unquiesce_io_queues(&dev->ctrl);
	return PCI_ERS_RESULT_RECOVERED;
}

static void nvme_error_resume(struct pci_dev *pdev)
{
	struct nvme_dev *dev = pci_get_drvdata(pdev);

	flush_work(&dev->ctrl.reset_work);
}

static const struct pci_error_handlers nvme_err_handler = {
	.error_detected	= nvme_error_detected,
	.slot_reset	= nvme_slot_reset,
	.resume		= nvme_error_resume,
	.reset_prepare	= nvme_reset_prepare,
	.reset_done	= nvme_reset_done,
};

static const struct pci_device_id nvme_id_table[] = {
	{ PCI_VDEVICE(INTEL, 0x0953),	/* Intel 750/P3500/P3600/P3700 */
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x0a53),	/* Intel P3520 */
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0x0a54),	/* Intel P4500/P4600 */
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES |
				NVME_QUIRK_IGNORE_DEV_SUBNQN |
				NVME_QUIRK_BOGUS_NID, },
	{ PCI_VDEVICE(INTEL, 0x0a55),	/* Dell Express Flash P4600 */
		.driver_data = NVME_QUIRK_STRIPE_SIZE |
				NVME_QUIRK_DEALLOCATE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0xf1a5),	/* Intel 600P/P3100 */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
				NVME_QUIRK_MEDIUM_PRIO_SQ |
				NVME_QUIRK_NO_TEMP_THRESH_CHANGE |
				NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_VDEVICE(INTEL, 0xf1a6),	/* Intel 760p/Pro 7600p */
		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
		.driver_data = NVME_QUIRK_IDENTIFY_CNS |
				NVME_QUIRK_DISABLE_WRITE_ZEROES |
				NVME_QUIRK_BOGUS_NID, },
	{ PCI_VDEVICE(REDHAT, 0x0010),	/* Qemu emulated controller */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x126f, 0x2263),	/* Silicon Motion unidentified */
		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
				NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1bb1, 0x0100),	/* Seagate Nytro Flash Storage */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
				NVME_QUIRK_NO_NS_DESC_LIST, },
	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1c58, 0x0023),	/* WDC SN200 adapter */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x1c5f, 0x0540),	/* Memblaze Pblaze4 adapter */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x144d, 0xa821),	/* Samsung PM1725 */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ PCI_DEVICE(0x144d, 0xa822),	/* Samsung PM1725a */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
				NVME_QUIRK_DISABLE_WRITE_ZEROES |
				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
	{ PCI_DEVICE(0x1987, 0x5012),	/* Phison E12 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1987, 0x5016),	/* Phison E16 */
		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
				NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1987, 0x5019),	/* phison E19 */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x1987, 0x5021),	/* Phison E21 */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x1b4b, 0x1092),	/* Lexar 256 GB SSD */
		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
	{ PCI_DEVICE(0x1cc1, 0x33f8),	/* ADATA IM2P33F8ABR1 1 TB */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x10ec, 0x5762),	/* ADATA SX6000LNP */
		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
				NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x10ec, 0x5763),	/* ADATA SX6000PNP */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1cc1, 0x8201),	/* ADATA SX8200PNP 512GB */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
	{ PCI_DEVICE(0x1344, 0x5407),	/* Micron Technology Inc NVMe SSD */
		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN },
	{ PCI_DEVICE(0x1344, 0x6001),	/* Micron Nitro NVMe */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1c5c, 0x1504),	/* SK Hynix PC400 */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x1c5c, 0x174a),	/* SK Hynix P31 SSD */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1c5c, 0x1D59),	/* SK Hynix BC901 */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x15b7, 0x2001),	/* Sandisk Skyhawk */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x1d97, 0x2263),	/* SPCC */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x144d, 0xa80b),	/* Samsung PM9B1 256G and 512G */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES |
				NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x144d, 0xa809),	/* Samsung MZALQ256HBJD 256G */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x144d, 0xa802),	/* Samsung SM953 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1cc4, 0x6303),	/* UMIS RPJTJ512MGE1QDY 512G */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x1cc4, 0x6302),	/* UMIS RPJTJ256MGE1QDY 256G */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x2646, 0x2262),	/* KINGSTON SKC2000 NVMe SSD */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
	{ PCI_DEVICE(0x2646, 0x2263),	/* KINGSTON A2000 NVMe SSD */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
	{ PCI_DEVICE(0x2646, 0x5013),	/* Kingston KC3000, Kingston FURY Renegade */
		.driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, },
	{ PCI_DEVICE(0x2646, 0x5018),	/* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x2646, 0x5016),	/* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x2646, 0x501A),	/* KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x2646, 0x501B),	/* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x2646, 0x501E),	/* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x1f40, 0x1202),	/* Netac Technologies Co. NV3000 NVMe SSD */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1f40, 0x5236),	/* Netac Technologies Co. NV7000 NVMe SSD */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1e4B, 0x1001),	/* MAXIO MAP1001 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1e4B, 0x1002),	/* MAXIO MAP1002 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1e4B, 0x1202),	/* MAXIO MAP1202 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1e4B, 0x1602),	/* MAXIO MAP1602 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1cc1, 0x5350),	/* ADATA XPG GAMMIX S50 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1dbe, 0x5236),	/* ADATA XPG GAMMIX S70 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1e49, 0x0021),	/* ZHITAI TiPro5000 NVMe SSD */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
	{ PCI_DEVICE(0x1e49, 0x0041),	/* ZHITAI TiPro7000 NVMe SSD */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
	{ PCI_DEVICE(0xc0a9, 0x540a),	/* Crucial P2 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1d97, 0x2263),	/* Lexar NM610 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1d97, 0x1d97),	/* Lexar NM620 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1d97, 0x2269),	/* Lexar NM760 */
		.driver_data = NVME_QUIRK_BOGUS_NID |
				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
	{ PCI_DEVICE(0x10ec, 0x5763),	/* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1e4b, 0x1602),	/* HS-SSD-FUTURE 2048G */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x10ec, 0x5765),	/* TEAMGROUP MP33 2TB SSD */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061),
		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00),
		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01),
		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
		.driver_data = NVME_QUIRK_SINGLE_VECTOR },
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
		.driver_data = NVME_QUIRK_SINGLE_VECTOR |
				NVME_QUIRK_128_BYTES_SQES |
				NVME_QUIRK_SHARED_TAGS |
				NVME_QUIRK_SKIP_CID_GEN |
				NVME_QUIRK_IDENTIFY_CNS },
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);

static struct pci_driver nvme_driver = {
	.name		= "nvme",
	.id_table	= nvme_id_table,
	.probe		= nvme_probe,
	.remove		= nvme_remove,
	.shutdown	= nvme_shutdown,
	.driver		= {
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
#ifdef CONFIG_PM_SLEEP
		.pm		= &nvme_dev_pm_ops,
#endif
	},
	.sriov_configure = pci_sriov_configure_simple,
	.err_handler	= &nvme_err_handler,
};

static int __init nvme_init(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
	BUILD_BUG_ON(NVME_MAX_SEGS > SGES_PER_PAGE);
	BUILD_BUG_ON(sizeof(struct scatterlist) * NVME_MAX_SEGS > PAGE_SIZE);
	BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_ALLOCATIONS);

	return pci_register_driver(&nvme_driver);
}

static void __exit nvme_exit(void)
{
	pci_unregister_driver(&nvme_driver);
	flush_workqueue(nvme_wq);
}

MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
MODULE_DESCRIPTION("NVMe host PCIe transport driver");
module_init(nvme_init);
module_exit(nvme_exit);