// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 NXP Semiconductors
 * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com>
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <memalign.h>
#include <pci.h>
#include <dm/device-internal.h>
#include <linux/compat.h>
#include "nvme.h"

#define NVME_Q_DEPTH		2
#define NVME_AQ_DEPTH		2
#define NVME_SQ_SIZE(depth)	(depth * sizeof(struct nvme_command))
#define NVME_CQ_SIZE(depth)	(depth * sizeof(struct nvme_completion))
#define ADMIN_TIMEOUT		60
#define IO_TIMEOUT		30
#define MAX_PRP_POOL		512

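/*
 * The driver is synchronous: it submits one command at a time and polls for
 * its completion, so a queue depth of 2 entries per queue is enough.
 */
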
enum nvme_queue_id {
	NVME_ADMIN_Q,
	NVME_IO_Q,
	NVME_Q_NUM,
};

/*
 * An NVM Express queue. Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	struct nvme_command *sq_cmds;
	struct nvme_completion *cqes;
	wait_queue_head_t sq_full;
	u32 __iomem *q_db;
	u16 q_depth;
	s16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 cqe_seen;
	unsigned long cmdid_data[];
};

static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
{
	u32 bit = enabled ? NVME_CSTS_RDY : 0;
	int timeout;
	ulong start;

	/* Timeout field in the CAP register is in 500 millisecond units */
	timeout = NVME_CAP_TIMEOUT(dev->cap) * 500;

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if ((readl(&dev->bar->csts) & NVME_CSTS_RDY) == bit)
			return 0;
	}

	return -ETIME;
}

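/*
 * NVMe describes the host buffer for a transfer with Physical Region Page
 * (PRP) entries: PRP1 points at the first page and PRP2 either points at the
 * second page or, when the transfer spans more than two pages, at a PRP list
 * holding the remaining page addresses. nvme_setup_prps() builds such a list
 * in dev->prp_pool when needed.
 */
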
static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
			   int total_len, u64 dma_addr)
{
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	u64 *prp_pool;
	int length = total_len;
	int i, nprps;
	u32 prps_per_page = (page_size >> 3) - 1;
	u32 num_pages;

	length -= (page_size - offset);

	if (length <= 0) {
		*prp2 = 0;
		return 0;
	}

	if (length)
		dma_addr += (page_size - offset);

	if (length <= page_size) {
		*prp2 = dma_addr;
		return 0;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	num_pages = DIV_ROUND_UP(nprps, prps_per_page);

	if (nprps > dev->prp_entry_num) {
		free(dev->prp_pool);
		/*
		 * Always increase in increments of pages. It doesn't waste
		 * much memory and reduces the number of allocations.
		 */
		dev->prp_pool = memalign(page_size, num_pages * page_size);
		if (!dev->prp_pool) {
			printf("Error: malloc prp_pool fail\n");
			return -ENOMEM;
		}
		dev->prp_entry_num = prps_per_page * num_pages;
	}

	prp_pool = dev->prp_pool;
	i = 0;
	while (nprps) {
		if (i == ((page_size >> 3) - 1)) {
			*(prp_pool + i) = cpu_to_le64((ulong)prp_pool +
					page_size);
			i = 0;
			prp_pool += page_size;
		}
		*(prp_pool + i++) = cpu_to_le64(dma_addr);
		dma_addr += page_size;
		nprps--;
	}
	*prp2 = (ulong)dev->prp_pool;

	flush_dcache_range((ulong)dev->prp_pool, (ulong)dev->prp_pool +
			   dev->prp_entry_num * sizeof(u64));

	return 0;
}

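/*
 * Note that nvme_setup_prps() only produces the PRP2 value; the caller
 * (nvme_blk_rw() below) places the buffer address itself in PRP1.
 */
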
static __le16 nvme_get_cmd_id(void)
{
	static unsigned short cmdid;

	return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0);
}

static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
{
	u64 start = (ulong)&nvmeq->cqes[index];
	u64 stop = start + sizeof(struct nvme_completion);

	invalidate_dcache_range(start, stop);

	return le16_to_cpu(readw(&(nvmeq->cqes[index].status)));
}

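/*
 * Completion queue entries are written by the controller via DMA, so the
 * corresponding cache lines must be invalidated before the CPU reads them;
 * likewise, submission queue entries are flushed to memory after the CPU
 * writes them (see nvme_submit_cmd() below).
 */
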
/**
 * nvme_submit_cmd() - copy a command into a queue and ring the doorbell
 *
 * @nvmeq:	The queue to use
 * @cmd:	The command to send
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	u16 tail = nvmeq->sq_tail;

	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));

	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}

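/*
 * Each completion queue entry carries a phase tag that the controller inverts
 * on every pass through the ring. A new completion is therefore detected by
 * polling until the phase bit of the status field matches the phase expected
 * for the current pass; the CQ head doorbell is then written to release the
 * entry.
 */
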
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
				struct nvme_command *cmd,
				u32 *result, unsigned timeout)
{
	u16 head = nvmeq->cq_head;
	u16 phase = nvmeq->cq_phase;
	u16 status;
	ulong start_time;
	ulong timeout_us = timeout * 100000;

	cmd->common.command_id = nvme_get_cmd_id();
	nvme_submit_cmd(nvmeq, cmd);

	start_time = timer_get_us();

	for (;;) {
		status = nvme_read_completion_status(nvmeq, head);
		if ((status & 0x01) == phase)
			break;
		if (timeout_us > 0 && (timer_get_us() - start_time)
		    >= timeout_us)
			return -ETIMEDOUT;
	}

	status >>= 1;
	if (status) {
		printf("ERROR: status = %x, phase = %d, head = %d\n",
		       status, phase, head);
		status = 0;
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
		nvmeq->cq_head = head;
		nvmeq->cq_phase = phase;
		return -EIO;
	}

	if (result)
		*result = le32_to_cpu(readl(&(nvmeq->cqes[head].result)));

	if (++head == nvmeq->q_depth) {
		head = 0;
		phase = !phase;
	}
	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
				 u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[NVME_ADMIN_Q], cmd,
				    result, ADMIN_TIMEOUT);
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
					   int qid, int depth)
{
	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
	if (!nvmeq)
		return NULL;
	memset(nvmeq, 0, sizeof(*nvmeq));

	nvmeq->cqes = (void *)memalign(4096, NVME_CQ_SIZE(depth));
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));

	nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
	if (!nvmeq->sq_cmds)
		goto free_cqdma;
	memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));

	nvmeq->dev = dev;

	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	dev->queue_count++;
	dev->queues[qid] = nvmeq;

	return nvmeq;

 free_cqdma:
	free((void *)nvmeq->cqes);
 free_nvmeq:
	free(nvmeq);

	return NULL;
}

static int nvme_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

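/*
 * Enabling and disabling the controller is a handshake: the host changes
 * CC.EN and then waits for CSTS.RDY to reflect the new state (see
 * nvme_wait_ready() above).
 */
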
static int nvme_enable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config |= NVME_CC_ENABLE;
	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);

	return nvme_wait_ready(dev, true);
}

static int nvme_disable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config &= ~NVME_CC_ENABLE;
	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);

	return nvme_wait_ready(dev, false);
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	free((void *)nvmeq->cqes);
	free(nvmeq->sq_cmds);
	free(nvmeq);
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = dev->queues[i];
		dev->queue_count--;
		dev->queues[i] = NULL;
		nvme_free_queue(nvmeq);
	}
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
	flush_dcache_range((ulong)nvmeq->cqes,
			   (ulong)nvmeq->cqes + NVME_CQ_SIZE(nvmeq->q_depth));
	dev->online_queues++;
}

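/*
 * The admin queue is not created with commands; instead its size and base
 * addresses are programmed directly into the AQA, ASQ and ACQ registers
 * while the controller is disabled.
 */
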
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = dev->cap;
	struct nvme_queue *nvmeq;
	/* most architectures use 4KB as the page size */
	unsigned page_shift = 12;
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;

	if (page_shift < dev_page_min) {
		debug("Device minimum page size (%u) too large for host (%u)\n",
		      1 << dev_page_min, 1 << page_shift);
		return -EINVAL;
	}

	if (page_shift > dev_page_max) {
		debug("Device maximum page size (%u) smaller than host (%u)\n",
		      1 << dev_page_max, 1 << page_shift);
		page_shift = dev_page_max;
	}

	result = nvme_disable_ctrl(dev);
	if (result < 0)
		return result;

	nvmeq = dev->queues[NVME_ADMIN_Q];
	if (!nvmeq) {
		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
		if (!nvmeq)
			return -ENOMEM;
	}

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->page_size = 1 << page_shift;

	dev->ctrl_config = NVME_CC_CSS_NVM;
	dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(aqa, &dev->bar->aqa);
	nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
	nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);

	result = nvme_enable_ctrl(dev);
	if (result)
		goto free_nvmeq;

	nvmeq->cq_vector = 0;

	nvme_init_queue(dev->queues[NVME_ADMIN_Q], 0);

	return result;

 free_nvmeq:
	nvme_free_queues(dev, 0);

	return result;
}

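/*
 * I/O queues are created with admin commands. A completion queue must exist
 * before the submission queue that posts to it, so nvme_alloc_cq() is issued
 * before nvme_alloc_sq() in nvme_create_queue() below.
 */
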
static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

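/*
 * Identify transfers a 4KB data structure describing either a namespace
 * (cns = 0) or the controller (cns = 1); PRP2 is only needed when the
 * buffer happens to straddle a page boundary.
 */
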
int nvme_identify(struct nvme_dev *dev, unsigned nsid,
		  unsigned cns, dma_addr_t dma_addr)
{
	struct nvme_command c;
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	int length = sizeof(struct nvme_id_ctrl);
	int ret;

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);

	length -= (page_size - offset);
	if (length <= 0) {
		c.identify.prp2 = 0;
	} else {
		dma_addr += (page_size - offset);
		c.identify.prp2 = cpu_to_le64(dma_addr);
	}

	c.identify.cns = cpu_to_le32(cns);

	ret = nvme_submit_admin_cmd(dev, &c, NULL);
	if (!ret)
		invalidate_dcache_range(dma_addr,
					dma_addr + sizeof(struct nvme_id_ctrl));

	return ret;
}

int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	/*
	 * TODO: add cache invalidate operation when the size of
	 * the DMA buffer is known
	 */

	return nvme_submit_admin_cmd(dev, &c, result);
}

int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	/*
	 * TODO: add cache flush operation when the size of
	 * the DMA buffer is known
	 */

	return nvme_submit_admin_cmd(dev, &c, result);
}

static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

	nvmeq->cq_vector = qid - 1;
	result = nvme_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = nvme_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_sq;

	nvme_init_queue(nvmeq, qid);

	return result;

 release_sq:
	nvme_delete_sq(dev, qid);
 release_cq:
	nvme_delete_cq(dev, qid);

	return result;
}

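/*
 * The Number of Queues feature encodes the requested (and returned) I/O
 * submission and completion queue counts, both zero-based, in the lower and
 * upper 16 bits of a dword.
 */
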
static int nvme_set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES,
				   q_count, 0, &result);
	if (status < 0)
		return status;
	if (status > 1)
		return 0;

	return min(result & 0xffff, result >> 16) + 1;
}

static void nvme_create_io_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count; i <= dev->max_qid; i++)
		if (!nvme_alloc_queue(dev, i, dev->q_depth))
			break;

	for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
		if (nvme_create_queue(dev->queues[i], i))
			break;
}

static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	int nr_io_queues;
	int result;

	nr_io_queues = 1;
	result = nvme_set_queue_count(dev, nr_io_queues);
	if (result <= 0)
		return result;

	dev->max_qid = nr_io_queues;

	/* Free previously allocated queues */
	nvme_free_queues(dev, nr_io_queues + 1);
	nvme_create_io_queues(dev);

	return 0;
}

static int nvme_get_info_from_identify(struct nvme_dev *dev)
{
	struct nvme_id_ctrl *ctrl;
	int ret;
	int shift = NVME_CAP_MPSMIN(dev->cap) + 12;

	ctrl = memalign(dev->page_size, sizeof(struct nvme_id_ctrl));
	if (!ctrl)
		return -ENOMEM;

	ret = nvme_identify(dev, 0, 1, (dma_addr_t)(long)ctrl);
	if (ret) {
		free(ctrl);
		return -EIO;
	}

	dev->nn = le32_to_cpu(ctrl->nn);
	dev->vwc = ctrl->vwc;
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	if (ctrl->mdts)
		dev->max_transfer_shift = (ctrl->mdts + shift);
	else {
		/*
		 * Maximum Data Transfer Size (MDTS) field indicates the
		 * maximum data transfer size between the host and the
		 * controller. The host should not submit a command that
		 * exceeds this transfer size. The value is in units of the
		 * minimum memory page size and is reported as a power of
		 * two (2^n).
		 *
		 * The spec also says: a value of 0h indicates no restrictions
		 * on transfer size. But in nvme_blk_read/write() below we
		 * have the following algorithm for maximum number of logical
		 * blocks per transfer:
		 *
		 * u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
		 *
		 * In order for lbas not to overflow, the maximum number is 15
		 * which means dev->max_transfer_shift = 15 + 9 (ns->lba_shift).
		 * Let's use 20 which provides 1MB size.
		 */
		dev->max_transfer_shift = 20;
	}

	free(ctrl);
	return 0;
}

int nvme_get_namespace_id(struct udevice *udev, u32 *ns_id, u8 *eui64)
{
	struct nvme_ns *ns = dev_get_priv(udev);

	if (ns_id)
		*ns_id = ns->ns_id;
	if (eui64)
		memcpy(eui64, ns->eui64, sizeof(ns->eui64));

	return 0;
}

int nvme_scan_namespace(void)
{
	struct uclass *uc;
	struct udevice *dev;
	int ret;

	ret = uclass_get(UCLASS_NVME, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			return ret;
	}

	return 0;
}

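/*
 * One block device is bound per namespace; the namespace ID is recovered
 * from the trailing number in the block device name (namespace IDs start
 * at 1, hence the "+ 1").
 */
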
static int nvme_blk_probe(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev->parent);
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	struct nvme_ns *ns = dev_get_priv(udev);
	u8 flbas;
	struct pci_child_platdata *pplat;
	struct nvme_id_ns *id;

	id = memalign(ndev->page_size, sizeof(struct nvme_id_ns));
	if (!id)
		return -ENOMEM;

	memset(ns, 0, sizeof(*ns));
	ns->dev = ndev;
	/* extract the namespace id from the block device name */
	ns->ns_id = trailing_strtol(udev->name) + 1;
	if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)(long)id)) {
		free(id);
		return -EIO;
	}

	memcpy(&ns->eui64, &id->eui64, sizeof(id->eui64));
	flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->flbas = flbas;
	ns->lba_shift = id->lbaf[flbas].ds;
	ns->mode_select_num_blocks = le64_to_cpu(id->nsze);
	ns->mode_select_block_len = 1 << ns->lba_shift;
	list_add(&ns->list, &ndev->namespaces);

	desc->lba = ns->mode_select_num_blocks;
	desc->log2blksz = ns->lba_shift;
	desc->blksz = 1 << ns->lba_shift;
	desc->bdev = udev;
	pplat = dev_get_parent_platdata(udev->parent);
	sprintf(desc->vendor, "0x%.4x", pplat->vendor);
	memcpy(desc->product, ndev->serial, sizeof(ndev->serial));
	memcpy(desc->revision, ndev->firmware_rev, sizeof(ndev->firmware_rev));

	free(id);
	return 0;
}

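/*
 * Transfers are split into chunks of at most (1 << dev->max_transfer_shift)
 * bytes. Each chunk is issued as a single NVMe read/write command whose PRPs
 * are built by nvme_setup_prps(), advancing the buffer pointer and starting
 * LBA until all requested blocks have been transferred.
 */
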
static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr,
			 lbaint_t blkcnt, void *buffer, bool read)
{
	struct nvme_ns *ns = dev_get_priv(udev);
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int status;
	u64 prp2;
	u64 total_len = blkcnt << desc->log2blksz;
	u64 temp_len = total_len;

	u64 slba = blknr;
	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
	u64 total_lbas = blkcnt;

	flush_dcache_range((unsigned long)buffer,
			   (unsigned long)buffer + total_len);

	c.rw.opcode = read ? nvme_cmd_read : nvme_cmd_write;
	c.rw.flags = 0;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.control = 0;
	c.rw.dsmgmt = 0;
	c.rw.reftag = 0;
	c.rw.apptag = 0;
	c.rw.appmask = 0;
	c.rw.metadata = 0;

	while (total_lbas) {
		if (total_lbas < lbas) {
			lbas = (u16)total_lbas;
			total_lbas = 0;
		} else {
			total_lbas -= lbas;
		}

		if (nvme_setup_prps(dev, &prp2,
				    lbas << ns->lba_shift, (ulong)buffer))
			return -EIO;
		c.rw.slba = cpu_to_le64(slba);
		slba += lbas;
		c.rw.length = cpu_to_le16(lbas - 1);
		c.rw.prp1 = cpu_to_le64((ulong)buffer);
		c.rw.prp2 = cpu_to_le64(prp2);
		status = nvme_submit_sync_cmd(dev->queues[NVME_IO_Q],
					      &c, NULL, IO_TIMEOUT);
		if (status)
			break;
		temp_len -= (u32)lbas << ns->lba_shift;
		buffer += lbas << ns->lba_shift;
	}

	if (read)
		invalidate_dcache_range((unsigned long)buffer,
					(unsigned long)buffer + total_len);

	return (total_len - temp_len) >> desc->log2blksz;
}

static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
			   lbaint_t blkcnt, void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, buffer, true);
}

static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr,
			    lbaint_t blkcnt, const void *buffer)
{
	return nvme_blk_rw(udev, blknr, blkcnt, (void *)buffer, false);
}

static const struct blk_ops nvme_blk_ops = {
	.read	= nvme_blk_read,
	.write	= nvme_blk_write,
};

U_BOOT_DRIVER(nvme_blk) = {
	.name	= "nvme-blk",
	.id	= UCLASS_BLK,
	.probe	= nvme_blk_probe,
	.ops	= &nvme_blk_ops,
	.priv_auto_alloc_size = sizeof(struct nvme_ns),
};

static int nvme_bind(struct udevice *udev)
{
	static int ndev_num;
	char name[20];

	sprintf(name, "nvme#%d", ndev_num++);

	return device_set_name(udev, name);
}

static int nvme_probe(struct udevice *udev)
{
	int ret;
	struct nvme_dev *ndev = dev_get_priv(udev);

	ndev->instance = trailing_strtol(udev->name);

	INIT_LIST_HEAD(&ndev->namespaces);
	ndev->bar = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0,
				   PCI_REGION_MEM);
	if (readl(&ndev->bar->csts) == -1) {
		ret = -ENODEV;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}

	ndev->queues = malloc(NVME_Q_NUM * sizeof(struct nvme_queue *));
	if (!ndev->queues) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	memset(ndev->queues, 0, NVME_Q_NUM * sizeof(struct nvme_queue *));

	ndev->cap = nvme_readq(&ndev->bar->cap);
	ndev->q_depth = min_t(int, NVME_CAP_MQES(ndev->cap) + 1, NVME_Q_DEPTH);
	ndev->db_stride = 1 << NVME_CAP_STRIDE(ndev->cap);
	ndev->dbs = ((void __iomem *)ndev->bar) + 4096;

	ret = nvme_configure_admin_queue(ndev);
	if (ret)
		goto free_queue;

	/* Allocate after the page size is known */
	ndev->prp_pool = memalign(ndev->page_size, MAX_PRP_POOL);
	if (!ndev->prp_pool) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	ndev->prp_entry_num = MAX_PRP_POOL >> 3;

	ret = nvme_setup_io_queues(ndev);
	if (ret)
		goto free_queue;

	nvme_get_info_from_identify(ndev);

	return 0;

free_queue:
	free((void *)ndev->queues);
free_nvme:
	return ret;
}

U_BOOT_DRIVER(nvme) = {
	.name	= "nvme",
	.id	= UCLASS_NVME,
	.bind	= nvme_bind,
	.probe	= nvme_probe,
	.priv_auto_alloc_size = sizeof(struct nvme_dev),
};

struct pci_device_id nvme_supported[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, ~0) },
	{}
};

U_BOOT_PCI_DEVICE(nvme, nvme_supported);