/*
 * Copyright (C) 2017 NXP Semiconductors
 * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <memalign.h>
#include <pci.h>
#include <dm/device-internal.h>
#include "nvme.h"

struct nvme_info *nvme_info;
#define NVME_Q_DEPTH		2
#define NVME_AQ_DEPTH		2
#define NVME_SQ_SIZE(depth)	((depth) * sizeof(struct nvme_command))
#define NVME_CQ_SIZE(depth)	((depth) * sizeof(struct nvme_completion))
#define ADMIN_TIMEOUT		60
#define IO_TIMEOUT		30
#define MAX_PRP_POOL		512
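/*
 * Size sketch (per the NVMe spec, which fixes submission queue entries
 * at 64 bytes and completion queue entries at 16 bytes): with the depth
 * of 2 used here, NVME_SQ_SIZE(NVME_Q_DEPTH) is 128 bytes and
 * NVME_CQ_SIZE(NVME_Q_DEPTH) is 32 bytes per queue.
 */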
/*
 * An NVM Express queue. Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	struct nvme_command *sq_cmds;
	struct nvme_completion *cqes;
	wait_queue_head_t sq_full;
	u32 __iomem *q_db;
	u16 q_depth;
	s16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 cqe_seen;
	unsigned long cmdid_data[];
};
static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
{
	u32 bit = enabled ? NVME_CSTS_RDY : 0;

	/* Poll CSTS.RDY until it reflects the requested enable state */
	while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit)
		udelay(100);

	return 0;
}
static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
			   int total_len, u64 dma_addr)
{
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	u64 *prp_pool;
	int length = total_len;
	int i, nprps;

	/* The PRP1 entry in the command covers the rest of the first page */
	length -= (page_size - offset);
	if (length <= 0) {
		*prp2 = 0;
		return 0;
	}

	if (length)
		dma_addr += (page_size - offset);

	/* Two PRP entries suffice: PRP2 points directly at the second page */
	if (length <= page_size) {
		*prp2 = dma_addr;
		return 0;
	}

	/* Otherwise PRP2 must point at a PRP list, one entry per page */
	nprps = DIV_ROUND_UP(length, page_size);

	if (nprps > dev->prp_entry_num) {
		free(dev->prp_pool);
		dev->prp_pool = malloc(nprps << 3);
		if (!dev->prp_pool) {
			printf("Error: malloc prp_pool fail\n");
			return -ENOMEM;
		}
		dev->prp_entry_num = nprps;
	}

	prp_pool = dev->prp_pool;
	i = 0;
	while (nprps) {
		/* The last slot of a list page chains to the next list page */
		if (i == ((page_size >> 3) - 1)) {
			*(prp_pool + i) = cpu_to_le64((ulong)prp_pool +
					page_size);
			i = 0;
			prp_pool += page_size;
		}
		*(prp_pool + i++) = cpu_to_le64(dma_addr);
		dma_addr += page_size;
		nprps--;
	}
	*prp2 = (ulong)dev->prp_pool;

	return 0;
}
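/*
 * PRP example (illustrative, assuming a 4 KiB device page size): for a
 * page-aligned 16 KiB transfer, PRP1 in the command maps bytes 0-4095
 * directly, and nvme_setup_prps() fills prp_pool with three entries
 * covering bytes 4096-16383 and returns the pool address via *prp2.
 * A transfer of at most two pages skips the list and puts the second
 * page's address straight into *prp2.
 */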
static __le16 nvme_get_cmd_id(void)
{
	static unsigned short cmdid;

	return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0);
}
static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
{
	u64 start = (ulong)&nvmeq->cqes[index];
	u64 stop = start + sizeof(struct nvme_completion);

	invalidate_dcache_range(start, stop);

	return le16_to_cpu(readw(&(nvmeq->cqes[index].status)));
}
/**
 * nvme_submit_cmd() - copy a command into a queue and ring the doorbell
 *
 * @nvmeq:	The queue to use
 * @cmd:	The command to send
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	u16 tail = nvmeq->sq_tail;

	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));

	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
				struct nvme_command *cmd,
				u32 *result, unsigned timeout)
{
	u16 head = nvmeq->cq_head;
	u16 phase = nvmeq->cq_phase;
	u16 status;
	ulong start_time;
	/* timeout is expressed in 100 ms units here */
	ulong timeout_us = timeout * 100000;

	cmd->common.command_id = nvme_get_cmd_id();
	nvme_submit_cmd(nvmeq, cmd);

	start_time = timer_get_us();

	for (;;) {
		status = nvme_read_completion_status(nvmeq, head);
		if ((status & 0x01) == phase)
			break;
		if (timeout_us > 0 && (timer_get_us() - start_time)
		    >= timeout_us)
			return -ETIMEDOUT;
	}

	status >>= 1;
	if (status) {
		printf("ERROR: status = %x, phase = %d, head = %d\n",
		       status, phase, head);
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
		nvmeq->cq_head = head;
		nvmeq->cq_phase = phase;

		return -EIO;
	}

	if (result)
		*result = le32_to_cpu(readl(&(nvmeq->cqes[head].result)));

	if (++head == nvmeq->q_depth) {
		head = 0;
		phase = !phase;
	}
	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return status;
}
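/*
 * Phase-tag walkthrough (a sketch, assuming q_depth = 2): cq_phase
 * starts at 1 and the controller writes phase tag 1 (bit 0 of the
 * status field) on its first pass through the ring, so the
 * "(status & 0x01) == phase" check above spots fresh entries. Once
 * head advances past the last slot it wraps to 0 and cq_phase flips
 * to 0, matching the controller's second-pass entries while ignoring
 * stale first-pass ones.
 */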
static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
				 u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
					   int qid, int depth)
{
	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
	if (!nvmeq)
		return NULL;
	memset(nvmeq, 0, sizeof(*nvmeq));

	nvmeq->cqes = (void *)memalign(4096, NVME_CQ_SIZE(depth));
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));

	nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
	if (!nvmeq->sq_cmds)
		goto free_cqes;
	memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));

	nvmeq->dev = dev;

	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	dev->queue_count++;
	dev->queues[qid] = nvmeq;

	return nvmeq;

 free_cqes:
	free((void *)nvmeq->cqes);
 free_nvmeq:
	free(nvmeq);

	return NULL;
}
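/*
 * Doorbell layout note (from the NVMe spec; dev->dbs is BAR0 + 0x1000
 * as set up in nvme_probe()): with CAP.DSTRD = 0, db_stride is 1 and
 * queue qid owns two u32 doorbells, dbs[2 * qid] for its SQ tail and
 * dbs[2 * qid + 1] for its CQ head. That is why q_db above is
 * &dev->dbs[qid * 2 * dev->db_stride] and the completion path writes
 * to q_db + db_stride.
 */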
static int nvme_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}
static int nvme_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_cq, cqid);
}
static int nvme_enable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config |= NVME_CC_ENABLE;
	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);

	return nvme_wait_ready(dev, true);
}

static int nvme_disable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config &= ~NVME_CC_ENABLE;
	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);

	return nvme_wait_ready(dev, false);
}
static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	free((void *)nvmeq->cqes);
	free(nvmeq->sq_cmds);
	free(nvmeq);
}
static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = dev->queues[i];
		dev->queue_count--;
		dev->queues[i] = NULL;
		nvme_free_queue(nvmeq);
	}
}
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
	flush_dcache_range((ulong)nvmeq->cqes,
			   (ulong)nvmeq->cqes + NVME_CQ_SIZE(nvmeq->q_depth));
	dev->online_queues++;
}
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = nvme_readq(&dev->bar->cap);
	struct nvme_queue *nvmeq;
	/* most architectures use 4KB as the page size */
	unsigned page_shift = 12;
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;

	if (page_shift < dev_page_min) {
		debug("Device minimum page size (%u) too large for host (%u)\n",
		      1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	if (page_shift > dev_page_max) {
		debug("Device maximum page size (%u) smaller than host (%u)\n",
		      1 << dev_page_max, 1 << page_shift);
		page_shift = dev_page_max;
	}

	result = nvme_disable_ctrl(dev);
	if (result < 0)
		return result;

	nvmeq = dev->queues[0];
	if (!nvmeq) {
		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
		if (!nvmeq)
			return -ENOMEM;
	}

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->page_size = 1 << page_shift;

	dev->ctrl_config = NVME_CC_CSS_NVM;
	dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(aqa, &dev->bar->aqa);
	nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
	nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);

	result = nvme_enable_ctrl(dev);
	if (result)
		goto free_nvmeq;

	nvmeq->cq_vector = 0;

	nvme_init_queue(dev->queues[0], 0);

	return result;

 free_nvmeq:
	nvme_free_queues(dev, 0);

	return result;
}
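/*
 * Admin queue bring-up sketch (per the NVMe spec): AQA bits 11:0 hold
 * the zero-based admin SQ size and bits 27:16 the zero-based admin CQ
 * size, so the depth-2 admin queue above programs aqa = 0x00010001.
 * ASQ/ACQ take the queue base addresses, and CC.EN is only set
 * afterwards by nvme_enable_ctrl().
 */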
static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}
static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}
int nvme_identify(struct nvme_dev *dev, unsigned nsid,
		  unsigned cns, dma_addr_t dma_addr)
{
	struct nvme_command c;
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	int length = sizeof(struct nvme_id_ctrl);

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);

	length -= (page_size - offset);
	if (length <= 0) {
		c.identify.prp2 = 0;
	} else {
		dma_addr += (page_size - offset);
		c.identify.prp2 = cpu_to_le64(dma_addr);
	}

	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}
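/*
 * Usage note: cns selects the Identify data structure. As used in this
 * driver, nvme_identify(dev, 0, 1, ...) in nvme_get_info_from_identify()
 * fetches the controller data, and nvme_identify(ndev, ns_id, 0, ...)
 * in nvme_blk_probe() fetches a namespace data structure.
 */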
int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	return nvme_submit_admin_cmd(dev, &c, result);
}
int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return nvme_submit_admin_cmd(dev, &c, result);
}
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

	nvmeq->cq_vector = qid - 1;
	result = nvme_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = nvme_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_sq;

	nvme_init_queue(nvmeq, qid);

	return result;

 release_sq:
	nvme_delete_sq(dev, qid);
 release_cq:
	nvme_delete_cq(dev, qid);

	return result;
}
static int nvme_set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES,
				   q_count, 0, &result);

	if (status < 0)
		return status;

	if (status > 1)
		return 0;

	return min(result & 0xffff, result >> 16) + 1;
}
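/*
 * Worked example (illustrative values): NVME_FEAT_NUM_QUEUES takes the
 * zero-based SQ count in bits 15:0 of dword11 and the CQ count in bits
 * 31:16, so asking for one queue pair sends q_count = 0. A controller
 * answering result = 0x001e001e has granted 31 of each, and this
 * function returns min(30, 30) + 1 = 31.
 */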
static void nvme_create_io_queues(struct nvme_dev *dev)
{
	int i;

	for (i = dev->queue_count; i <= dev->max_qid; i++)
		if (!nvme_alloc_queue(dev, i, dev->q_depth))
			break;

	for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
		if (nvme_create_queue(dev->queues[i], i))
			break;
}
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	int nr_io_queues;
	int result;

	nr_io_queues = 1;
	result = nvme_set_queue_count(dev, nr_io_queues);
	if (result <= 0)
		return result;

	if (result < nr_io_queues)
		nr_io_queues = result;

	dev->max_qid = nr_io_queues;

	/* Free previously allocated queues */
	nvme_free_queues(dev, nr_io_queues + 1);
	nvme_create_io_queues(dev);

	return 0;
}
static int nvme_get_info_from_identify(struct nvme_dev *dev)
{
	u16 vendor, device;
	struct nvme_id_ctrl buf, *ctrl = &buf;
	int ret;
	int shift = NVME_CAP_MPSMIN(nvme_readq(&dev->bar->cap)) + 12;

	ret = nvme_identify(dev, 0, 1, (dma_addr_t)ctrl);
	if (ret)
		return -EIO;

	dev->nn = le32_to_cpu(ctrl->nn);
	dev->vwc = ctrl->vwc;
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));

	if (ctrl->mdts) {
		dev->max_transfer_shift = (ctrl->mdts + shift);
	} else {
		/*
		 * The Maximum Data Transfer Size (MDTS) field indicates the
		 * maximum data transfer size between the host and the
		 * controller. The host should not submit a command that
		 * exceeds this transfer size. The value is in units of the
		 * minimum memory page size and is reported as a power of
		 * two (2^n).
		 *
		 * The spec also says: a value of 0h indicates no restrictions
		 * on transfer size. But in nvme_blk_read/write() below we
		 * have the following algorithm for maximum number of logic
		 * blocks per transfer:
		 *
		 * u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
		 *
		 * In order for lbas not to overflow, the maximum number is 15
		 * which means dev->max_transfer_shift = 15 + 9 (ns->lba_shift).
		 * Let's use 20 which provides 1MB size.
		 */
		dev->max_transfer_shift = 20;
	}

	/* Apply quirk stuff */
	dm_pci_read_config16(dev->pdev, PCI_VENDOR_ID, &vendor);
	dm_pci_read_config16(dev->pdev, PCI_DEVICE_ID, &device);
	if ((vendor == PCI_VENDOR_ID_INTEL) &&
	    (device == 0x0953) && ctrl->vs[3]) {
		unsigned int max_transfer_shift;

		dev->stripe_size = (ctrl->vs[3] + shift);
		max_transfer_shift = (ctrl->vs[3] + 18);
		if (dev->max_transfer_shift) {
			dev->max_transfer_shift = min(max_transfer_shift,
						      dev->max_transfer_shift);
		} else {
			dev->max_transfer_shift = max_transfer_shift;
		}
	}

	return 0;
}
int nvme_scan_namespace(void)
{
	struct uclass *uc;
	struct udevice *dev;
	int ret;

	ret = uclass_get(UCLASS_NVME, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			return ret;
	}

	return 0;
}
static int nvme_blk_probe(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev->parent);
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	struct nvme_ns *ns = dev_get_priv(udev);
	u8 flbas;
	u16 vendor;
	struct nvme_id_ns buf, *id = &buf;

	memset(ns, 0, sizeof(*ns));
	ns->dev = ndev;
	ns->ns_id = desc->devnum - ndev->blk_dev_start + 1;
	if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)id))
		return -EIO;

	flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->flbas = flbas;
	ns->lba_shift = id->lbaf[flbas].ds;
	ns->mode_select_num_blocks = le64_to_cpu(id->nsze);
	ns->mode_select_block_len = 1 << ns->lba_shift;
	list_add(&ns->list, &ndev->namespaces);

	desc->lba = ns->mode_select_num_blocks;
	desc->log2blksz = ns->lba_shift;
	desc->blksz = 1 << ns->lba_shift;

	dm_pci_read_config16(ndev->pdev, PCI_VENDOR_ID, &vendor);
	sprintf(desc->vendor, "0x%.4x", vendor);
	memcpy(desc->product, ndev->serial, sizeof(ndev->serial));
	memcpy(desc->revision, ndev->firmware_rev, sizeof(ndev->firmware_rev));

	return 0;
}
static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
			   lbaint_t blkcnt, void *buffer)
{
	struct nvme_ns *ns = dev_get_priv(udev);
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int status;
	u64 prp2;
	u64 total_len = blkcnt << desc->log2blksz;
	u64 temp_len = total_len;

	u64 slba = blknr;
	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
	u64 total_lbas = blkcnt;

	c.rw.opcode = nvme_cmd_read;
	c.rw.flags = 0;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.control = 0;
	c.rw.dsmgmt = 0;
	c.rw.reftag = 0;
	c.rw.apptag = 0;
	c.rw.appmask = 0;
	c.rw.metadata = 0;

	while (total_lbas) {
		/* Clamp the last chunk to the remaining block count */
		if (total_lbas < lbas) {
			lbas = (u16)total_lbas;
			total_lbas = 0;
		} else {
			total_lbas -= lbas;
		}

		if (nvme_setup_prps
		    (dev, &prp2, lbas << ns->lba_shift, (ulong)buffer))
			return -EIO;
		c.rw.slba = cpu_to_le64(slba);
		slba += lbas;
		c.rw.length = cpu_to_le16(lbas - 1);
		c.rw.prp1 = cpu_to_le64((ulong)buffer);
		c.rw.prp2 = cpu_to_le64(prp2);
		status = nvme_submit_sync_cmd(dev->queues[1],
				&c, NULL, IO_TIMEOUT);
		if (status)
			break;
		temp_len -= lbas << ns->lba_shift;
		buffer += lbas << ns->lba_shift;
	}

	return (total_len - temp_len) >> desc->log2blksz;
}
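/*
 * Chunking example (illustrative): with lbas = 2048 as above, a
 * 3000-block read is split into a 2048-block command followed by a
 * 952-block command; on error the loop breaks and the return value
 * reflects only the blocks actually transferred.
 */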
static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr,
			    lbaint_t blkcnt, const void *buffer)
{
	struct nvme_ns *ns = dev_get_priv(udev);
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int status;
	u64 prp2;
	u64 total_len = blkcnt << desc->log2blksz;
	u64 temp_len = total_len;

	u64 slba = blknr;
	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
	u64 total_lbas = blkcnt;

	c.rw.opcode = nvme_cmd_write;
	c.rw.flags = 0;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.control = 0;
	c.rw.dsmgmt = 0;
	c.rw.reftag = 0;
	c.rw.apptag = 0;
	c.rw.appmask = 0;
	c.rw.metadata = 0;

	while (total_lbas) {
		/* Clamp the last chunk to the remaining block count */
		if (total_lbas < lbas) {
			lbas = (u16)total_lbas;
			total_lbas = 0;
		} else {
			total_lbas -= lbas;
		}

		if (nvme_setup_prps
		    (dev, &prp2, lbas << ns->lba_shift, (ulong)buffer))
			return -EIO;
		c.rw.slba = cpu_to_le64(slba);
		slba += lbas;
		c.rw.length = cpu_to_le16(lbas - 1);
		c.rw.prp1 = cpu_to_le64((ulong)buffer);
		c.rw.prp2 = cpu_to_le64(prp2);
		status = nvme_submit_sync_cmd(dev->queues[1],
				&c, NULL, IO_TIMEOUT);
		if (status)
			break;
		temp_len -= lbas << ns->lba_shift;
		buffer += lbas << ns->lba_shift;
	}

	return (total_len - temp_len) >> desc->log2blksz;
}
static const struct blk_ops nvme_blk_ops = {
	.read	= nvme_blk_read,
	.write	= nvme_blk_write,
};

U_BOOT_DRIVER(nvme_blk) = {
	.name	= "nvme-blk",
	.id	= UCLASS_BLK,
	.probe	= nvme_blk_probe,
	.ops	= &nvme_blk_ops,
	.priv_auto_alloc_size = sizeof(struct nvme_ns),
};
static int nvme_bind(struct udevice *udev)
{
	char name[20];

	sprintf(name, "nvme#%d", nvme_info->ndev_num++);

	return device_set_name(udev, name);
}
static int nvme_probe(struct udevice *udev)
{
	int ret;
	struct nvme_dev *ndev = dev_get_priv(udev);
	u64 cap;

	ndev->pdev = pci_get_controller(udev);
	ndev->instance = trailing_strtol(udev->name);

	INIT_LIST_HEAD(&ndev->namespaces);
	ndev->bar = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0,
			PCI_REGION_MEM);
	if (readl(&ndev->bar->csts) == -1) {
		ret = -ENODEV;
		printf("Error: %s: Controller not accessible!\n", udev->name);
		goto free_nvme;
	}

	/* Room for the admin queue and one I/O queue pointer */
	ndev->queues = malloc(2 * sizeof(struct nvme_queue *));
	if (!ndev->queues) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	memset(ndev->queues, 0, 2 * sizeof(struct nvme_queue *));

	ndev->prp_pool = malloc(MAX_PRP_POOL);
	if (!ndev->prp_pool) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	/* 512-byte pool = 64 PRP entries of 8 bytes each */
	ndev->prp_entry_num = MAX_PRP_POOL >> 3;

	cap = nvme_readq(&ndev->bar->cap);
	ndev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
	ndev->db_stride = 1 << NVME_CAP_STRIDE(cap);
	ndev->dbs = ((void __iomem *)ndev->bar) + 4096;

	ret = nvme_configure_admin_queue(ndev);
	if (ret)
		goto free_queue;

	ret = nvme_setup_io_queues(ndev);
	if (ret)
		goto free_queue;

	nvme_get_info_from_identify(ndev);
	ndev->blk_dev_start = nvme_info->ns_num;
	list_add(&ndev->node, &nvme_info->dev_list);

	return 0;

free_queue:
	free((void *)ndev->queues);
free_nvme:
	return ret;
}
U_BOOT_DRIVER(nvme) = {
	.name	= "nvme",
	.id	= UCLASS_NVME,
	.bind	= nvme_bind,
	.probe	= nvme_probe,
	.priv_auto_alloc_size = sizeof(struct nvme_dev),
};

struct pci_device_id nvme_supported[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, ~0) },
	{}
};

U_BOOT_PCI_DEVICE(nvme, nvme_supported);