/*
 * NVMe block driver based on vfio
 *
 * Copyright 2016 - 2018 Red Hat, Inc.
 *
 * Authors:
 *   Fam Zheng <famz@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
#include "qemu/vfio-helpers.h"
#include "block/block_int.h"
#include "sysemu/replay.h"
#include "trace.h"

#include "block/nvme.h"

#define NVME_SQ_ENTRY_BYTES 64
#define NVME_CQ_ENTRY_BYTES 16
#define NVME_QUEUE_SIZE 128
#define NVME_BAR_SIZE 8192

typedef struct {
    int32_t head, tail;
    uint8_t *queue;
    uint64_t iova;
    /* Hardware MMIO register */
    volatile uint32_t *doorbell;
} NVMeQueue;
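
/*
 * Note: for a submission queue, 'doorbell' points at the queue's tail
 * doorbell (written in nvme_kick()); for a completion queue it points at
 * the head doorbell (written in nvme_process_completion()).
 */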

typedef struct {
    BlockCompletionFunc *cb;
    void *opaque;
    int cid;
    void *prp_list_page;
    uint64_t prp_list_iova;
    bool busy;
} NVMeRequest;
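
/*
 * Request slots carry a 1-based command identifier: slot i uses cid i + 1
 * (see nvme_create_queue_pair()), so a CID of 0 in a completion is always
 * invalid and completions map back to slots as q->reqs[cid - 1].
 */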

typedef struct {
    CoQueue free_req_queue;
    QemuMutex lock;

    /* Fields protected by BQL */
    int index;
    uint8_t *prp_list_pages;

    /* Fields protected by @lock */
    NVMeQueue sq, cq;
    int cq_phase;
    NVMeRequest reqs[NVME_QUEUE_SIZE];
    bool busy;
    int need_kick;
    int inflight;
} NVMeQueuePair;

/* Memory mapped registers */
typedef volatile struct {
    uint64_t cap;
    uint32_t vs;
    uint32_t intms;
    uint32_t intmc;
    uint32_t cc;
    uint32_t reserved0;
    uint32_t csts;
    uint32_t nssr;
    uint32_t aqa;
    uint64_t asq;
    uint64_t acq;
    uint32_t cmbloc;
    uint32_t cmbsz;
    uint8_t reserved1[0xec0];
    uint8_t cmd_set_specific[0x100];
    uint32_t doorbells[];
} NVMeRegs;

QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
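
/*
 * Per the NVMe spec, the doorbell registers start at offset 0x1000 of
 * BAR0; the build-time assertion above pins NVMeRegs to that layout.
 */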

typedef struct {
    AioContext *aio_context;
    QEMUVFIOState *vfio;
    NVMeRegs *regs;
    /* The submission/completion queue pairs.
     * [0]: admin queue.
     * [1..]: io queues.
     */
    NVMeQueuePair **queues;
    int nr_queues;
    size_t page_size;
    /* How many uint32_t elements does each doorbell entry take. */
    size_t doorbell_scale;
    bool write_cache_supported;
    EventNotifier irq_notifier;

    uint64_t nsze; /* Namespace size reported by identify command */
    int nsid;      /* The namespace id to read/write data. */
    int blkshift;

    uint64_t max_transfer;
    bool plugged;

    CoMutex dma_map_lock;
    CoQueue dma_flush_queue;

    /* Total size of mapped qiov, accessed under dma_map_lock */
    int dma_map_count;

    /* PCI address (required for nvme_refresh_filename()) */
    char *device;
} BDRVNVMeState;

#define NVME_BLOCK_OPT_DEVICE "device"
#define NVME_BLOCK_OPT_NAMESPACE "namespace"

static QemuOptsList runtime_opts = {
    .name = "nvme",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = NVME_BLOCK_OPT_DEVICE,
            .type = QEMU_OPT_STRING,
            .help = "NVMe PCI device address",
        },
        {
            .name = NVME_BLOCK_OPT_NAMESPACE,
            .type = QEMU_OPT_NUMBER,
            .help = "NVMe namespace",
        },
        { /* end of list */ }
    },
};

static void nvme_init_queue(BlockDriverState *bs, NVMeQueue *q,
                            int nentries, int entry_bytes, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    size_t bytes;
    int r;

    bytes = ROUND_UP(nentries * entry_bytes, s->page_size);
    q->head = q->tail = 0;
    q->queue = qemu_try_blockalign0(bs, bytes);

    if (!q->queue) {
        error_setg(errp, "Cannot allocate queue");
        return;
    }
    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
    if (r) {
        error_setg(errp, "Cannot map queue");
    }
}

static void nvme_free_queue_pair(BlockDriverState *bs, NVMeQueuePair *q)
{
    qemu_vfree(q->prp_list_pages);
    qemu_vfree(q->sq.queue);
    qemu_vfree(q->cq.queue);
    qemu_mutex_destroy(&q->lock);
    g_free(q);
}

static void nvme_free_req_queue_cb(void *opaque)
{
    NVMeQueuePair *q = opaque;

    qemu_mutex_lock(&q->lock);
    while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) {
        /* Retry all pending requests */
    }
    qemu_mutex_unlock(&q->lock);
}

static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
                                             int idx, int size,
                                             Error **errp)
{
    int i, r;
    BDRVNVMeState *s = bs->opaque;
    Error *local_err = NULL;
    NVMeQueuePair *q = g_new0(NVMeQueuePair, 1);
    uint64_t prp_list_iova;

    qemu_mutex_init(&q->lock);
    q->index = idx;
    qemu_co_queue_init(&q->free_req_queue);
    q->prp_list_pages = qemu_blockalign0(bs, s->page_size * NVME_QUEUE_SIZE);
    r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
                          s->page_size * NVME_QUEUE_SIZE,
                          false, &prp_list_iova);
    if (r) {
        goto fail;
    }
    for (i = 0; i < NVME_QUEUE_SIZE; i++) {
        NVMeRequest *req = &q->reqs[i];
        req->cid = i + 1;
        req->prp_list_page = q->prp_list_pages + i * s->page_size;
        req->prp_list_iova = prp_list_iova + i * s->page_size;
    }
    nvme_init_queue(bs, &q->sq, size, NVME_SQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    q->sq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale];

    nvme_init_queue(bs, &q->cq, size, NVME_CQ_ENTRY_BYTES, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    q->cq.doorbell = &s->regs->doorbells[(idx * 2 + 1) * s->doorbell_scale];

    return q;
fail:
    nvme_free_queue_pair(bs, q);
    return NULL;
}

/* With q->lock */
static void nvme_kick(BDRVNVMeState *s, NVMeQueuePair *q)
{
    if (s->plugged || !q->need_kick) {
        return;
    }
    trace_nvme_kick(s, q->index);
    assert(!(q->sq.tail & 0xFF00));
    /* Fence the write to submission queue entry before notifying the device. */
    smp_wmb();
    *q->sq.doorbell = cpu_to_le32(q->sq.tail);
    q->inflight += q->need_kick;
    q->need_kick = 0;
}

/* Find a free request element if any; otherwise:
 * a) if in coroutine context, wait for one to become available;
 * b) if not in coroutine context, return NULL.
 */
static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
{
    int i;
    NVMeRequest *req = NULL;

    qemu_mutex_lock(&q->lock);
    while (q->inflight + q->need_kick > NVME_QUEUE_SIZE - 2) {
        /* We have to leave one slot empty as that is the full queue case
         * (head == tail + 1). */
        if (qemu_in_coroutine()) {
            trace_nvme_free_req_queue_wait(q);
            qemu_co_queue_wait(&q->free_req_queue, &q->lock);
        } else {
            qemu_mutex_unlock(&q->lock);
            return NULL;
        }
    }
    for (i = 0; i < NVME_QUEUE_SIZE; i++) {
        if (!q->reqs[i].busy) {
            q->reqs[i].busy = true;
            req = &q->reqs[i];
            break;
        }
    }
    /* We have checked inflight and need_kick while holding q->lock, so one
     * free req must be available. */
    assert(req);
    qemu_mutex_unlock(&q->lock);
    return req;
}

static inline int nvme_translate_error(const NvmeCqe *c)
{
    uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
    if (status) {
        trace_nvme_error(le32_to_cpu(c->result),
                         le16_to_cpu(c->sq_head),
                         le16_to_cpu(c->sq_id),
                         le16_to_cpu(c->cid),
                         status);
    }
    switch (status) {
    case 0:
        return 0;
    case 1:
        return -ENOSYS;
    case 2:
        return -EINVAL;
    default:
        return -EIO;
    }
}
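
/*
 * Only the 8-bit Status Code field is extracted above; the Status Code
 * Type bits are discarded, so the -ENOSYS/-EINVAL mappings are exact for
 * generic command status (1: invalid opcode, 2: invalid field) and a
 * best-effort approximation for other status code types.
 */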

/* With q->lock */
static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
{
    bool progress = false;
    NVMeRequest *preq;
    NVMeRequest req;
    NvmeCqe *c;

    trace_nvme_process_completion(s, q->index, q->inflight);
    if (q->busy || s->plugged) {
        trace_nvme_process_completion_queue_busy(s, q->index);
        return false;
    }
    q->busy = true;
    assert(q->inflight >= 0);
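    /*
     * A completion entry is new only if its phase tag bit differs from
     * q->cq_phase; the driver flips cq_phase each time cq.head wraps
     * around, which is how fresh entries are told apart from stale ones
     * without any separate valid flag.
     */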
    while (q->inflight) {
        int16_t cid;
        c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
        if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
            break;
        }
        q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
        if (!q->cq.head) {
            q->cq_phase = !q->cq_phase;
        }
        cid = le16_to_cpu(c->cid);
        if (cid == 0 || cid > NVME_QUEUE_SIZE) {
            fprintf(stderr, "Unexpected CID in completion queue: %d\n", cid);
            continue;
        }
        assert(cid <= NVME_QUEUE_SIZE);
        trace_nvme_complete_command(s, q->index, cid);
        preq = &q->reqs[cid - 1];
        req = *preq;
        assert(req.cid == cid);
        assert(req.cb);
        preq->busy = false;
        preq->cb = preq->opaque = NULL;
        /* Drop the lock while invoking the callback; it may in turn submit
         * new requests to this same queue pair. */
        qemu_mutex_unlock(&q->lock);
        req.cb(req.opaque, nvme_translate_error(c));
        qemu_mutex_lock(&q->lock);
        q->inflight--;
        progress = true;
    }
    if (progress) {
        /* Notify the device so it can post more completions. */
        smp_mb_release();
        *q->cq.doorbell = cpu_to_le32(q->cq.head);
        if (!qemu_co_queue_empty(&q->free_req_queue)) {
            replay_bh_schedule_oneshot_event(s->aio_context,
                                             nvme_free_req_queue_cb, q);
        }
    }
    q->busy = false;
    return progress;
}

static void nvme_trace_command(const NvmeCmd *cmd)
{
    int i;

    for (i = 0; i < 8; ++i) {
        uint8_t *cmdp = (uint8_t *)cmd + i * 8;
        trace_nvme_submit_command_raw(cmdp[0], cmdp[1], cmdp[2], cmdp[3],
                                      cmdp[4], cmdp[5], cmdp[6], cmdp[7]);
    }
}

static void nvme_submit_command(BDRVNVMeState *s, NVMeQueuePair *q,
                                NVMeRequest *req,
                                NvmeCmd *cmd, BlockCompletionFunc cb,
                                void *opaque)
{
    assert(!req->cb);
    req->cb = cb;
    req->opaque = opaque;
    cmd->cid = cpu_to_le16(req->cid);

    trace_nvme_submit_command(s, q->index, req->cid);
    nvme_trace_command(cmd);
    qemu_mutex_lock(&q->lock);
    memcpy((uint8_t *)q->sq.queue +
           q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
    q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
    q->need_kick++;
    nvme_kick(s, q);
    nvme_process_completion(s, q);
    qemu_mutex_unlock(&q->lock);
}

static void nvme_cmd_sync_cb(void *opaque, int ret)
{
    int *pret = opaque;
    *pret = ret;
    aio_wait_kick();
}

static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
                         NvmeCmd *cmd)
{
    NVMeRequest *req;
    BDRVNVMeState *s = bs->opaque;
    int ret = -EINPROGRESS;
    req = nvme_get_free_req(q);
    if (!req) {
        return -EBUSY;
    }
    nvme_submit_command(s, q, req, cmd, nvme_cmd_sync_cb, &ret);

    BDRV_POLL_WHILE(bs, ret == -EINPROGRESS);
    return ret;
}

static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    NvmeIdCtrl *idctrl;
    NvmeIdNs *idns;
    NvmeLBAF *lbaf;
    uint8_t *resp;
    int r;
    uint64_t iova;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_IDENTIFY,
        .cdw10 = cpu_to_le32(0x1), /* CNS 0x1: identify controller */
    };

    resp = qemu_try_blockalign0(bs, sizeof(NvmeIdCtrl));
    if (!resp) {
        error_setg(errp, "Cannot allocate buffer for identify response");
        goto out;
    }
    idctrl = (NvmeIdCtrl *)resp;
    idns = (NvmeIdNs *)resp;
    r = qemu_vfio_dma_map(s->vfio, resp, sizeof(NvmeIdCtrl), true, &iova);
    if (r) {
        error_setg(errp, "Cannot map buffer for DMA");
        goto out;
    }
    cmd.prp1 = cpu_to_le64(iova);

    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to identify controller");
        goto out;
    }

    if (le32_to_cpu(idctrl->nn) < namespace) {
        error_setg(errp, "Invalid namespace");
        goto out;
    }
    s->write_cache_supported = le32_to_cpu(idctrl->vwc) & 0x1;
    s->max_transfer = (idctrl->mdts ? 1 << idctrl->mdts : 0) * s->page_size;
    /* For now the page list buffer per command is one page, to hold at most
     * s->page_size / sizeof(uint64_t) entries. */
    s->max_transfer = MIN_NON_ZERO(s->max_transfer,
                                   s->page_size / sizeof(uint64_t) * s->page_size);

    memset(resp, 0, 4096);

    cmd.cdw10 = 0; /* CNS 0x0: identify namespace */
    cmd.nsid = cpu_to_le32(namespace);
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to identify namespace");
        goto out;
    }

    s->nsze = le64_to_cpu(idns->nsze);
    lbaf = &idns->lbaf[NVME_ID_NS_FLBAS_INDEX(idns->flbas)];

    if (lbaf->ms) {
        error_setg(errp, "Namespaces with metadata are not yet supported");
        goto out;
    }

    if (lbaf->ds < BDRV_SECTOR_BITS || lbaf->ds > 12 ||
        (1 << lbaf->ds) > s->page_size)
    {
        error_setg(errp, "Namespace has unsupported block size (2^%d)",
                   lbaf->ds);
        goto out;
    }

    s->blkshift = lbaf->ds;
out:
    qemu_vfio_dma_unmap(s->vfio, resp);
    qemu_vfree(resp);
}

static bool nvme_poll_queues(BDRVNVMeState *s)
{
    bool progress = false;
    int i;

    for (i = 0; i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];
        qemu_mutex_lock(&q->lock);
        while (nvme_process_completion(s, q)) {
            /* Keep polling */
            progress = true;
        }
        qemu_mutex_unlock(&q->lock);
    }
    return progress;
}

static void nvme_handle_event(EventNotifier *n)
{
    BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);

    trace_nvme_handle_event(s);
    event_notifier_test_and_clear(n);
    nvme_poll_queues(s);
}

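/*
 * Add one more IO queue pair. Note the ordering: the completion queue is
 * created before the submission queue, since NVME_ADM_CMD_CREATE_SQ names
 * its completion queue via the CQID field in cdw11 and the controller
 * expects that queue to already exist.
 */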
static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    int n = s->nr_queues;
    NVMeQueuePair *q;
    NvmeCmd cmd;
    int queue_size = NVME_QUEUE_SIZE;

    q = nvme_create_queue_pair(bs, n, queue_size, errp);
    if (!q) {
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_CQ,
        .prp1 = cpu_to_le64(q->cq.iova),
        /* cdw10: queue size (0-based) in the high half, queue id in the low */
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        /* cdw11: interrupts enabled (bit 1), physically contiguous (bit 0) */
        .cdw11 = cpu_to_le32(0x3),
    };
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to create io queue [%d]", n);
        nvme_free_queue_pair(bs, q);
        return false;
    }
    cmd = (NvmeCmd) {
        .opcode = NVME_ADM_CMD_CREATE_SQ,
        .prp1 = cpu_to_le64(q->sq.iova),
        .cdw10 = cpu_to_le32(((queue_size - 1) << 16) | (n & 0xFFFF)),
        /* cdw11: completion queue id in the high half, contiguous (bit 0) */
        .cdw11 = cpu_to_le32(0x1 | (n << 16)),
    };
    if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
        error_setg(errp, "Failed to create io queue [%d]", n);
        nvme_free_queue_pair(bs, q);
        return false;
    }
    s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
    s->queues[n] = q;
    s->nr_queues++;
    return true;
}

static bool nvme_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);
    bool progress = false;

    trace_nvme_poll_cb(s);
    progress = nvme_poll_queues(s);
    return progress;
}

static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                     Error **errp)
{
    BDRVNVMeState *s = bs->opaque;
    int ret;
    uint64_t cap;
    uint64_t timeout_ms;
    uint64_t deadline, now;
    Error *local_err = NULL;

    qemu_co_mutex_init(&s->dma_map_lock);
    qemu_co_queue_init(&s->dma_flush_queue);
    s->device = g_strdup(device);
    s->nsid = namespace;
    s->aio_context = bdrv_get_aio_context(bs);
    ret = event_notifier_init(&s->irq_notifier, 0);
    if (ret) {
        error_setg(errp, "Failed to init event notifier");
        return ret;
    }

    s->vfio = qemu_vfio_open_pci(device, errp);
    if (!s->vfio) {
        ret = -EINVAL;
        goto out;
    }

    s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE, errp);
    if (!s->regs) {
        ret = -EINVAL;
        goto out;
    }

    /* Perform the initialization sequence described in NVMe spec section
     * "7.6.1 Initialization". */

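    /*
     * Fields of the 64-bit CAP register used below, per the NVMe spec:
     * bit 37 is CSS.NVM (NVM command set supported); bits 51:48 are MPSMIN
     * (minimum page size, 2^(12 + MPSMIN) bytes); bits 35:32 are DSTRD
     * (doorbell stride, 4 << DSTRD bytes); bits 31:24 are TO (worst-case
     * enable/disable timeout, in 500 ms units).
     */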
    cap = le64_to_cpu(s->regs->cap);
    if (!(cap & (1ULL << 37))) {
        error_setg(errp, "Device doesn't support NVMe command set");
        ret = -EINVAL;
        goto out;
    }

    s->page_size = MAX(4096, 1 << (12 + ((cap >> 48) & 0xF)));
    s->doorbell_scale = (4 << (((cap >> 32) & 0xF))) / sizeof(uint32_t);
    bs->bl.opt_mem_alignment = s->page_size;
    timeout_ms = MIN(500 * ((cap >> 24) & 0xFF), 30000);

    /* Reset device to get a clean state. */
    s->regs->cc = cpu_to_le32(le32_to_cpu(s->regs->cc) & 0xFE);
    /* Wait for CSTS.RDY = 0. */
    deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * 1000000ULL;
    while (le32_to_cpu(s->regs->csts) & 0x1) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to reset (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    /* Set up admin queue. */
    s->queues = g_new(NVMeQueuePair *, 1);
    s->queues[0] = nvme_create_queue_pair(bs, 0, NVME_QUEUE_SIZE, errp);
    if (!s->queues[0]) {
        ret = -EINVAL;
        goto out;
    }
    s->nr_queues = 1;
    QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
    s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
    s->regs->asq = cpu_to_le64(s->queues[0]->sq.iova);
    s->regs->acq = cpu_to_le64(s->queues[0]->cq.iova);

    /* After setting up all control registers we can enable the device now. */
    s->regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
                              (ctz32(NVME_SQ_ENTRY_BYTES) << 16) |
                              0x1);
    /* Wait for CSTS.RDY = 1. */
    now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    deadline = now + timeout_ms * 1000000;
    while (!(le32_to_cpu(s->regs->csts) & 0x1)) {
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
            error_setg(errp, "Timeout while waiting for device to start (%"
                             PRId64 " ms)",
                       timeout_ms);
            ret = -ETIMEDOUT;
            goto out;
        }
    }

    ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
    if (ret) {
        goto out;
    }
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, nvme_handle_event, nvme_poll_cb);

    nvme_identify(bs, namespace, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EIO;
        goto out;
    }

    /* Set up command queues. */
    if (!nvme_add_io_queue(bs, errp)) {
        ret = -EIO;
    }
out:
    /* Cleaning up is done in nvme_file_open() upon error. */
    return ret;
}

/* Parse a filename in the format of nvme://XXXX:XX:XX.X/X. Example:
 *
 *     nvme://0000:44:00.0/1
 *
 * where "nvme://" is a fixed protocol prefix, the middle part is the PCI
 * address, and the last part is the namespace number, starting from 1
 * according to the NVMe spec. */
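/*
 * One possible invocation, assuming the controller has been bound to
 * vfio-pci and an IOMMU is available:
 *
 *     qemu-system-x86_64 ... -drive file=nvme://0000:44:00.0/1,if=none,id=d0
 */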
static void nvme_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    int pref = strlen("nvme://");

    if (strlen(filename) > pref && !strncmp(filename, "nvme://", pref)) {
        const char *tmp = filename + pref;
        char *device;
        const char *namespace;
        unsigned long ns;
        const char *slash = strchr(tmp, '/');
        if (!slash) {
            qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, tmp);
            return;
        }
        device = g_strndup(tmp, slash - tmp);
        qdict_put_str(options, NVME_BLOCK_OPT_DEVICE, device);
        g_free(device);
        namespace = slash + 1;
        if (*namespace && qemu_strtoul(namespace, NULL, 10, &ns)) {
            error_setg(errp, "Invalid namespace '%s', positive number expected",
                       namespace);
            return;
        }
        qdict_put_str(options, NVME_BLOCK_OPT_NAMESPACE,
                      *namespace ? namespace : "1");
    }
}

static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
                                           Error **errp)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;
    NvmeCmd cmd = {
        .opcode = NVME_ADM_CMD_SET_FEATURES,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32(0x06), /* Volatile Write Cache feature id */
        .cdw11 = cpu_to_le32(enable ? 0x01 : 0x00),
    };

    ret = nvme_cmd_sync(bs, s->queues[0], &cmd);
    if (ret) {
        error_setg(errp, "Failed to configure NVMe write cache");
    }
    return ret;
}

static void nvme_close(BlockDriverState *bs)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < s->nr_queues; ++i) {
        nvme_free_queue_pair(bs, s->queues[i]);
    }
    g_free(s->queues);
    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, NULL, NULL);
    event_notifier_cleanup(&s->irq_notifier);
    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
    qemu_vfio_close(s->vfio);

    g_free(s->device);
}

static int nvme_file_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    const char *device;
    QemuOpts *opts;
    int namespace;
    int ret;
    BDRVNVMeState *s = bs->opaque;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &error_abort);
    device = qemu_opt_get(opts, NVME_BLOCK_OPT_DEVICE);
    if (!device) {
        error_setg(errp, "'" NVME_BLOCK_OPT_DEVICE "' option is required");
        qemu_opts_del(opts);
        return -EINVAL;
    }

    namespace = qemu_opt_get_number(opts, NVME_BLOCK_OPT_NAMESPACE, 1);
    ret = nvme_init(bs, device, namespace, errp);
    qemu_opts_del(opts);
    if (ret) {
        goto fail;
    }
    if (flags & BDRV_O_NOCACHE) {
        if (!s->write_cache_supported) {
            error_setg(errp,
                       "NVMe controller doesn't support write cache configuration");
            ret = -EINVAL;
        } else {
            ret = nvme_enable_disable_write_cache(bs, !(flags & BDRV_O_NOCACHE),
                                                  errp);
        }
        if (ret) {
            goto fail;
        }
    }
    bs->supported_write_flags = BDRV_REQ_FUA;
    return 0;
fail:
    nvme_close(bs);
    return ret;
}

static int64_t nvme_getlength(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    return s->nsze << s->blkshift;
}

static uint32_t nvme_get_blocksize(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(s->blkshift >= BDRV_SECTOR_BITS && s->blkshift <= 12);
    return UINT32_C(1) << s->blkshift;
}

static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
{
    uint32_t blocksize = nvme_get_blocksize(bs);
    bsz->phys = blocksize;
    bsz->log = blocksize;
    return 0;
}

/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_unmap_qiov(BlockDriverState *bs,
                                            QEMUIOVector *qiov)
{
    int r = 0;
    BDRVNVMeState *s = bs->opaque;

    s->dma_map_count -= qiov->size;
    if (!s->dma_map_count && !qemu_co_queue_empty(&s->dma_flush_queue)) {
        r = qemu_vfio_dma_reset_temporary(s->vfio);
        if (!r) {
            qemu_co_queue_restart_all(&s->dma_flush_queue);
        }
    }
    return r;
}

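/*
 * PRP layout produced by nvme_cmd_map_qiov() below: PRP1 always holds the
 * IOVA of the first page. With exactly two pages, PRP2 holds the second
 * page directly; with more, PRP2 points into the per-request PRP list,
 * skipping the first list slot because that page already went into PRP1.
 */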
/* Called with s->dma_map_lock */
static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
                                          NVMeRequest *req, QEMUIOVector *qiov)
{
    BDRVNVMeState *s = bs->opaque;
    uint64_t *pagelist = req->prp_list_page;
    int i, j, r;
    int entries = 0;

    assert(qiov->size);
    assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
    assert(qiov->size / s->page_size <= s->page_size / sizeof(uint64_t));
    for (i = 0; i < qiov->niov; ++i) {
        bool retry = true;
        uint64_t iova;
try_map:
        r = qemu_vfio_dma_map(s->vfio,
                              qiov->iov[i].iov_base,
                              qiov->iov[i].iov_len,
                              true, &iova);
        if (r == -ENOMEM && retry) {
            retry = false;
            trace_nvme_dma_flush_queue_wait(s);
            if (s->dma_map_count) {
                trace_nvme_dma_map_flush(s);
                qemu_co_queue_wait(&s->dma_flush_queue, &s->dma_map_lock);
            } else {
                r = qemu_vfio_dma_reset_temporary(s->vfio);
                if (r) {
                    goto fail;
                }
            }
            goto try_map;
        }
        if (r) {
            goto fail;
        }

        for (j = 0; j < qiov->iov[i].iov_len / s->page_size; j++) {
            pagelist[entries++] = cpu_to_le64(iova + j * s->page_size);
        }
        trace_nvme_cmd_map_qiov_iov(s, i, qiov->iov[i].iov_base,
                                    qiov->iov[i].iov_len / s->page_size);
    }

    s->dma_map_count += qiov->size;

    assert(entries <= s->page_size / sizeof(uint64_t));
    switch (entries) {
    case 0:
        abort();
    case 1:
        cmd->prp1 = pagelist[0];
        cmd->prp2 = 0;
        break;
    case 2:
        cmd->prp1 = pagelist[0];
        cmd->prp2 = pagelist[1];
        break;
    default:
        cmd->prp1 = pagelist[0];
        cmd->prp2 = cpu_to_le64(req->prp_list_iova + sizeof(uint64_t));
        break;
    }
    trace_nvme_cmd_map_qiov(s, cmd, req, qiov, entries);
    for (i = 0; i < entries; ++i) {
        trace_nvme_cmd_map_qiov_pages(s, i, pagelist[i]);
    }
    return 0;
fail:
    /* No need to unmap [0 - i) iovs even if we've failed, since we don't
     * increment s->dma_map_count. This is okay for fixed mapping memory areas
     * because they are already mapped before calling this function; for
     * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
     * calling qemu_vfio_dma_reset_temporary when necessary. */
    return r;
}

typedef struct {
    Coroutine *co;
    int ret;
    AioContext *ctx;
} NVMeCoData;

static void nvme_rw_cb_bh(void *opaque)
{
    NVMeCoData *data = opaque;
    qemu_coroutine_enter(data->co);
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NVMeCoData *data = opaque;
    data->ret = ret;
    if (!data->co) {
        /* The rw coroutine hasn't yielded, don't try to enter. */
        return;
    }
    replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);
}

static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov,
                                            bool is_write,
                                            int flags)
{
    int r;
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    NVMeRequest *req;

    uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
                     (flags & BDRV_REQ_FUA ? 1 << 30 : 0);
    NvmeCmd cmd = {
        .opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
        .nsid = cpu_to_le32(s->nsid),
        .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
        .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
        .cdw12 = cpu_to_le32(cdw12),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        req->busy = false;
        return r;
    }
    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    while (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    qemu_co_mutex_lock(&s->dma_map_lock);
    r = nvme_cmd_unmap_qiov(bs, qiov);
    qemu_co_mutex_unlock(&s->dma_map_lock);
    if (r) {
        return r;
    }

    trace_nvme_rw_done(s, is_write, offset, bytes, data.ret);
    return data.ret;
}

static inline bool nvme_qiov_aligned(BlockDriverState *bs,
                                     const QEMUIOVector *qiov)
{
    int i;
    BDRVNVMeState *s = bs->opaque;

    for (i = 0; i < qiov->niov; ++i) {
        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
            trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                      qiov->iov[i].iov_len, s->page_size);
            return false;
        }
    }
    return true;
}

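/*
 * If the request's buffers are not all page-aligned, bounce the data
 * through a page-aligned scratch buffer: writes are copied in before
 * submission, reads are copied out after completion.
 */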
static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
                       QEMUIOVector *qiov, bool is_write, int flags)
{
    BDRVNVMeState *s = bs->opaque;
    int r;
    uint8_t *buf = NULL;
    QEMUIOVector local_qiov;

    assert(QEMU_IS_ALIGNED(offset, s->page_size));
    assert(QEMU_IS_ALIGNED(bytes, s->page_size));
    assert(bytes <= s->max_transfer);
    if (nvme_qiov_aligned(bs, qiov)) {
        return nvme_co_prw_aligned(bs, offset, bytes, qiov, is_write, flags);
    }
    trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
    buf = qemu_try_blockalign(bs, bytes);

    if (!buf) {
        return -ENOMEM;
    }
    qemu_iovec_init(&local_qiov, 1);
    if (is_write) {
        qemu_iovec_to_buf(qiov, 0, buf, bytes);
    }
    qemu_iovec_add(&local_qiov, buf, bytes);
    r = nvme_co_prw_aligned(bs, offset, bytes, &local_qiov, is_write, flags);
    qemu_iovec_destroy(&local_qiov);
    if (!r && !is_write) {
        qemu_iovec_from_buf(qiov, 0, buf, bytes);
    }
    qemu_vfree(buf);
    return r;
}

static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
                                       uint64_t offset, uint64_t bytes,
                                       QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
}

static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
                                        uint64_t offset, uint64_t bytes,
                                        QEMUIOVector *qiov, int flags)
{
    return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
}

static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    NVMeQueuePair *ioq = s->queues[1];
    NVMeRequest *req;
    NvmeCmd cmd = {
        .opcode = NVME_CMD_FLUSH,
        .nsid = cpu_to_le32(s->nsid),
    };
    NVMeCoData data = {
        .ctx = bdrv_get_aio_context(bs),
        .ret = -EINPROGRESS,
    };

    assert(s->nr_queues > 1);
    req = nvme_get_free_req(ioq);
    assert(req);
    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);

    data.co = qemu_coroutine_self();
    if (data.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }

    return data.ret;
}

static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void nvme_refresh_filename(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nvme://%s/%i",
             s->device, s->nsid);
}

static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNVMeState *s = bs->opaque;

    bs->bl.opt_mem_alignment = s->page_size;
    bs->bl.request_alignment = s->page_size;
    bs->bl.max_transfer = s->max_transfer;
}

static void nvme_detach_aio_context(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;

    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                           false, NULL, NULL);
}

static void nvme_attach_aio_context(BlockDriverState *bs,
                                    AioContext *new_context)
{
    BDRVNVMeState *s = bs->opaque;

    s->aio_context = new_context;
    aio_set_event_notifier(new_context, &s->irq_notifier,
                           false, nvme_handle_event, nvme_poll_cb);
}

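/*
 * While plugged, nvme_kick() skips the doorbell write, so submissions pile
 * up in the in-memory submission queues; nvme_aio_unplug() then rings each
 * IO queue's doorbell once, batching the MMIO writes for all the requests
 * submitted in between.
 */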
static void nvme_aio_plug(BlockDriverState *bs)
{
    BDRVNVMeState *s = bs->opaque;
    assert(!s->plugged);
    s->plugged = true;
}

static void nvme_aio_unplug(BlockDriverState *bs)
{
    int i;
    BDRVNVMeState *s = bs->opaque;
    assert(s->plugged);
    s->plugged = false;
    for (i = 1; i < s->nr_queues; i++) {
        NVMeQueuePair *q = s->queues[i];
        qemu_mutex_lock(&q->lock);
        nvme_kick(s, q);
        nvme_process_completion(s, q);
        qemu_mutex_unlock(&q->lock);
    }
}

static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
{
    int ret;
    BDRVNVMeState *s = bs->opaque;

    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
    if (ret) {
        /* FIXME: we may run out of IOVA addresses after repeated
         * bdrv_register_buf/bdrv_unregister_buf, because qemu_vfio_dma_unmap
         * doesn't reclaim addresses for fixed mappings. */
        error_report("nvme_register_buf failed: %s", strerror(-ret));
    }
}

static void nvme_unregister_buf(BlockDriverState *bs, void *host)
{
    BDRVNVMeState *s = bs->opaque;

    qemu_vfio_dma_unmap(s->vfio, host);
}

static const char *const nvme_strong_runtime_opts[] = {
    NVME_BLOCK_OPT_DEVICE,
    NVME_BLOCK_OPT_NAMESPACE,

    NULL
};

static BlockDriver bdrv_nvme = {
    .format_name              = "nvme",
    .protocol_name            = "nvme",
    .instance_size            = sizeof(BDRVNVMeState),

    .bdrv_parse_filename      = nvme_parse_filename,
    .bdrv_file_open           = nvme_file_open,
    .bdrv_close               = nvme_close,
    .bdrv_getlength           = nvme_getlength,
    .bdrv_probe_blocksizes    = nvme_probe_blocksizes,

    .bdrv_co_preadv           = nvme_co_preadv,
    .bdrv_co_pwritev          = nvme_co_pwritev,
    .bdrv_co_flush_to_disk    = nvme_co_flush,
    .bdrv_reopen_prepare      = nvme_reopen_prepare,

    .bdrv_refresh_filename    = nvme_refresh_filename,
    .bdrv_refresh_limits      = nvme_refresh_limits,
    .strong_runtime_opts      = nvme_strong_runtime_opts,

    .bdrv_detach_aio_context  = nvme_detach_aio_context,
    .bdrv_attach_aio_context  = nvme_attach_aio_context,

    .bdrv_io_plug             = nvme_aio_plug,
    .bdrv_io_unplug           = nvme_aio_unplug,

    .bdrv_register_buf        = nvme_register_buf,
    .bdrv_unregister_buf      = nvme_unregister_buf,
};

static void bdrv_nvme_init(void)
{
    bdrv_register(&bdrv_nvme);
}

block_init(bdrv_nvme_init);