/*
 * QEMU paravirtual RDMA - Device rings
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "hw/pci/pci.h"
#include "cpu.h"

#include "trace.h"

#include "../rdma_utils.h"
#include "standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h"
#include "pvrdma_dev_ring.h"

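/*
 * Map a ring's guest pages into host memory and initialize its metadata.
 * @tbl holds @npages guest physical addresses, one per ring page; each page
 * is DMA-mapped and zeroed. Returns 0 on success or -ENOMEM if a page fails
 * to map, in which case all pages mapped so far are unmapped again.
 */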
int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev,
                     struct pvrdma_ring *ring_state, uint32_t max_elems,
                     size_t elem_sz, dma_addr_t *tbl, uint32_t npages)
{
    int i;
    int rc = 0;

    pstrcpy(ring->name, MAX_RING_NAME_SZ, name);
    ring->dev = dev;
    ring->ring_state = ring_state;
    ring->max_elems = max_elems;
    ring->elem_sz = elem_sz;
    /* TODO: Consider whether we want to reset the driver-provided ring state:
    atomic_set(&ring->ring_state->prod_tail, 0);
    atomic_set(&ring->ring_state->cons_head, 0);
    */
    ring->npages = npages;
    /* Zero-allocate so the error path never unmaps an uninitialized slot */
    ring->pages = g_new0(void *, npages);

    for (i = 0; i < npages; i++) {
        if (!tbl[i]) {
            rdma_error_report("npages=%d but tbl[%d] is NULL", npages, i);
            continue;
        }

        ring->pages[i] = rdma_pci_dma_map(dev, tbl[i], TARGET_PAGE_SIZE);
        if (!ring->pages[i]) {
            rc = -ENOMEM;
            rdma_error_report("Failed to map page %d of ring %s", i, name);
            goto out_free;
        }
        memset(ring->pages[i], 0, TARGET_PAGE_SIZE);
    }

    goto out;

out_free:
    while (i--) {
        rdma_pci_dma_unmap(dev, ring->pages[i], TARGET_PAGE_SIZE);
    }
    g_free(ring->pages);
    ring->pages = NULL;

out:
    return rc;
}

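/*
 * Return a pointer to the next element available for the device to read,
 * or NULL if the ring is empty. Does not advance the consumer index;
 * callers do that with pvrdma_ring_read_inc() once they are done.
 */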
void *pvrdma_ring_next_elem_read(PvrdmaRing *ring)
{
    int e;
    unsigned int idx = 0, offset;

    e = pvrdma_idx_ring_has_data(ring->ring_state, ring->max_elems, &idx);
    if (e <= 0) {
        trace_pvrdma_ring_next_elem_read_no_data(ring->name);
        return NULL;
    }

    /* Translate the element index to a page and an offset within it */
    offset = idx * ring->elem_sz;
    return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE);
}

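/*
 * Advance the consumer head after the element returned by
 * pvrdma_ring_next_elem_read() has been processed.
 */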
void pvrdma_ring_read_inc(PvrdmaRing *ring)
{
    pvrdma_idx_ring_inc(&ring->ring_state->cons_head, ring->max_elems);
}

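/*
 * Return a pointer to the next free slot the device can write into, or
 * NULL if the ring is full or its indices are inconsistent. Does not
 * advance the producer index; callers do that with pvrdma_ring_write_inc().
 */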
void *pvrdma_ring_next_elem_write(PvrdmaRing *ring)
{
    int idx;
    unsigned int offset, tail;

    idx = pvrdma_idx_ring_has_space(ring->ring_state, ring->max_elems, &tail);
    if (idx <= 0) {
        rdma_error_report("Ring %s is full", ring->name);
        return NULL;
    }

    idx = pvrdma_idx(&ring->ring_state->prod_tail, ring->max_elems);
    if (idx < 0 || tail != idx) {
        rdma_error_report("Invalid idx %d", idx);
        return NULL;
    }

    /* Translate the element index to a page and an offset within it */
    offset = idx * ring->elem_sz;
    return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE);
}

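/*
 * Advance the producer tail after the slot returned by
 * pvrdma_ring_next_elem_write() has been filled in.
 */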
void pvrdma_ring_write_inc(PvrdmaRing *ring)
{
    pvrdma_idx_ring_inc(&ring->ring_state->prod_tail, ring->max_elems);
}

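/*
 * Unmap all of a ring's guest pages and release the page table. Safe to
 * call on a NULL ring or on a ring whose pages were already freed.
 */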
void pvrdma_ring_free(PvrdmaRing *ring)
{
    if (!ring || !ring->pages) {
        return;
    }

    while (ring->npages--) {
        rdma_pci_dma_unmap(ring->dev, ring->pages[ring->npages],
                           TARGET_PAGE_SIZE);
    }

    g_free(ring->pages);
    ring->pages = NULL;
}