/*
 * QEMU paravirtual RDMA - Device rings
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "hw/pci/pci.h"
#include "cpu.h"

#include "trace.h"

#include "../rdma_utils.h"
#include "pvrdma_dev_ring.h"

int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev,
                     PvrdmaRingState *ring_state, uint32_t max_elems,
                     size_t elem_sz, dma_addr_t *tbl, uint32_t npages)
{
    int i;
    int rc = 0;

    pstrcpy(ring->name, MAX_RING_NAME_SZ, name);
    ring->dev = dev;
    ring->ring_state = ring_state;
    ring->max_elems = max_elems;
    ring->elem_sz = elem_sz;
    /* TODO: Decide whether the device should reset the indices that the
     * driver has already initialized:
     * qatomic_set(&ring->ring_state->prod_tail, 0);
     * qatomic_set(&ring->ring_state->cons_head, 0);
     */
    ring->npages = npages;
    ring->pages = g_malloc0(npages * sizeof(void *));

    /* Map each guest page of the ring into host memory and zero it */
    for (i = 0; i < npages; i++) {
        if (!tbl[i]) {
            rdma_error_report("npages=%d but tbl[%d] is NULL", npages, i);
            continue;
        }

        ring->pages[i] = rdma_pci_dma_map(dev, tbl[i], TARGET_PAGE_SIZE);
        if (!ring->pages[i]) {
            rc = -ENOMEM;
            rdma_error_report("Failed to map page %d in ring %s", i, name);
            goto out_free;
        }
        memset(ring->pages[i], 0, TARGET_PAGE_SIZE);
    }

    goto out;

out_free:
    /* Unmap whatever was mapped before the failure */
    while (i--) {
        rdma_pci_dma_unmap(dev, ring->pages[i], TARGET_PAGE_SIZE);
    }
    g_free(ring->pages);

out:
    return rc;
}
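
/*
 * Usage sketch (illustrative only, not part of the build): initialize a
 * ring over a guest-supplied page table. pci_dev, ring_state, tbl,
 * npages and num_cqe are placeholders for values the caller has already
 * mapped or parsed from the create command.
 *
 *     PvrdmaRing ring;
 *     int rc;
 *
 *     rc = pvrdma_ring_init(&ring, "example-cq", pci_dev, ring_state,
 *                           num_cqe, sizeof(struct pvrdma_cqe),
 *                           tbl, npages);
 *     if (rc) {
 *         return rc;
 *     }
 */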

void *pvrdma_ring_next_elem_read(PvrdmaRing *ring)
{
    unsigned int idx, offset;
    const uint32_t tail = qatomic_read(&ring->ring_state->prod_tail);
    const uint32_t head = qatomic_read(&ring->ring_state->cons_head);

    /*
     * head and tail run over [0, 2 * max_elems) so that an empty ring
     * (head == tail) is distinguishable from a full one. Bail out on
     * corrupt out-of-range indices or an empty ring.
     */
    if (tail & ~((ring->max_elems << 1) - 1) ||
        head & ~((ring->max_elems << 1) - 1) ||
        tail == head) {
        trace_pvrdma_ring_next_elem_read_no_data(ring->name);
        return NULL;
    }

    idx = head & (ring->max_elems - 1);
    offset = idx * ring->elem_sz;
    return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE);
}
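
/*
 * Worked example of the index scheme (illustrative): with max_elems = 8,
 * head and tail wrap modulo 16, i.e. each index carries an extra "lap"
 * bit on top of the 3 slot bits. head == tail means the ring is empty,
 * while tail == (head ^ 8) means the producer is exactly one lap ahead,
 * i.e. the ring is full. The slot addressed is always index & 7.
 */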

void pvrdma_ring_read_inc(PvrdmaRing *ring)
{
    uint32_t idx = qatomic_read(&ring->ring_state->cons_head);

    idx = (idx + 1) & ((ring->max_elems << 1) - 1);
    qatomic_set(&ring->ring_state->cons_head, idx);
}
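
/*
 * Consumer-side sketch (illustrative only): drain the ring, acknowledging
 * each element after it has been handled. handle_elem is a hypothetical
 * callback.
 *
 *     void *e;
 *
 *     while ((e = pvrdma_ring_next_elem_read(ring))) {
 *         handle_elem(e);
 *         pvrdma_ring_read_inc(ring);
 *     }
 */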

void *pvrdma_ring_next_elem_write(PvrdmaRing *ring)
{
    unsigned int idx, offset;
    const uint32_t tail = qatomic_read(&ring->ring_state->prod_tail);
    const uint32_t head = qatomic_read(&ring->ring_state->cons_head);

    /*
     * The ring is full when the producer index is exactly one lap ahead
     * of the consumer, i.e. the two differ only in the wrap bit.
     */
    if (tail & ~((ring->max_elems << 1) - 1) ||
        head & ~((ring->max_elems << 1) - 1) ||
        tail == (head ^ ring->max_elems)) {
        rdma_error_report("Ring %s is full", ring->name);
        return NULL;
    }

    idx = tail & (ring->max_elems - 1);
    offset = idx * ring->elem_sz;
    return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE);
}

void pvrdma_ring_write_inc(PvrdmaRing *ring)
{
    uint32_t idx = qatomic_read(&ring->ring_state->prod_tail);

    idx = (idx + 1) & ((ring->max_elems << 1) - 1);
    qatomic_set(&ring->ring_state->prod_tail, idx);
}
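
/*
 * Producer-side sketch (illustrative only): reserve the next slot, fill
 * it, then publish it by advancing the tail. new_cqe is a hypothetical,
 * caller-prepared completion entry.
 *
 *     struct pvrdma_cqe *cqe = pvrdma_ring_next_elem_write(ring);
 *
 *     if (!cqe) {
 *         return -ENOSPC;
 *     }
 *     memcpy(cqe, &new_cqe, sizeof(*cqe));
 *     pvrdma_ring_write_inc(ring);
 */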

void pvrdma_ring_free(PvrdmaRing *ring)
{
    if (!ring || !ring->pages) {
        return;
    }

    while (ring->npages--) {
        rdma_pci_dma_unmap(ring->dev, ring->pages[ring->npages],
                           TARGET_PAGE_SIZE);
    }

    g_free(ring->pages);
    ring->pages = NULL;
}