/*
 * QEMU paravirtual RDMA - Generic RDMA backend
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
15 | ||
0efc9511 | 16 | #include "qemu/osdep.h" |
2b05705d | 17 | #include "qapi/qapi-events-rdma.h" |
ef6d4ccd YS |
18 | |
19 | #include <infiniband/verbs.h> | |
20 | ||
2b05705d | 21 | #include "contrib/rdmacm-mux/rdmacm-mux.h" |
ef6d4ccd YS |
22 | #include "trace.h" |
23 | #include "rdma_utils.h" | |
24 | #include "rdma_rm.h" | |
25 | #include "rdma_backend.h" | |
26 | ||
ef6d4ccd | 27 | #define THR_NAME_LEN 16 |
75152227 | 28 | #define THR_POLL_TO 5000 |
ef6d4ccd | 29 | |
605ec166 YS |
30 | #define MAD_HDR_SIZE sizeof(struct ibv_grh) |
31 | ||
typedef struct BackendCtx {
    void *up_ctx;
    struct ibv_sge sge; /* Used to save MAD recv buffer */
    RdmaBackendQP *backend_qp; /* To maintain recv buffers */
    RdmaBackendSRQ *backend_srq;
} BackendCtx;

struct backend_umad {
    struct ib_user_mad hdr;
    char mad[RDMA_MAX_PRIVATE_DATA];
};

static void (*comp_handler)(void *ctx, struct ibv_wc *wc);

static void dummy_comp_handler(void *ctx, struct ibv_wc *wc)
{
    rdma_error_report("No completion handler is registered");
}

static inline void complete_work(enum ibv_wc_status status, uint32_t vendor_err,
                                 void *ctx)
{
    struct ibv_wc wc = {};

    wc.status = status;
    wc.vendor_err = vendor_err;

    comp_handler(ctx, &wc);
}

static void free_cqe_ctx(gpointer data, gpointer user_data)
{
    BackendCtx *bctx;
    RdmaDeviceResources *rdma_dev_res = user_data;
    unsigned long cqe_ctx_id = GPOINTER_TO_INT(data);

    bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, cqe_ctx_id);
    if (bctx) {
        rdma_rm_dealloc_cqe_ctx(rdma_dev_res, cqe_ctx_id);
        qatomic_dec(&rdma_dev_res->stats.missing_cqe);
    }
    g_free(bctx);
}

static void clean_recv_mads(RdmaBackendDev *backend_dev)
{
    unsigned long cqe_ctx_id;

    do {
        cqe_ctx_id = rdma_protected_gqueue_pop_int64(&backend_dev->
                                                     recv_mads_list);
        if (cqe_ctx_id != -ENOENT) {
            qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
            free_cqe_ctx(GINT_TO_POINTER(cqe_ctx_id),
                         backend_dev->rdma_dev_res);
        }
    } while (cqe_ctx_id != -ENOENT);
}

static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
{
    int i, ne, total_ne = 0;
    BackendCtx *bctx;
    struct ibv_wc wc[2];
    RdmaProtectedGSList *cqe_ctx_list;

    WITH_QEMU_LOCK_GUARD(&rdma_dev_res->lock) {
        do {
            ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);

            trace_rdma_poll_cq(ne, ibcq);

            for (i = 0; i < ne; i++) {
                bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
                if (unlikely(!bctx)) {
                    rdma_error_report("No matching ctx for req %"PRId64,
                                      wc[i].wr_id);
                    continue;
                }

                comp_handler(bctx->up_ctx, &wc[i]);

                if (bctx->backend_qp) {
                    cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
                } else {
                    cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
                }

                rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
                rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
                g_free(bctx);
            }
            total_ne += ne;
        } while (ne > 0);
        qatomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
    }

    if (ne < 0) {
        rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
    }

    rdma_dev_res->stats.completions += total_ne;

    return total_ne;
}

static void *comp_handler_thread(void *arg)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)arg;
    int rc;
    struct ibv_cq *ev_cq;
    void *ev_ctx;
    int flags;
    GPollFD pfds[1];

    /* Change to non-blocking mode */
    flags = fcntl(backend_dev->channel->fd, F_GETFL);
    rc = fcntl(backend_dev->channel->fd, F_SETFL, flags | O_NONBLOCK);
    if (rc < 0) {
        rdma_error_report("Failed to change backend channel FD to non-blocking");
        return NULL;
    }

    pfds[0].fd = backend_dev->channel->fd;
    pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR;

    backend_dev->comp_thread.is_running = true;

    while (backend_dev->comp_thread.run) {
        do {
            rc = qemu_poll_ns(pfds, 1, THR_POLL_TO * (int64_t)SCALE_MS);
            if (!rc) {
                backend_dev->rdma_dev_res->stats.poll_cq_ppoll_to++;
            }
        } while (!rc && backend_dev->comp_thread.run);

        if (backend_dev->comp_thread.run) {
            rc = ibv_get_cq_event(backend_dev->channel, &ev_cq, &ev_ctx);
            if (unlikely(rc)) {
                rdma_error_report("ibv_get_cq_event fail, rc=%d, errno=%d", rc,
                                  errno);
                continue;
            }

            rc = ibv_req_notify_cq(ev_cq, 0);
            if (unlikely(rc)) {
                rdma_error_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc,
                                  errno);
            }

            backend_dev->rdma_dev_res->stats.poll_cq_from_bk++;
            rdma_poll_cq(backend_dev->rdma_dev_res, ev_cq);

            ibv_ack_cq_events(ev_cq, 1);
        }
    }

    backend_dev->comp_thread.is_running = false;

    qemu_thread_exit(0);

    return NULL;
}

static inline void disable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
{
    qatomic_set(&backend_dev->rdmacm_mux.can_receive, 0);
}

static inline void enable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
{
    qatomic_set(&backend_dev->rdmacm_mux.can_receive, sizeof(RdmaCmMuxMsg));
}

static inline int rdmacm_mux_can_process_async(RdmaBackendDev *backend_dev)
{
    return qatomic_read(&backend_dev->rdmacm_mux.can_receive);
}

static int rdmacm_mux_check_op_status(CharBackend *mad_chr_be)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    ret = qemu_chr_fe_read_all(mad_chr_be, (uint8_t *)&msg, sizeof(msg));
    if (ret != sizeof(msg)) {
        rdma_error_report("Got invalid message from mux: size %d, expecting %d",
                          ret, (int)sizeof(msg));
        return -EIO;
    }

    trace_rdmacm_mux_check_op_status(msg.hdr.msg_type, msg.hdr.op_code,
                                     msg.hdr.err_code);

    if (msg.hdr.msg_type != RDMACM_MUX_MSG_TYPE_RESP) {
        rdma_error_report("Got invalid message type %d", msg.hdr.msg_type);
        return -EIO;
    }

    if (msg.hdr.err_code != RDMACM_MUX_ERR_CODE_OK) {
        rdma_error_report("Operation failed in mux, error code %d",
                          msg.hdr.err_code);
        return -EIO;
    }

    return 0;
}

static int rdmacm_mux_send(RdmaBackendDev *backend_dev, RdmaCmMuxMsg *msg)
{
    int rc = 0;

    msg->hdr.msg_type = RDMACM_MUX_MSG_TYPE_REQ;
    trace_rdmacm_mux("send", msg->hdr.msg_type, msg->hdr.op_code);
    disable_rdmacm_mux_async(backend_dev);
    rc = qemu_chr_fe_write(backend_dev->rdmacm_mux.chr_be,
                           (const uint8_t *)msg, sizeof(*msg));
    if (rc != sizeof(*msg)) {
        enable_rdmacm_mux_async(backend_dev);
        rdma_error_report("Failed to send request to rdmacm_mux (rc=%d)", rc);
        return -EIO;
    }

    rc = rdmacm_mux_check_op_status(backend_dev->rdmacm_mux.chr_be);
    if (rc) {
        rdma_error_report("Failed to execute rdmacm_mux request %d (rc=%d)",
                          msg->hdr.op_code, rc);
    }

    enable_rdmacm_mux_async(backend_dev);

    return 0;
}

static void stop_backend_thread(RdmaBackendThread *thread)
{
    thread->run = false;
    while (thread->is_running) {
        sleep(THR_POLL_TO / SCALE_US / 2);
    }
}

static void start_comp_thread(RdmaBackendDev *backend_dev)
{
    char thread_name[THR_NAME_LEN] = {};

    stop_backend_thread(&backend_dev->comp_thread);

    snprintf(thread_name, sizeof(thread_name), "rdma_comp_%s",
             ibv_get_device_name(backend_dev->ib_dev));
    backend_dev->comp_thread.run = true;
    qemu_thread_create(&backend_dev->comp_thread.thread, thread_name,
                       comp_handler_thread, backend_dev, QEMU_THREAD_DETACHED);
}

void rdma_backend_register_comp_handler(void (*handler)(void *ctx,
                                                        struct ibv_wc *wc))
{
    comp_handler = handler;
}

void rdma_backend_unregister_comp_handler(void)
{
    rdma_backend_register_comp_handler(dummy_comp_handler);
}

int rdma_backend_query_port(RdmaBackendDev *backend_dev,
                            struct ibv_port_attr *port_attr)
{
    int rc;

    rc = ibv_query_port(backend_dev->context, backend_dev->port_num, port_attr);
    if (rc) {
        rdma_error_report("ibv_query_port fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

void rdma_backend_poll_cq(RdmaDeviceResources *rdma_dev_res, RdmaBackendCQ *cq)
{
    int polled;

    rdma_dev_res->stats.poll_cq_from_guest++;
    polled = rdma_poll_cq(rdma_dev_res, cq->ibcq);
    if (!polled) {
        rdma_dev_res->stats.poll_cq_from_guest_empty++;
    }
}

static GHashTable *ah_hash;

static struct ibv_ah *create_ah(RdmaBackendDev *backend_dev, struct ibv_pd *pd,
                                uint8_t sgid_idx, union ibv_gid *dgid)
{
    GBytes *ah_key = g_bytes_new(dgid, sizeof(*dgid));
    struct ibv_ah *ah = g_hash_table_lookup(ah_hash, ah_key);

    if (ah) {
        trace_rdma_create_ah_cache_hit(be64_to_cpu(dgid->global.subnet_prefix),
                                       be64_to_cpu(dgid->global.interface_id));
        g_bytes_unref(ah_key);
    } else {
        struct ibv_ah_attr ah_attr = {
            .is_global = 1,
            .port_num = backend_dev->port_num,
            .grh.hop_limit = 1,
        };

        ah_attr.grh.dgid = *dgid;
        ah_attr.grh.sgid_index = sgid_idx;

        ah = ibv_create_ah(pd, &ah_attr);
        if (ah) {
            g_hash_table_insert(ah_hash, ah_key, ah);
        } else {
            g_bytes_unref(ah_key);
            rdma_error_report("Failed to create AH for gid <0x%" PRIx64", 0x%"PRIx64">",
                              be64_to_cpu(dgid->global.subnet_prefix),
                              be64_to_cpu(dgid->global.interface_id));
        }

        trace_rdma_create_ah_cache_miss(be64_to_cpu(dgid->global.subnet_prefix),
                                        be64_to_cpu(dgid->global.interface_id));
    }

    return ah;
}

static void destroy_ah_hash_key(gpointer data)
{
    g_bytes_unref(data);
}

static void destroy_ah_hast_data(gpointer data)
{
    struct ibv_ah *ah = data;

    ibv_destroy_ah(ah);
}

static void ah_cache_init(void)
{
    ah_hash = g_hash_table_new_full(g_bytes_hash, g_bytes_equal,
                                    destroy_ah_hash_key, destroy_ah_hast_data);
}

#ifdef LEGACY_RDMA_REG_MR
static int build_host_sge_array(RdmaDeviceResources *rdma_dev_res,
                                struct ibv_sge *sge, uint8_t num_sge,
                                uint64_t *total_length)
{
    RdmaRmMR *mr;
    int idx;

    for (idx = 0; idx < num_sge; idx++) {
        mr = rdma_rm_get_mr(rdma_dev_res, sge[idx].lkey);
        if (unlikely(!mr)) {
            rdma_error_report("Invalid lkey 0x%x", sge[idx].lkey);
            return VENDOR_ERR_INVLKEY | sge[idx].lkey;
        }

        sge[idx].addr = (uintptr_t)mr->virt + sge[idx].addr - mr->start;
        sge[idx].lkey = rdma_backend_mr_lkey(&mr->backend_mr);

        *total_length += sge[idx].length;
    }

    return 0;
}
#else
static inline int build_host_sge_array(RdmaDeviceResources *rdma_dev_res,
                                       struct ibv_sge *sge, uint8_t num_sge,
                                       uint64_t *total_length)
{
    int idx;

    for (idx = 0; idx < num_sge; idx++) {
        *total_length += sge[idx].length;
    }
    return 0;
}
#endif

static void trace_mad_message(const char *title, char *buf, int len)
{
    int i;
    char *b = g_malloc0(len * 3 + 1);
    char b1[4];

    for (i = 0; i < len; i++) {
        sprintf(b1, "%.2X ", buf[i] & 0x000000FF);
        strcat(b, b1);
    }

    trace_rdma_mad_message(title, len, b);

    g_free(b);
}

static int mad_send(RdmaBackendDev *backend_dev, uint8_t sgid_idx,
                    union ibv_gid *sgid, struct ibv_sge *sge, uint32_t num_sge)
{
    RdmaCmMuxMsg msg = {};
    char *hdr, *data;
    int ret;

    if (num_sge != 2) {
        return -EINVAL;
    }

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_MAD;
    memcpy(msg.hdr.sgid.raw, sgid->raw, sizeof(msg.hdr.sgid));

    msg.umad_len = sge[0].length + sge[1].length;

    if (msg.umad_len > sizeof(msg.umad.mad)) {
        return -ENOMEM;
    }

    msg.umad.hdr.addr.qpn = htobe32(1);
    msg.umad.hdr.addr.grh_present = 1;
    msg.umad.hdr.addr.gid_index = sgid_idx;
    memcpy(msg.umad.hdr.addr.gid, sgid->raw, sizeof(msg.umad.hdr.addr.gid));
    msg.umad.hdr.addr.hop_limit = 0xFF;

    hdr = rdma_pci_dma_map(backend_dev->dev, sge[0].addr, sge[0].length);
    if (!hdr) {
        return -ENOMEM;
    }
    data = rdma_pci_dma_map(backend_dev->dev, sge[1].addr, sge[1].length);
    if (!data) {
        rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);
        return -ENOMEM;
    }

    memcpy(&msg.umad.mad[0], hdr, sge[0].length);
    memcpy(&msg.umad.mad[sge[0].length], data, sge[1].length);

    rdma_pci_dma_unmap(backend_dev->dev, data, sge[1].length);
    rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);

    trace_mad_message("send", msg.umad.mad, msg.umad_len);

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to send MAD to rdma_umadmux (%d)", ret);
        return -EIO;
    }

    return 0;
}

void rdma_backend_post_send(RdmaBackendDev *backend_dev,
                            RdmaBackendQP *qp, uint8_t qp_type,
                            struct ibv_sge *sge, uint32_t num_sge,
                            uint8_t sgid_idx, union ibv_gid *sgid,
                            union ibv_gid *dgid, uint32_t dqpn, uint32_t dqkey,
                            void *ctx)
{
    BackendCtx *bctx;
    uint32_t bctx_id;
    int rc;
    struct ibv_send_wr wr = {}, *bad_wr;

    if (!qp->ibqp) { /* This field is not initialized for QP0 and QP1 */
        if (qp_type == IBV_QPT_SMI) {
            rdma_error_report("Got QP0 request");
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
        } else if (qp_type == IBV_QPT_GSI) {
            rc = mad_send(backend_dev, sgid_idx, sgid, sge, num_sge);
            if (rc) {
                complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_MAD_SEND, ctx);
                backend_dev->rdma_dev_res->stats.mad_tx_err++;
            } else {
                complete_work(IBV_WC_SUCCESS, 0, ctx);
                backend_dev->rdma_dev_res->stats.mad_tx++;
            }
        }
        return;
    }

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_qp = qp;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.tx_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    if (qp_type == IBV_QPT_UD) {
        wr.wr.ud.ah = create_ah(backend_dev, qp->ibpd, sgid_idx, dgid);
        if (!wr.wr.ud.ah) {
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
            goto err_dealloc_cqe_ctx;
        }
        wr.wr.ud.remote_qpn = dqpn;
        wr.wr.ud.remote_qkey = dqkey;
    }

    wr.num_sge = num_sge;
    wr.opcode = IBV_WR_SEND;
    wr.send_flags = IBV_SEND_SIGNALED;
    wr.sg_list = sge;
    wr.wr_id = bctx_id;

    rc = ibv_post_send(qp->ibqp, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_send fail, qpn=0x%x, rc=%d, errno=%d",
                          qp->ibqp->qp_num, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.tx++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.tx_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}

static unsigned int save_mad_recv_buffer(RdmaBackendDev *backend_dev,
                                         struct ibv_sge *sge, uint32_t num_sge,
                                         void *ctx)
{
    BackendCtx *bctx;
    int rc;
    uint32_t bctx_id;

    if (num_sge != 1) {
        rdma_error_report("Invalid num_sge (%d), expecting 1", num_sge);
        return VENDOR_ERR_INV_NUM_SGE;
    }

    if (sge[0].length < RDMA_MAX_PRIVATE_DATA + sizeof(struct ibv_grh)) {
        rdma_error_report("Too small buffer for MAD");
        return VENDOR_ERR_INV_MAD_BUFF;
    }

    bctx = g_malloc0(sizeof(*bctx));

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        g_free(bctx);
        return VENDOR_ERR_NOMEM;
    }

    bctx->up_ctx = ctx;
    bctx->sge = *sge;

    rdma_protected_gqueue_append_int64(&backend_dev->recv_mads_list, bctx_id);

    return 0;
}

void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
                            RdmaBackendQP *qp, uint8_t qp_type,
                            struct ibv_sge *sge, uint32_t num_sge, void *ctx)
{
    BackendCtx *bctx;
    uint32_t bctx_id;
    int rc;
    struct ibv_recv_wr wr = {}, *bad_wr;

    if (!qp->ibqp) { /* This field does not get initialized for QP0 and QP1 */
        if (qp_type == IBV_QPT_SMI) {
            rdma_error_report("Got QP0 request");
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
        }
        if (qp_type == IBV_QPT_GSI) {
            rc = save_mad_recv_buffer(backend_dev, sge, num_sge, ctx);
            if (rc) {
                complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
                backend_dev->rdma_dev_res->stats.mad_rx_bufs_err++;
            } else {
                backend_dev->rdma_dev_res->stats.mad_rx_bufs++;
            }
        }
        return;
    }

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_qp = qp;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.rx_bufs_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    wr.num_sge = num_sge;
    wr.sg_list = sge;
    wr.wr_id = bctx_id;
    rc = ibv_post_recv(qp->ibqp, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_recv fail, qpn=0x%x, rc=%d, errno=%d",
                          qp->ibqp->qp_num, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.rx_bufs++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.rx_bufs_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}

void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev,
                                RdmaBackendSRQ *srq, struct ibv_sge *sge,
                                uint32_t num_sge, void *ctx)
{
    BackendCtx *bctx;
    uint32_t bctx_id;
    int rc;
    struct ibv_recv_wr wr = {}, *bad_wr;

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_srq = srq;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&srq->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.rx_bufs_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    wr.num_sge = num_sge;
    wr.sg_list = sge;
    wr.wr_id = bctx_id;
    rc = ibv_post_srq_recv(srq->ibsrq, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_srq_recv fail, srqn=0x%x, rc=%d, errno=%d",
                          srq->ibsrq->handle, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.rx_bufs++;
    backend_dev->rdma_dev_res->stats.rx_srq++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.rx_bufs_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}

int rdma_backend_create_pd(RdmaBackendDev *backend_dev, RdmaBackendPD *pd)
{
    pd->ibpd = ibv_alloc_pd(backend_dev->context);

    if (!pd->ibpd) {
        rdma_error_report("ibv_alloc_pd fail, errno=%d", errno);
        return -EIO;
    }

    return 0;
}

void rdma_backend_destroy_pd(RdmaBackendPD *pd)
{
    if (pd->ibpd) {
        ibv_dealloc_pd(pd->ibpd);
    }
}

int rdma_backend_create_mr(RdmaBackendMR *mr, RdmaBackendPD *pd, void *addr,
                           size_t length, uint64_t guest_start, int access)
{
#ifdef LEGACY_RDMA_REG_MR
    mr->ibmr = ibv_reg_mr(pd->ibpd, addr, length, access);
#else
    mr->ibmr = ibv_reg_mr_iova(pd->ibpd, addr, length, guest_start, access);
#endif
    if (!mr->ibmr) {
        rdma_error_report("ibv_reg_mr fail, errno=%d", errno);
        return -EIO;
    }

    mr->ibpd = pd->ibpd;

    return 0;
}

void rdma_backend_destroy_mr(RdmaBackendMR *mr)
{
    if (mr->ibmr) {
        ibv_dereg_mr(mr->ibmr);
    }
}

int rdma_backend_create_cq(RdmaBackendDev *backend_dev, RdmaBackendCQ *cq,
                           int cqe)
{
    int rc;

    cq->ibcq = ibv_create_cq(backend_dev->context, cqe + 1, NULL,
                             backend_dev->channel, 0);
    if (!cq->ibcq) {
        rdma_error_report("ibv_create_cq fail, errno=%d", errno);
        return -EIO;
    }

    rc = ibv_req_notify_cq(cq->ibcq, 0);
    if (rc) {
        rdma_warn_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc, errno);
    }

    cq->backend_dev = backend_dev;

    return 0;
}

void rdma_backend_destroy_cq(RdmaBackendCQ *cq)
{
    if (cq->ibcq) {
        ibv_destroy_cq(cq->ibcq);
    }
}

int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type,
                           RdmaBackendPD *pd, RdmaBackendCQ *scq,
                           RdmaBackendCQ *rcq, RdmaBackendSRQ *srq,
                           uint32_t max_send_wr, uint32_t max_recv_wr,
                           uint32_t max_send_sge, uint32_t max_recv_sge)
{
    struct ibv_qp_init_attr attr = {};

    qp->ibqp = 0;

    switch (qp_type) {
    case IBV_QPT_GSI:
        return 0;

    case IBV_QPT_RC:
        /* fall through */
    case IBV_QPT_UD:
        /* do nothing */
        break;

    default:
        rdma_error_report("Unsupported QP type %d", qp_type);
        return -EIO;
    }

    attr.qp_type = qp_type;
    attr.send_cq = scq->ibcq;
    attr.recv_cq = rcq->ibcq;
    attr.cap.max_send_wr = max_send_wr;
    attr.cap.max_recv_wr = max_recv_wr;
    attr.cap.max_send_sge = max_send_sge;
    attr.cap.max_recv_sge = max_recv_sge;
    if (srq) {
        attr.srq = srq->ibsrq;
    }

    qp->ibqp = ibv_create_qp(pd->ibpd, &attr);
    if (!qp->ibqp) {
        rdma_error_report("ibv_create_qp fail, errno=%d", errno);
        return -EIO;
    }

    rdma_protected_gslist_init(&qp->cqe_ctx_list);

    qp->ibpd = pd->ibpd;

    /* TODO: Query QP to get max_inline_data and save it to be used in send */

    return 0;
}

int rdma_backend_qp_state_init(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                               uint8_t qp_type, uint32_t qkey)
{
    struct ibv_qp_attr attr = {};
    int rc, attr_mask;

    attr_mask = IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT;
    attr.qp_state = IBV_QPS_INIT;
    attr.pkey_index = 0;
    attr.port_num = backend_dev->port_num;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr_mask |= IBV_QP_ACCESS_FLAGS;
        trace_rdma_backend_rc_qp_state_init(qp->ibqp->qp_num);
        break;

    case IBV_QPT_UD:
        attr.qkey = qkey;
        attr_mask |= IBV_QP_QKEY;
        trace_rdma_backend_ud_qp_state_init(qp->ibqp->qp_num, qkey);
        break;

    default:
        rdma_error_report("Unsupported QP type %d", qp_type);
        return -EIO;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                              uint8_t qp_type, uint8_t sgid_idx,
                              union ibv_gid *dgid, uint32_t dqpn,
                              uint32_t rq_psn, uint32_t qkey, bool use_qkey)
{
    struct ibv_qp_attr attr = {};
    union ibv_gid ibv_gid = {
        .global.interface_id = dgid->global.interface_id,
        .global.subnet_prefix = dgid->global.subnet_prefix
    };
    int rc, attr_mask;

    attr.qp_state = IBV_QPS_RTR;
    attr_mask = IBV_QP_STATE;

    qp->sgid_idx = sgid_idx;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr.path_mtu = IBV_MTU_1024;
        attr.dest_qp_num = dqpn;
        attr.max_dest_rd_atomic = 1;
        attr.min_rnr_timer = 12;
        attr.ah_attr.port_num = backend_dev->port_num;
        attr.ah_attr.is_global = 1;
        attr.ah_attr.grh.hop_limit = 1;
        attr.ah_attr.grh.dgid = ibv_gid;
        attr.ah_attr.grh.sgid_index = qp->sgid_idx;
        attr.rq_psn = rq_psn;

        attr_mask |= IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN |
                     IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC |
                     IBV_QP_MIN_RNR_TIMER;

        trace_rdma_backend_rc_qp_state_rtr(qp->ibqp->qp_num,
                                           be64_to_cpu(ibv_gid.global.
                                                       subnet_prefix),
                                           be64_to_cpu(ibv_gid.global.
                                                       interface_id),
                                           qp->sgid_idx, dqpn, rq_psn);
        break;

    case IBV_QPT_UD:
        if (use_qkey) {
            attr.qkey = qkey;
            attr_mask |= IBV_QP_QKEY;
        }
        trace_rdma_backend_ud_qp_state_rtr(qp->ibqp->qp_num, use_qkey ? qkey :
                                           0);
        break;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

int rdma_backend_qp_state_rts(RdmaBackendQP *qp, uint8_t qp_type,
                              uint32_t sq_psn, uint32_t qkey, bool use_qkey)
{
    struct ibv_qp_attr attr = {};
    int rc, attr_mask;

    attr.qp_state = IBV_QPS_RTS;
    attr.sq_psn = sq_psn;
    attr_mask = IBV_QP_STATE | IBV_QP_SQ_PSN;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr.timeout = 14;
        attr.retry_cnt = 7;
        attr.rnr_retry = 7;
        attr.max_rd_atomic = 1;

        attr_mask |= IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY |
                     IBV_QP_MAX_QP_RD_ATOMIC;
        trace_rdma_backend_rc_qp_state_rts(qp->ibqp->qp_num, sq_psn);
        break;

    case IBV_QPT_UD:
        if (use_qkey) {
            attr.qkey = qkey;
            attr_mask |= IBV_QP_QKEY;
        }
        trace_rdma_backend_ud_qp_state_rts(qp->ibqp->qp_num, sq_psn,
                                           use_qkey ? qkey : 0);
        break;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

int rdma_backend_query_qp(RdmaBackendQP *qp, struct ibv_qp_attr *attr,
                          int attr_mask, struct ibv_qp_init_attr *init_attr)
{
    if (!qp->ibqp) {
        attr->qp_state = IBV_QPS_RTS;
        return 0;
    }

    return ibv_query_qp(qp->ibqp, attr, attr_mask, init_attr);
}

void rdma_backend_destroy_qp(RdmaBackendQP *qp, RdmaDeviceResources *dev_res)
{
    if (qp->ibqp) {
        ibv_destroy_qp(qp->ibqp);
    }
    g_slist_foreach(qp->cqe_ctx_list.list, free_cqe_ctx, dev_res);
    rdma_protected_gslist_destroy(&qp->cqe_ctx_list);
}

int rdma_backend_create_srq(RdmaBackendSRQ *srq, RdmaBackendPD *pd,
                            uint32_t max_wr, uint32_t max_sge,
                            uint32_t srq_limit)
{
    struct ibv_srq_init_attr srq_init_attr = {};

    srq_init_attr.attr.max_wr = max_wr;
    srq_init_attr.attr.max_sge = max_sge;
    srq_init_attr.attr.srq_limit = srq_limit;

    srq->ibsrq = ibv_create_srq(pd->ibpd, &srq_init_attr);
    if (!srq->ibsrq) {
        rdma_error_report("ibv_create_srq failed, errno=%d", errno);
        return -EIO;
    }

    rdma_protected_gslist_init(&srq->cqe_ctx_list);

    return 0;
}

int rdma_backend_query_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr)
{
    if (!srq->ibsrq) {
        return -EINVAL;
    }

    return ibv_query_srq(srq->ibsrq, srq_attr);
}

int rdma_backend_modify_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr,
                            int srq_attr_mask)
{
    if (!srq->ibsrq) {
        return -EINVAL;
    }

    return ibv_modify_srq(srq->ibsrq, srq_attr, srq_attr_mask);
}

void rdma_backend_destroy_srq(RdmaBackendSRQ *srq, RdmaDeviceResources *dev_res)
{
    if (srq->ibsrq) {
        ibv_destroy_srq(srq->ibsrq);
    }
    g_slist_foreach(srq->cqe_ctx_list.list, free_cqe_ctx, dev_res);
    rdma_protected_gslist_destroy(&srq->cqe_ctx_list);
}

#define CHK_ATTR(req, dev, member, fmt) ({ \
    trace_rdma_check_dev_attr(#member, dev.member, req->member); \
    if (req->member > dev.member) { \
        rdma_warn_report("%s = "fmt" is higher than host device capability "fmt, \
                         #member, req->member, dev.member); \
        req->member = dev.member; \
    } \
})

static int init_device_caps(RdmaBackendDev *backend_dev,
                            struct ibv_device_attr *dev_attr)
{
    struct ibv_device_attr bk_dev_attr;
    int rc;

    rc = ibv_query_device(backend_dev->context, &bk_dev_attr);
    if (rc) {
        rdma_error_report("ibv_query_device fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    dev_attr->max_sge = MAX_SGE;
    dev_attr->max_srq_sge = MAX_SGE;

    CHK_ATTR(dev_attr, bk_dev_attr, max_mr_size, "%" PRId64);
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_sge, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_cq, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_mr, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_pd, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp_rd_atom, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp_init_rd_atom, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_ah, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_srq, "%d");

    return 0;
}

static inline void build_mad_hdr(struct ibv_grh *grh, union ibv_gid *sgid,
                                 union ibv_gid *my_gid, int paylen)
{
    grh->paylen = htons(paylen);
    grh->sgid = *sgid;
    grh->dgid = *my_gid;
}

static void process_incoming_mad_req(RdmaBackendDev *backend_dev,
                                     RdmaCmMuxMsg *msg)
{
    unsigned long cqe_ctx_id;
    BackendCtx *bctx;
    char *mad;

    trace_mad_message("recv", msg->umad.mad, msg->umad_len);

    cqe_ctx_id = rdma_protected_gqueue_pop_int64(&backend_dev->recv_mads_list);
    if (cqe_ctx_id == -ENOENT) {
        rdma_warn_report("No more free MADs buffers, waiting for a while");
        sleep(THR_POLL_TO);
        return;
    }

    bctx = rdma_rm_get_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
    if (unlikely(!bctx)) {
        rdma_error_report("No matching ctx for req %ld", cqe_ctx_id);
        backend_dev->rdma_dev_res->stats.mad_rx_err++;
        return;
    }

    mad = rdma_pci_dma_map(backend_dev->dev, bctx->sge.addr,
                           bctx->sge.length);
    if (!mad || bctx->sge.length < msg->umad_len + MAD_HDR_SIZE) {
        backend_dev->rdma_dev_res->stats.mad_rx_err++;
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_INV_MAD_BUFF,
                      bctx->up_ctx);
    } else {
        struct ibv_wc wc = {};
        memset(mad, 0, bctx->sge.length);
        build_mad_hdr((struct ibv_grh *)mad,
                      (union ibv_gid *)&msg->umad.hdr.addr.gid, &msg->hdr.sgid,
                      msg->umad_len);
        memcpy(&mad[MAD_HDR_SIZE], msg->umad.mad, msg->umad_len);
        rdma_pci_dma_unmap(backend_dev->dev, mad, bctx->sge.length);

        wc.byte_len = msg->umad_len;
        wc.status = IBV_WC_SUCCESS;
        wc.wc_flags = IBV_WC_GRH;
        backend_dev->rdma_dev_res->stats.mad_rx++;
        comp_handler(bctx->up_ctx, &wc);
    }

    g_free(bctx);
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
}

static inline int rdmacm_mux_can_receive(void *opaque)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;

    return rdmacm_mux_can_process_async(backend_dev);
}

static void rdmacm_mux_read(void *opaque, const uint8_t *buf, int size)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;
    RdmaCmMuxMsg *msg = (RdmaCmMuxMsg *)buf;

    trace_rdmacm_mux("read", msg->hdr.msg_type, msg->hdr.op_code);

    if (msg->hdr.msg_type != RDMACM_MUX_MSG_TYPE_REQ &&
        msg->hdr.op_code != RDMACM_MUX_OP_CODE_MAD) {
        rdma_error_report("Error: Not a MAD request, skipping");
        return;
    }
    process_incoming_mad_req(backend_dev, msg);
}

static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be)
{
    int ret;

    backend_dev->rdmacm_mux.chr_be = mad_chr_be;

    ret = qemu_chr_fe_backend_connected(backend_dev->rdmacm_mux.chr_be);
    if (!ret) {
        rdma_error_report("Missing chardev for MAD multiplexer");
        return -EIO;
    }

    rdma_protected_gqueue_init(&backend_dev->recv_mads_list);

    enable_rdmacm_mux_async(backend_dev);

    qemu_chr_fe_set_handlers(backend_dev->rdmacm_mux.chr_be,
                             rdmacm_mux_can_receive, rdmacm_mux_read, NULL,
                             NULL, backend_dev, NULL, true);

    return 0;
}

static void mad_stop(RdmaBackendDev *backend_dev)
{
    clean_recv_mads(backend_dev);
}

static void mad_fini(RdmaBackendDev *backend_dev)
{
    disable_rdmacm_mux_async(backend_dev);
    qemu_chr_fe_disconnect(backend_dev->rdmacm_mux.chr_be);
    rdma_protected_gqueue_destroy(&backend_dev->recv_mads_list);
}

int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev,
                               union ibv_gid *gid)
{
    union ibv_gid sgid;
    int ret;
    int i = 0;

    do {
        ret = ibv_query_gid(backend_dev->context, backend_dev->port_num, i,
                            &sgid);
        i++;
    } while (!ret && (memcmp(&sgid, gid, sizeof(*gid))));

    trace_rdma_backend_get_gid_index(be64_to_cpu(gid->global.subnet_prefix),
                                     be64_to_cpu(gid->global.interface_id),
                                     i - 1);

    return ret ? ret : i - 1;
}

int rdma_backend_add_gid(RdmaBackendDev *backend_dev, const char *ifname,
                         union ibv_gid *gid)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    trace_rdma_backend_gid_change("add", be64_to_cpu(gid->global.subnet_prefix),
                                  be64_to_cpu(gid->global.interface_id));

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_REG;
    memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to register GID to rdma_umadmux (%d)", ret);
        return -EIO;
    }

    qapi_event_send_rdma_gid_status_changed(ifname, true,
                                            gid->global.subnet_prefix,
                                            gid->global.interface_id);

    return ret;
}

int rdma_backend_del_gid(RdmaBackendDev *backend_dev, const char *ifname,
                         union ibv_gid *gid)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    trace_rdma_backend_gid_change("del", be64_to_cpu(gid->global.subnet_prefix),
                                  be64_to_cpu(gid->global.interface_id));

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_UNREG;
    memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to unregister GID from rdma_umadmux (%d)",
                          ret);
        return -EIO;
    }

    qapi_event_send_rdma_gid_status_changed(ifname, false,
                                            gid->global.subnet_prefix,
                                            gid->global.interface_id);

    return 0;
}

1281 | ||
430e440c | 1282 | int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev, |
ef6d4ccd YS |
1283 | RdmaDeviceResources *rdma_dev_res, |
1284 | const char *backend_device_name, uint8_t port_num, | |
4d71b38a | 1285 | struct ibv_device_attr *dev_attr, CharBackend *mad_chr_be) |
ef6d4ccd YS |
1286 | { |
1287 | int i; | |
1288 | int ret = 0; | |
1289 | int num_ibv_devices; | |
ef6d4ccd | 1290 | struct ibv_device **dev_list; |
ef6d4ccd | 1291 | |
430e440c YS |
1292 | memset(backend_dev, 0, sizeof(*backend_dev)); |
1293 | ||
1294 | backend_dev->dev = pdev; | |
ef6d4ccd YS |
1295 | backend_dev->port_num = port_num; |
1296 | backend_dev->rdma_dev_res = rdma_dev_res; | |
1297 | ||
1298 | rdma_backend_register_comp_handler(dummy_comp_handler); | |
1299 | ||
1300 | dev_list = ibv_get_device_list(&num_ibv_devices); | |
1301 | if (!dev_list) { | |
4d71b38a | 1302 | rdma_error_report("Failed to get IB devices list"); |
ef6d4ccd YS |
1303 | return -EIO; |
1304 | } | |
1305 | ||
1306 | if (num_ibv_devices == 0) { | |
4d71b38a | 1307 | rdma_error_report("No IB devices were found"); |
ef6d4ccd YS |
1308 | ret = -ENXIO; |
1309 | goto out_free_dev_list; | |
1310 | } | |
1311 | ||
1312 | if (backend_device_name) { | |
1313 | for (i = 0; dev_list[i]; ++i) { | |
1314 | if (!strcmp(ibv_get_device_name(dev_list[i]), | |
1315 | backend_device_name)) { | |
1316 | break; | |
1317 | } | |
1318 | } | |
1319 | ||
1320 | backend_dev->ib_dev = dev_list[i]; | |
1321 | if (!backend_dev->ib_dev) { | |
4d71b38a YS |
1322 | rdma_error_report("Failed to find IB device %s", |
1323 | backend_device_name); | |
ef6d4ccd YS |
1324 | ret = -EIO; |
1325 | goto out_free_dev_list; | |
1326 | } | |
1327 | } else { | |
1328 | backend_dev->ib_dev = *dev_list; | |
1329 | } | |
1330 | ||
4d71b38a | 1331 | rdma_info_report("uverb device %s", backend_dev->ib_dev->dev_name); |
ef6d4ccd YS |
1332 | |
1333 | backend_dev->context = ibv_open_device(backend_dev->ib_dev); | |
1334 | if (!backend_dev->context) { | |
4d71b38a YS |
1335 | rdma_error_report("Failed to open IB device %s", |
1336 | ibv_get_device_name(backend_dev->ib_dev)); | |
ef6d4ccd YS |
1337 | ret = -EIO; |
1338 | goto out; | |
1339 | } | |
1340 | ||
1341 | backend_dev->channel = ibv_create_comp_channel(backend_dev->context); | |
1342 | if (!backend_dev->channel) { | |
4d71b38a | 1343 | rdma_error_report("Failed to create IB communication channel"); |
ef6d4ccd YS |
1344 | ret = -EIO; |
1345 | goto out_close_device; | |
1346 | } | |
ef6d4ccd | 1347 | |
ef6d4ccd YS |
1348 | ret = init_device_caps(backend_dev, dev_attr); |
1349 | if (ret) { | |
4d71b38a | 1350 | rdma_error_report("Failed to initialize device capabilities"); |
ef6d4ccd YS |
1351 | ret = -EIO; |
1352 | goto out_destroy_comm_channel; | |
1353 | } | |
1354 | ||
ef6d4ccd | 1355 | |
2b05705d | 1356 | ret = mad_init(backend_dev, mad_chr_be); |
605ec166 | 1357 | if (ret) { |
4d71b38a | 1358 | rdma_error_report("Failed to initialize mad"); |
605ec166 YS |
1359 | ret = -EIO; |
1360 | goto out_destroy_comm_channel; | |
1361 | } | |
1362 | ||
75152227 YS |
1363 | backend_dev->comp_thread.run = false; |
1364 | backend_dev->comp_thread.is_running = false; | |
ef6d4ccd YS |
1365 | |
1366 | ah_cache_init(); | |
1367 | ||
1368 | goto out_free_dev_list; | |
1369 | ||
1370 | out_destroy_comm_channel: | |
1371 | ibv_destroy_comp_channel(backend_dev->channel); | |
1372 | ||
1373 | out_close_device: | |
1374 | ibv_close_device(backend_dev->context); | |
1375 | ||
1376 | out_free_dev_list: | |
1377 | ibv_free_device_list(dev_list); | |
1378 | ||
1379 | out: | |
1380 | return ret; | |
1381 | } | |
1382 | ||
75152227 YS |
1383 | |
1384 | void rdma_backend_start(RdmaBackendDev *backend_dev) | |
1385 | { | |
75152227 YS |
1386 | start_comp_thread(backend_dev); |
1387 | } | |
1388 | ||
1389 | void rdma_backend_stop(RdmaBackendDev *backend_dev) | |
1390 | { | |
ff30a446 | 1391 | mad_stop(backend_dev); |
292dce62 | 1392 | stop_backend_thread(&backend_dev->comp_thread); |
75152227 YS |
1393 | } |
1394 | ||
ef6d4ccd YS |
1395 | void rdma_backend_fini(RdmaBackendDev *backend_dev) |
1396 | { | |
605ec166 | 1397 | mad_fini(backend_dev); |
ef6d4ccd YS |
1398 | g_hash_table_destroy(ah_hash); |
1399 | ibv_destroy_comp_channel(backend_dev->channel); | |
1400 | ibv_close_device(backend_dev->context); | |
1401 | } |