1 /*
2 * Copyright(c) 2016 - 2018 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
48 #include <linux/hash.h>
49 #include <linux/bitops.h>
50 #include <linux/lockdep.h>
51 #include <linux/vmalloc.h>
52 #include <linux/slab.h>
53 #include <rdma/ib_verbs.h>
54 #include <rdma/ib_hdrs.h>
55 #include <rdma/opa_addr.h>
56 #include <rdma/uverbs_ioctl.h>
57 #include "qp.h"
58 #include "vt.h"
59 #include "trace.h"
60
61 static void rvt_rc_timeout(struct timer_list *t);
62
63 /*
64 * Convert the AETH RNR timeout code into the number of microseconds.
65 */
66 static const u32 ib_rvt_rnr_table[32] = {
67 655360, /* 00: 655.36 */
68 10, /* 01: .01 */
69 20, /* 02: .02 */
70 30, /* 03: .03 */
71 40, /* 04: .04 */
72 60, /* 05: .06 */
73 80, /* 06: .08 */
74 120, /* 07: .12 */
75 160, /* 08: .16 */
76 240, /* 09: .24 */
77 320, /* 0A: .32 */
78 480, /* 0B: .48 */
79 640, /* 0C: .64 */
80 960, /* 0D: .96 */
81 1280, /* 0E: 1.28 */
82 1920, /* 0F: 1.92 */
83 2560, /* 10: 2.56 */
84 3840, /* 11: 3.84 */
85 5120, /* 12: 5.12 */
86 7680, /* 13: 7.68 */
87 10240, /* 14: 10.24 */
88 15360, /* 15: 15.36 */
89 20480, /* 16: 20.48 */
90 30720, /* 17: 30.72 */
91 40960, /* 18: 40.96 */
92 61440, /* 19: 61.44 */
93 81920, /* 1A: 81.92 */
94 122880, /* 1B: 122.88 */
95 163840, /* 1C: 163.84 */
96 245760, /* 1D: 245.76 */
97 327680, /* 1E: 327.68 */
98 491520 /* 1F: 491.52 */
99 };
100
101 /*
102 * Note that it is OK to post send work requests in the SQE and ERR
103 * states; rvt_do_send() will process them and generate error
104 * completions as per IB 1.2 C10-96.
105 */
106 const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
107 [IB_QPS_RESET] = 0,
108 [IB_QPS_INIT] = RVT_POST_RECV_OK,
109 [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
110 [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
111 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
112 RVT_PROCESS_NEXT_SEND_OK,
113 [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
114 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
115 [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
116 RVT_POST_SEND_OK | RVT_FLUSH_SEND,
117 [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
118 RVT_POST_SEND_OK | RVT_FLUSH_SEND,
119 };
120 EXPORT_SYMBOL(ib_rvt_state_ops);
121
122 /* platform specific: return the last level cache (llc) size, in KiB */
123 static int rvt_wss_llc_size(void)
124 {
125 /* assume that the boot CPU value is universal for all CPUs */
126 return boot_cpu_data.x86_cache_size;
127 }
128
129 /* platform specific: cacheless copy */
130 static void cacheless_memcpy(void *dst, void *src, size_t n)
131 {
132 /*
133 * Use the only available X64 cacheless copy. Add a __user cast
134 * to quiet sparse. The src argument is already in the kernel so
135 * there are no security issues. The extra fault recovery machinery
136 * is not invoked.
137 */
138 __copy_user_nocache(dst, (void __user *)src, n, 0);
139 }
140
141 void rvt_wss_exit(struct rvt_dev_info *rdi)
142 {
143 struct rvt_wss *wss = rdi->wss;
144
145 if (!wss)
146 return;
147
148 /* coded to handle partially initialized and repeat callers */
149 kfree(wss->entries);
150 wss->entries = NULL;
151 kfree(rdi->wss);
152 rdi->wss = NULL;
153 }
154
155 /**
156 * rvt_wss_init - Init wss data structures
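 * @rdi: rvt dev struct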
157 *
158 * Return: 0 on success
159 */
160 int rvt_wss_init(struct rvt_dev_info *rdi)
161 {
162 unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
163 unsigned int wss_threshold = rdi->dparms.wss_threshold;
164 unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
165 long llc_size;
166 long llc_bits;
167 long table_size;
168 long table_bits;
169 struct rvt_wss *wss;
170 int node = rdi->dparms.node;
171
172 if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
173 rdi->wss = NULL;
174 return 0;
175 }
176
177 rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
178 if (!rdi->wss)
179 return -ENOMEM;
180 wss = rdi->wss;
181
182 /* check for a valid percent range - default to 80 if none or invalid */
183 if (wss_threshold < 1 || wss_threshold > 100)
184 wss_threshold = 80;
185
186 /* reject a wildly large period */
187 if (wss_clean_period > 1000000)
188 wss_clean_period = 256;
189
190 /* reject a zero period */
191 if (wss_clean_period == 0)
192 wss_clean_period = 1;
193
194 /*
195 * Calculate the table size - the next power of 2 larger than the
196 * LLC size. LLC size is in KiB.
197 */
198 llc_size = rvt_wss_llc_size() * 1024;
199 table_size = roundup_pow_of_two(llc_size);
200
201 /* one bit per page in rounded up table */
202 llc_bits = llc_size / PAGE_SIZE;
203 table_bits = table_size / PAGE_SIZE;
204 wss->pages_mask = table_bits - 1;
205 wss->num_entries = table_bits / BITS_PER_LONG;
206
207 wss->threshold = (llc_bits * wss_threshold) / 100;
208 if (wss->threshold == 0)
209 wss->threshold = 1;
210
211 wss->clean_period = wss_clean_period;
212 atomic_set(&wss->clean_counter, wss_clean_period);
213
214 wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
215 GFP_KERNEL, node);
216 if (!wss->entries) {
217 rvt_wss_exit(rdi);
218 return -ENOMEM;
219 }
220
221 return 0;
222 }
223
224 /*
225 * Advance the clean counter. When the clean period has expired,
226 * clean an entry.
227 *
228 * This is implemented in atomics to avoid locking. Because multiple
229 * variables are involved, it can be racy which can lead to slightly
230 * inaccurate information. Since this is only a heuristic, this is
231 * OK. Any inaccuracies will clean themselves out as the counter
232 * advances. That said, it is unlikely the entry clean operation will
233 * race - the next possible racer will not start until the next clean
234 * period.
235 *
236 * The clean counter is implemented as a decrement to zero. When zero
237 * is reached an entry is cleaned.
238 */
239 static void wss_advance_clean_counter(struct rvt_wss *wss)
240 {
241 int entry;
242 int weight;
243 unsigned long bits;
244
245 /* become the cleaner if we decrement the counter to zero */
246 if (atomic_dec_and_test(&wss->clean_counter)) {
247 /*
248 * Set, not add, the clean period. This avoids an issue
249 * where the counter could decrement below the clean period.
250 * Doing a set can result in lost decrements, slowing the
251 * clean advance. Since this is a heuristic, this possible
252 * slowdown is OK.
253 *
254 * An alternative is to loop, advancing the counter by a
255 * clean period until the result is > 0. However, this could
256 * lead to several threads keeping another in the clean loop.
257 * This could be mitigated by limiting the number of times
258 * we stay in the loop.
259 */
260 atomic_set(&wss->clean_counter, wss->clean_period);
261
262 /*
263 * Uniquely grab the entry to clean and move to next.
264 * The current entry is always the lower bits of
265 * wss.clean_entry. The table size, wss.num_entries,
266 * is always a power-of-2.
267 */
268 entry = (atomic_inc_return(&wss->clean_entry) - 1)
269 & (wss->num_entries - 1);
270
271 /* clear the entry and count the bits */
272 bits = xchg(&wss->entries[entry], 0);
273 weight = hweight64((u64)bits);
274 /* only adjust the contended total count if needed */
275 if (weight)
276 atomic_sub(weight, &wss->total_count);
277 }
278 }
279
280 /*
281 * Insert the given address into the working set array.
282 */
283 static void wss_insert(struct rvt_wss *wss, void *address)
284 {
285 u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
286 u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
287 u32 nr = page & (BITS_PER_LONG - 1);
288
289 if (!test_and_set_bit(nr, &wss->entries[entry]))
290 atomic_inc(&wss->total_count);
291
292 wss_advance_clean_counter(wss);
293 }
294
295 /*
296 * Is the working set larger than the threshold?
297 */
298 static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
299 {
300 return atomic_read(&wss->total_count) >= wss->threshold;
301 }
302
303 static void get_map_page(struct rvt_qpn_table *qpt,
304 struct rvt_qpn_map *map)
305 {
306 unsigned long page = get_zeroed_page(GFP_KERNEL);
307
308 /*
309 * Free the page if someone raced with us installing it.
310 */
311
312 spin_lock(&qpt->lock);
313 if (map->page)
314 free_page(page);
315 else
316 map->page = (void *)page;
317 spin_unlock(&qpt->lock);
318 }
319
320 /**
321 * init_qpn_table - initialize the QP number table for a device
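 * @rdi: rvt dev struct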
322 * @qpt: the QPN table
323 */
324 static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
325 {
326 u32 offset, i;
327 struct rvt_qpn_map *map;
328 int ret = 0;
329
330 if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
331 return -EINVAL;
332
333 spin_lock_init(&qpt->lock);
334
335 qpt->last = rdi->dparms.qpn_start;
336 qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;
337
338 /*
339 * Drivers may want some QPs beyond what we need for verbs; let them use
340 * our qpn table. No need for two. Let's go ahead and mark the bitmaps
341 * for those. The reserved range must be *after* the range which verbs
342 * will pick from.
343 */
344
345 /* Figure out number of bit maps needed before reserved range */
346 qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;
347
348 /* This should always be zero */
349 offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;
350
351 /* Starting with the first reserved bit map */
352 map = &qpt->map[qpt->nmaps];
353
354 rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
355 rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
356 for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
357 if (!map->page) {
358 get_map_page(qpt, map);
359 if (!map->page) {
360 ret = -ENOMEM;
361 break;
362 }
363 }
364 set_bit(offset, map->page);
365 offset++;
366 if (offset == RVT_BITS_PER_PAGE) {
367 /* next page */
368 qpt->nmaps++;
369 map++;
370 offset = 0;
371 }
372 }
373 return ret;
374 }
375
376 /**
377 * free_qpn_table - free the QP number table for a device
378 * @qpt: the QPN table
379 */
380 static void free_qpn_table(struct rvt_qpn_table *qpt)
381 {
382 int i;
383
384 for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
385 free_page((unsigned long)qpt->map[i].page);
386 }
387
388 /**
389 * rvt_driver_qp_init - Init driver qp resources
390 * @rdi: rvt dev structure
391 *
392 * Return: 0 on success
393 */
394 int rvt_driver_qp_init(struct rvt_dev_info *rdi)
395 {
396 int i;
397 int ret = -ENOMEM;
398
399 if (!rdi->dparms.qp_table_size)
400 return -EINVAL;
401
402 /*
403 * If driver is not doing any QP allocation then make sure it is
404 * providing the necessary QP functions.
405 */
406 if (!rdi->driver_f.free_all_qps ||
407 !rdi->driver_f.qp_priv_alloc ||
408 !rdi->driver_f.qp_priv_free ||
409 !rdi->driver_f.notify_qp_reset ||
410 !rdi->driver_f.notify_restart_rc)
411 return -EINVAL;
412
413 /* allocate parent object */
414 rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
415 rdi->dparms.node);
416 if (!rdi->qp_dev)
417 return -ENOMEM;
418
419 /* allocate hash table */
420 rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
421 rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
422 rdi->qp_dev->qp_table =
423 kmalloc_array_node(rdi->qp_dev->qp_table_size,
424 sizeof(*rdi->qp_dev->qp_table),
425 GFP_KERNEL, rdi->dparms.node);
426 if (!rdi->qp_dev->qp_table)
427 goto no_qp_table;
428
429 for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
430 RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);
431
432 spin_lock_init(&rdi->qp_dev->qpt_lock);
433
434 /* initialize qpn map */
435 if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
436 goto fail_table;
437
438 spin_lock_init(&rdi->n_qps_lock);
439
440 return 0;
441
442 fail_table:
443 kfree(rdi->qp_dev->qp_table);
444 free_qpn_table(&rdi->qp_dev->qpn_table);
445
446 no_qp_table:
447 kfree(rdi->qp_dev);
448
449 return ret;
450 }
451
452 /**
453 * rvt_free_all_qps - check for QPs still in use
454 * @rdi: rvt device info structure
455 *
456 * There should not be any QPs still in use.
457 * Free memory for table.
458 */
459 static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
460 {
461 unsigned long flags;
462 struct rvt_qp *qp;
463 unsigned n, qp_inuse = 0;
464 spinlock_t *ql; /* work around too long line below */
465
466 if (rdi->driver_f.free_all_qps)
467 qp_inuse = rdi->driver_f.free_all_qps(rdi);
468
469 qp_inuse += rvt_mcast_tree_empty(rdi);
470
471 if (!rdi->qp_dev)
472 return qp_inuse;
473
474 ql = &rdi->qp_dev->qpt_lock;
475 spin_lock_irqsave(ql, flags);
476 for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
477 qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
478 lockdep_is_held(ql));
479 RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
480
481 for (; qp; qp = rcu_dereference_protected(qp->next,
482 lockdep_is_held(ql)))
483 qp_inuse++;
484 }
485 spin_unlock_irqrestore(ql, flags);
486 synchronize_rcu();
487 return qp_inuse;
488 }
489
490 /**
491 * rvt_qp_exit - clean up qps on device exit
492 * @rdi: rvt dev structure
493 *
494 * Check for qp leaks and free resources.
495 */
496 void rvt_qp_exit(struct rvt_dev_info *rdi)
497 {
498 u32 qps_inuse = rvt_free_all_qps(rdi);
499
500 if (qps_inuse)
501 rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
502 qps_inuse);
503 if (!rdi->qp_dev)
504 return;
505
506 kfree(rdi->qp_dev->qp_table);
507 free_qpn_table(&rdi->qp_dev->qpn_table);
508 kfree(rdi->qp_dev);
509 }
510
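/* translate a bitmap page and bit offset back into a QPN */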
511 static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
512 struct rvt_qpn_map *map, unsigned off)
513 {
514 return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
515 }
516
517 /**
518 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
519 * IB_QPT_SMI/IB_QPT_GSI
520 * @rdi: rvt device info structure
521 * @qpt: queue pair number table pointer
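 * @type: the QP type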
522 * @port_num: IB port number, 1 based, comes from core
523 *
524 * Return: The queue pair number
525 */
526 static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
527 enum ib_qp_type type, u8 port_num)
528 {
529 u32 i, offset, max_scan, qpn;
530 struct rvt_qpn_map *map;
531 u32 ret;
532
533 if (rdi->driver_f.alloc_qpn)
534 return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
535
536 if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
537 unsigned n;
538
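		/*
		 * QP0 (SMI) and QP1 (GSI) are tracked with one flag bit each
		 * per port: bit 2 * (port - 1) for QP0 and the next bit for QP1.
		 */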
539 ret = type == IB_QPT_GSI;
540 n = 1 << (ret + 2 * (port_num - 1));
541 spin_lock(&qpt->lock);
542 if (qpt->flags & n)
543 ret = -EINVAL;
544 else
545 qpt->flags |= n;
546 spin_unlock(&qpt->lock);
547 goto bail;
548 }
549
550 qpn = qpt->last + qpt->incr;
551 if (qpn >= RVT_QPN_MAX)
552 qpn = qpt->incr | ((qpt->last & 1) ^ 1);
553 /* offset carries bit 0 */
554 offset = qpn & RVT_BITS_PER_PAGE_MASK;
555 map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
556 max_scan = qpt->nmaps - !offset;
557 for (i = 0;;) {
558 if (unlikely(!map->page)) {
559 get_map_page(qpt, map);
560 if (unlikely(!map->page))
561 break;
562 }
563 do {
564 if (!test_and_set_bit(offset, map->page)) {
565 qpt->last = qpn;
566 ret = qpn;
567 goto bail;
568 }
569 offset += qpt->incr;
570 /*
571 * This qpn might be bogus if offset >= BITS_PER_PAGE.
572 * That is OK. It gets re-assigned below
573 */
574 qpn = mk_qpn(qpt, map, offset);
575 } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
576 /*
577 * In order to keep the number of pages allocated to a
578 * minimum, we scan all the existing pages before increasing
579 * the size of the bitmap table.
580 */
581 if (++i > max_scan) {
582 if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
583 break;
584 map = &qpt->map[qpt->nmaps++];
585 /* start at incr with current bit 0 */
586 offset = qpt->incr | (offset & 1);
587 } else if (map < &qpt->map[qpt->nmaps]) {
588 ++map;
589 /* start at incr with current bit 0 */
590 offset = qpt->incr | (offset & 1);
591 } else {
592 map = &qpt->map[0];
593 /* wrap to first map page, invert bit 0 */
594 offset = qpt->incr | ((offset & 1) ^ 1);
595 }
596 /* there can be no set bits in low-order QoS bits */
597 WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
598 qpn = mk_qpn(qpt, map, offset);
599 }
600
601 ret = -ENOMEM;
602
603 bail:
604 return ret;
605 }
606
607 /**
608 * rvt_clear_mr_refs - Drop held mr refs
609 * @qp: rvt qp data structure
610 * @clr_sends: Whether to clear the send side or not
611 */
612 static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
613 {
614 unsigned n;
615 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
616
617 if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
618 rvt_put_ss(&qp->s_rdma_read_sge);
619
620 rvt_put_ss(&qp->r_sge);
621
622 if (clr_sends) {
623 while (qp->s_last != qp->s_head) {
624 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
625
626 rvt_put_qp_swqe(qp, wqe);
627 if (++qp->s_last >= qp->s_size)
628 qp->s_last = 0;
629 smp_wmb(); /* see qp_set_savail */
630 }
631 if (qp->s_rdma_mr) {
632 rvt_put_mr(qp->s_rdma_mr);
633 qp->s_rdma_mr = NULL;
634 }
635 }
636
637 for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
638 struct rvt_ack_entry *e = &qp->s_ack_queue[n];
639
640 if (e->rdma_sge.mr) {
641 rvt_put_mr(e->rdma_sge.mr);
642 e->rdma_sge.mr = NULL;
643 }
644 }
645 }
646
647 /**
648 * rvt_swqe_has_lkey - return true if lkey is used by swqe
649 * @wqe - the send wqe
650 * @lkey - the lkey
651 *
652 * Test the swqe for using lkey
653 */
654 static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
655 {
656 int i;
657
658 for (i = 0; i < wqe->wr.num_sge; i++) {
659 struct rvt_sge *sge = &wqe->sg_list[i];
660
661 if (rvt_mr_has_lkey(sge->mr, lkey))
662 return true;
663 }
664 return false;
665 }
666
667 /**
668 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
669 * @qp - the rvt_qp
670 * @lkey - the lkey
671 */
672 static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
673 {
674 u32 s_last = qp->s_last;
675
676 while (s_last != qp->s_head) {
677 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);
678
679 if (rvt_swqe_has_lkey(wqe, lkey))
680 return true;
681
682 if (++s_last >= qp->s_size)
683 s_last = 0;
684 }
685 if (qp->s_rdma_mr)
686 if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
687 return true;
688 return false;
689 }
690
691 /**
692 * rvt_qp_acks_has_lkey - return true if acks have lkey
693 * @qp - the qp
694 * @lkey - the lkey
695 */
696 static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
697 {
698 int i;
699 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
700
701 for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
702 struct rvt_ack_entry *e = &qp->s_ack_queue[i];
703
704 if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
705 return true;
706 }
707 return false;
708 }
709
710 /*
711 * rvt_qp_mr_clean - clean up remote ops for lkey
712 * @qp - the qp
713 * @lkey - the lkey that is being de-registered
714 *
715 * This routine checks if the lkey is being used by
716 * the qp.
717 *
718 * If so, the qp is put into an error state to eliminate
719 * any references from the qp.
720 */
721 void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
722 {
723 bool lastwqe = false;
724
725 if (qp->ibqp.qp_type == IB_QPT_SMI ||
726 qp->ibqp.qp_type == IB_QPT_GSI)
727 /* avoid special QPs */
728 return;
729 spin_lock_irq(&qp->r_lock);
730 spin_lock(&qp->s_hlock);
731 spin_lock(&qp->s_lock);
732
733 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
734 goto check_lwqe;
735
736 if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
737 rvt_qp_sends_has_lkey(qp, lkey) ||
738 rvt_qp_acks_has_lkey(qp, lkey))
739 lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
740 check_lwqe:
741 spin_unlock(&qp->s_lock);
742 spin_unlock(&qp->s_hlock);
743 spin_unlock_irq(&qp->r_lock);
744 if (lastwqe) {
745 struct ib_event ev;
746
747 ev.device = qp->ibqp.device;
748 ev.element.qp = &qp->ibqp;
749 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
750 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
751 }
752 }
753
754 /**
755 * rvt_remove_qp - remove qp from table
756 * @rdi: rvt dev struct
757 * @qp: qp to remove
758 *
759 * Remove the QP from the table so it can't be found asynchronously by
760 * the receive routine.
761 */
762 static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
763 {
764 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
765 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
766 unsigned long flags;
767 int removed = 1;
768
769 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
770
771 if (rcu_dereference_protected(rvp->qp[0],
772 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
773 RCU_INIT_POINTER(rvp->qp[0], NULL);
774 } else if (rcu_dereference_protected(rvp->qp[1],
775 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
776 RCU_INIT_POINTER(rvp->qp[1], NULL);
777 } else {
778 struct rvt_qp *q;
779 struct rvt_qp __rcu **qpp;
780
781 removed = 0;
782 qpp = &rdi->qp_dev->qp_table[n];
783 for (; (q = rcu_dereference_protected(*qpp,
784 lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
785 qpp = &q->next) {
786 if (q == qp) {
787 RCU_INIT_POINTER(*qpp,
788 rcu_dereference_protected(qp->next,
789 lockdep_is_held(&rdi->qp_dev->qpt_lock)));
790 removed = 1;
791 trace_rvt_qpremove(qp, n);
792 break;
793 }
794 }
795 }
796
797 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
798 if (removed) {
799 synchronize_rcu();
800 rvt_put_qp(qp);
801 }
802 }
803
804 /**
805 * rvt_init_qp - initialize the QP state to the reset state
806 * @qp: the QP to init or reinit
807 * @type: the QP type
808 *
809 * This function is called from both rvt_create_qp() and
810 * rvt_reset_qp(). The difference is that the reset
811 * path holds the necessary locks to protect against concurrent
812 * access.
813 */
814 static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
815 enum ib_qp_type type)
816 {
817 qp->remote_qpn = 0;
818 qp->qkey = 0;
819 qp->qp_access_flags = 0;
820 qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
821 qp->s_hdrwords = 0;
822 qp->s_wqe = NULL;
823 qp->s_draining = 0;
824 qp->s_next_psn = 0;
825 qp->s_last_psn = 0;
826 qp->s_sending_psn = 0;
827 qp->s_sending_hpsn = 0;
828 qp->s_psn = 0;
829 qp->r_psn = 0;
830 qp->r_msn = 0;
831 if (type == IB_QPT_RC) {
832 qp->s_state = IB_OPCODE_RC_SEND_LAST;
833 qp->r_state = IB_OPCODE_RC_SEND_LAST;
834 } else {
835 qp->s_state = IB_OPCODE_UC_SEND_LAST;
836 qp->r_state = IB_OPCODE_UC_SEND_LAST;
837 }
838 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
839 qp->r_nak_state = 0;
840 qp->r_aflags = 0;
841 qp->r_flags = 0;
842 qp->s_head = 0;
843 qp->s_tail = 0;
844 qp->s_cur = 0;
845 qp->s_acked = 0;
846 qp->s_last = 0;
847 qp->s_ssn = 1;
848 qp->s_lsn = 0;
849 qp->s_mig_state = IB_MIG_MIGRATED;
850 qp->r_head_ack_queue = 0;
851 qp->s_tail_ack_queue = 0;
852 qp->s_acked_ack_queue = 0;
853 qp->s_num_rd_atomic = 0;
854 if (qp->r_rq.wq) {
855 qp->r_rq.wq->head = 0;
856 qp->r_rq.wq->tail = 0;
857 }
858 qp->r_sge.num_sge = 0;
859 atomic_set(&qp->s_reserved_used, 0);
860 }
861
862 /**
863 * rvt_reset_qp - initialize the QP state to the reset state
864 * @qp: the QP to reset
865 * @type: the QP type
866 *
867 * r_lock, s_hlock, and s_lock are required to be held by the caller
868 */
869 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
870 enum ib_qp_type type)
871 __must_hold(&qp->s_lock)
872 __must_hold(&qp->s_hlock)
873 __must_hold(&qp->r_lock)
874 {
875 lockdep_assert_held(&qp->r_lock);
876 lockdep_assert_held(&qp->s_hlock);
877 lockdep_assert_held(&qp->s_lock);
878 if (qp->state != IB_QPS_RESET) {
879 qp->state = IB_QPS_RESET;
880
881 /* Let drivers flush their waitlist */
882 rdi->driver_f.flush_qp_waiters(qp);
883 rvt_stop_rc_timers(qp);
884 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
885 spin_unlock(&qp->s_lock);
886 spin_unlock(&qp->s_hlock);
887 spin_unlock_irq(&qp->r_lock);
888
889 /* Stop the send queue and the retry timer */
890 rdi->driver_f.stop_send_queue(qp);
891 rvt_del_timers_sync(qp);
892 /* Wait for things to stop */
893 rdi->driver_f.quiesce_qp(qp);
894
895 /* take qp out the hash and wait for it to be unused */
896 rvt_remove_qp(rdi, qp);
897
898 /* grab the lock b/c it was locked at call time */
899 spin_lock_irq(&qp->r_lock);
900 spin_lock(&qp->s_hlock);
901 spin_lock(&qp->s_lock);
902
903 rvt_clear_mr_refs(qp, 1);
904 /*
905 * Let the driver do any tear down or re-init it needs to for
906 * a qp that has been reset
907 */
908 rdi->driver_f.notify_qp_reset(qp);
909 }
910 rvt_init_qp(rdi, qp, type);
911 lockdep_assert_held(&qp->r_lock);
912 lockdep_assert_held(&qp->s_hlock);
913 lockdep_assert_held(&qp->s_lock);
914 }
915
916 /**
 * rvt_free_qpn - Free a qpn from the bit map
917 * @qpt: QP table
918 * @qpn: queue pair number to free
919 */
920 static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
921 {
922 struct rvt_qpn_map *map;
923
924 map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
925 if (map->page)
926 clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
927 }
928
929 /**
930 * rvt_create_qp - create a queue pair for a device
931 * @ibpd: the protection domain whose device we create the queue pair for
932 * @init_attr: the attributes of the queue pair
933 * @udata: user data for libibverbs.so
934 *
935 * Queue pair creation is mostly an rvt issue. However, drivers have their own
936 * unique idea of what queue pair numbers mean. For instance there is a reserved
937 * range for PSM.
938 *
939 * Return: the queue pair on success, otherwise returns an errno.
940 *
941 * Called by the ib_create_qp() core verbs function.
942 */
943 struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
944 struct ib_qp_init_attr *init_attr,
945 struct ib_udata *udata)
946 {
947 struct rvt_qp *qp;
948 int err;
949 struct rvt_swqe *swq = NULL;
950 size_t sz;
951 size_t sg_list_sz;
952 struct ib_qp *ret = ERR_PTR(-ENOMEM);
953 struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
954 void *priv = NULL;
955 size_t sqsize;
956
957 if (!rdi)
958 return ERR_PTR(-EINVAL);
959
960 if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
961 init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
962 init_attr->create_flags)
963 return ERR_PTR(-EINVAL);
964
965 /* Check receive queue parameters if no SRQ is specified. */
966 if (!init_attr->srq) {
967 if (init_attr->cap.max_recv_sge >
968 rdi->dparms.props.max_recv_sge ||
969 init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
970 return ERR_PTR(-EINVAL);
971
972 if (init_attr->cap.max_send_sge +
973 init_attr->cap.max_send_wr +
974 init_attr->cap.max_recv_sge +
975 init_attr->cap.max_recv_wr == 0)
976 return ERR_PTR(-EINVAL);
977 }
978 sqsize =
979 init_attr->cap.max_send_wr + 1 +
980 rdi->dparms.reserved_operations;
981 switch (init_attr->qp_type) {
982 case IB_QPT_SMI:
983 case IB_QPT_GSI:
984 if (init_attr->port_num == 0 ||
985 init_attr->port_num > ibpd->device->phys_port_cnt)
986 return ERR_PTR(-EINVAL);
987 /* fall through */
988 case IB_QPT_UC:
989 case IB_QPT_RC:
990 case IB_QPT_UD:
991 sz = sizeof(struct rvt_sge) *
992 init_attr->cap.max_send_sge +
993 sizeof(struct rvt_swqe);
994 swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
995 if (!swq)
996 return ERR_PTR(-ENOMEM);
997
998 sz = sizeof(*qp);
999 sg_list_sz = 0;
1000 if (init_attr->srq) {
1001 struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
1002
1003 if (srq->rq.max_sge > 1)
1004 sg_list_sz = sizeof(*qp->r_sg_list) *
1005 (srq->rq.max_sge - 1);
1006 } else if (init_attr->cap.max_recv_sge > 1)
1007 sg_list_sz = sizeof(*qp->r_sg_list) *
1008 (init_attr->cap.max_recv_sge - 1);
1009 qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
1010 rdi->dparms.node);
1011 if (!qp)
1012 goto bail_swq;
1013
1014 RCU_INIT_POINTER(qp->next, NULL);
1015 if (init_attr->qp_type == IB_QPT_RC) {
1016 qp->s_ack_queue =
1017 kcalloc_node(rvt_max_atomic(rdi),
1018 sizeof(*qp->s_ack_queue),
1019 GFP_KERNEL,
1020 rdi->dparms.node);
1021 if (!qp->s_ack_queue)
1022 goto bail_qp;
1023 }
1024 /* initialize timers needed for rc qp */
1025 timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
1026 hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
1027 HRTIMER_MODE_REL);
1028 qp->s_rnr_timer.function = rvt_rc_rnr_retry;
1029
1030 /*
1031 * Driver needs to set up its private QP structure and do any
1032 * initialization that is needed.
1033 */
1034 priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
1035 if (IS_ERR(priv)) {
1036 ret = priv;
1037 goto bail_qp;
1038 }
1039 qp->priv = priv;
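			/* local ACK timeout is 4.096 usec * 2^timeout (per the IB spec) */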
1040 qp->timeout_jiffies =
1041 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1042 1000UL);
1043 if (init_attr->srq) {
1044 sz = 0;
1045 } else {
1046 qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
1047 qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
1048 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
1049 sizeof(struct rvt_rwqe);
1050 if (udata)
1051 qp->r_rq.wq = vmalloc_user(
1052 sizeof(struct rvt_rwq) +
1053 qp->r_rq.size * sz);
1054 else
1055 qp->r_rq.wq = vzalloc_node(
1056 sizeof(struct rvt_rwq) +
1057 qp->r_rq.size * sz,
1058 rdi->dparms.node);
1059 if (!qp->r_rq.wq)
1060 goto bail_driver_priv;
1061 }
1062
1063 /*
1064 * ib_create_qp() will initialize qp->ibqp
1065 * except for qp->ibqp.qp_num.
1066 */
1067 spin_lock_init(&qp->r_lock);
1068 spin_lock_init(&qp->s_hlock);
1069 spin_lock_init(&qp->s_lock);
1070 spin_lock_init(&qp->r_rq.lock);
1071 atomic_set(&qp->refcount, 0);
1072 atomic_set(&qp->local_ops_pending, 0);
1073 init_waitqueue_head(&qp->wait);
1074 INIT_LIST_HEAD(&qp->rspwait);
1075 qp->state = IB_QPS_RESET;
1076 qp->s_wq = swq;
1077 qp->s_size = sqsize;
1078 qp->s_avail = init_attr->cap.max_send_wr;
1079 qp->s_max_sge = init_attr->cap.max_send_sge;
1080 if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
1081 qp->s_flags = RVT_S_SIGNAL_REQ_WR;
1082
1083 err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
1084 init_attr->qp_type,
1085 init_attr->port_num);
1086 if (err < 0) {
1087 ret = ERR_PTR(err);
1088 goto bail_rq_wq;
1089 }
1090 qp->ibqp.qp_num = err;
1091 qp->port_num = init_attr->port_num;
1092 rvt_init_qp(rdi, qp, init_attr->qp_type);
1093 if (rdi->driver_f.qp_priv_init) {
1094 err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
1095 if (err) {
1096 ret = ERR_PTR(err);
1097 goto bail_rq_wq;
1098 }
1099 }
1100 break;
1101
1102 default:
1103 /* Don't support raw QPs */
1104 return ERR_PTR(-EINVAL);
1105 }
1106
1107 init_attr->cap.max_inline_data = 0;
1108
1109 /*
1110 * Return the address of the RWQ as the offset to mmap.
1111 * See rvt_mmap() for details.
1112 */
1113 if (udata && udata->outlen >= sizeof(__u64)) {
1114 if (!qp->r_rq.wq) {
1115 __u64 offset = 0;
1116
1117 err = ib_copy_to_udata(udata, &offset,
1118 sizeof(offset));
1119 if (err) {
1120 ret = ERR_PTR(err);
1121 goto bail_qpn;
1122 }
1123 } else {
1124 u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
1125
1126 qp->ip = rvt_create_mmap_info(rdi, s, udata,
1127 qp->r_rq.wq);
1128 if (!qp->ip) {
1129 ret = ERR_PTR(-ENOMEM);
1130 goto bail_qpn;
1131 }
1132
1133 err = ib_copy_to_udata(udata, &qp->ip->offset,
1134 sizeof(qp->ip->offset));
1135 if (err) {
1136 ret = ERR_PTR(err);
1137 goto bail_ip;
1138 }
1139 }
1140 qp->pid = current->pid;
1141 }
1142
1143 spin_lock(&rdi->n_qps_lock);
1144 if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
1145 spin_unlock(&rdi->n_qps_lock);
1146 ret = ERR_PTR(-ENOMEM);
1147 goto bail_ip;
1148 }
1149
1150 rdi->n_qps_allocated++;
1151 /*
1152 * Maintain a busy_jiffies variable that will be added to the timeout
1153 * period in mod_retry_timer and add_retry_timer. This busy jiffies
1154 * is scaled by the number of rc qps created for the device to reduce
1155 * the number of timeouts occurring when there is a large number of
1156 * qps. busy_jiffies is incremented every rc qp scaling interval.
1157 * The scaling interval is selected based on extensive performance
1158 * evaluation of targeted workloads.
1159 */
1160 if (init_attr->qp_type == IB_QPT_RC) {
1161 rdi->n_rc_qps++;
1162 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1163 }
1164 spin_unlock(&rdi->n_qps_lock);
1165
1166 if (qp->ip) {
1167 spin_lock_irq(&rdi->pending_lock);
1168 list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
1169 spin_unlock_irq(&rdi->pending_lock);
1170 }
1171
1172 ret = &qp->ibqp;
1173
1174 /*
1175 * We have our QP and it's good; now keep track of what types of opcodes
1176 * can be processed on this QP. We do this by keeping track of what the
1177 * 3 high order bits of the opcode are.
1178 */
1179 switch (init_attr->qp_type) {
1180 case IB_QPT_SMI:
1181 case IB_QPT_GSI:
1182 case IB_QPT_UD:
1183 qp->allowed_ops = IB_OPCODE_UD;
1184 break;
1185 case IB_QPT_RC:
1186 qp->allowed_ops = IB_OPCODE_RC;
1187 break;
1188 case IB_QPT_UC:
1189 qp->allowed_ops = IB_OPCODE_UC;
1190 break;
1191 default:
1192 ret = ERR_PTR(-EINVAL);
1193 goto bail_ip;
1194 }
1195
1196 return ret;
1197
1198 bail_ip:
1199 if (qp->ip)
1200 kref_put(&qp->ip->ref, rvt_release_mmap_info);
1201
1202 bail_qpn:
1203 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1204
1205 bail_rq_wq:
1206 if (!qp->ip)
1207 vfree(qp->r_rq.wq);
1208
1209 bail_driver_priv:
1210 rdi->driver_f.qp_priv_free(rdi, qp);
1211
1212 bail_qp:
1213 kfree(qp->s_ack_queue);
1214 kfree(qp);
1215
1216 bail_swq:
1217 vfree(swq);
1218
1219 return ret;
1220 }
1221
1222 /**
1223 * rvt_error_qp - put a QP into the error state
1224 * @qp: the QP to put into the error state
1225 * @err: the receive completion error to signal if a RWQE is active
1226 *
1227 * Flushes both send and receive work queues.
1228 *
1229 * Return: true if last WQE event should be generated.
1230 * The QP r_lock and s_lock should be held and interrupts disabled.
1231 * If we are already in error state, just return.
1232 */
1233 int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
1234 {
1235 struct ib_wc wc;
1236 int ret = 0;
1237 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1238
1239 lockdep_assert_held(&qp->r_lock);
1240 lockdep_assert_held(&qp->s_lock);
1241 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
1242 goto bail;
1243
1244 qp->state = IB_QPS_ERR;
1245
1246 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
1247 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
1248 del_timer(&qp->s_timer);
1249 }
1250
1251 if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
1252 qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
1253
1254 rdi->driver_f.notify_error_qp(qp);
1255
1256 /* Schedule the sending tasklet to drain the send work queue. */
1257 if (READ_ONCE(qp->s_last) != qp->s_head)
1258 rdi->driver_f.schedule_send(qp);
1259
1260 rvt_clear_mr_refs(qp, 0);
1261
1262 memset(&wc, 0, sizeof(wc));
1263 wc.qp = &qp->ibqp;
1264 wc.opcode = IB_WC_RECV;
1265
1266 if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
1267 wc.wr_id = qp->r_wr_id;
1268 wc.status = err;
1269 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1270 }
1271 wc.status = IB_WC_WR_FLUSH_ERR;
1272
1273 if (qp->r_rq.wq) {
1274 struct rvt_rwq *wq;
1275 u32 head;
1276 u32 tail;
1277
1278 spin_lock(&qp->r_rq.lock);
1279
1280 /* sanity check pointers before trusting them */
1281 wq = qp->r_rq.wq;
1282 head = wq->head;
1283 if (head >= qp->r_rq.size)
1284 head = 0;
1285 tail = wq->tail;
1286 if (tail >= qp->r_rq.size)
1287 tail = 0;
1288 while (tail != head) {
1289 wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
1290 if (++tail >= qp->r_rq.size)
1291 tail = 0;
1292 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1293 }
1294 wq->tail = tail;
1295
1296 spin_unlock(&qp->r_rq.lock);
1297 } else if (qp->ibqp.event_handler) {
1298 ret = 1;
1299 }
1300
1301 bail:
1302 return ret;
1303 }
1304 EXPORT_SYMBOL(rvt_error_qp);
1305
1306 /*
1307 * Put the QP into the hash table.
1308 * The hash table holds a reference to the QP.
1309 */
1310 static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
1311 {
1312 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
1313 unsigned long flags;
1314
1315 rvt_get_qp(qp);
1316 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
1317
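	/* QP0 and QP1 live in the per-port rvp->qp[] array, not the hash table */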
1318 if (qp->ibqp.qp_num <= 1) {
1319 rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
1320 } else {
1321 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
1322
1323 qp->next = rdi->qp_dev->qp_table[n];
1324 rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
1325 trace_rvt_qpinsert(qp, n);
1326 }
1327
1328 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
1329 }
1330
1331 /**
1332 * rvt_modify_qp - modify the attributes of a queue pair
1333 * @ibqp: the queue pair whose attributes we're modifying
1334 * @attr: the new attributes
1335 * @attr_mask: the mask of attributes to modify
1336 * @udata: user data for libibverbs.so
1337 *
1338 * Return: 0 on success, otherwise returns an errno.
1339 */
1340 int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1341 int attr_mask, struct ib_udata *udata)
1342 {
1343 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1344 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1345 enum ib_qp_state cur_state, new_state;
1346 struct ib_event ev;
1347 int lastwqe = 0;
1348 int mig = 0;
1349 int pmtu = 0; /* for gcc warning only */
1350 int opa_ah;
1351
1352 spin_lock_irq(&qp->r_lock);
1353 spin_lock(&qp->s_hlock);
1354 spin_lock(&qp->s_lock);
1355
1356 cur_state = attr_mask & IB_QP_CUR_STATE ?
1357 attr->cur_qp_state : qp->state;
1358 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1359 opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);
1360
1361 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
1362 attr_mask))
1363 goto inval;
1364
1365 if (rdi->driver_f.check_modify_qp &&
1366 rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
1367 goto inval;
1368
1369 if (attr_mask & IB_QP_AV) {
1370 if (opa_ah) {
1371 if (rdma_ah_get_dlid(&attr->ah_attr) >=
1372 opa_get_mcast_base(OPA_MCAST_NR))
1373 goto inval;
1374 } else {
1375 if (rdma_ah_get_dlid(&attr->ah_attr) >=
1376 be16_to_cpu(IB_MULTICAST_LID_BASE))
1377 goto inval;
1378 }
1379
1380 if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
1381 goto inval;
1382 }
1383
1384 if (attr_mask & IB_QP_ALT_PATH) {
1385 if (opa_ah) {
1386 if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
1387 opa_get_mcast_base(OPA_MCAST_NR))
1388 goto inval;
1389 } else {
1390 if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
1391 be16_to_cpu(IB_MULTICAST_LID_BASE))
1392 goto inval;
1393 }
1394
1395 if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
1396 goto inval;
1397 if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
1398 goto inval;
1399 }
1400
1401 if (attr_mask & IB_QP_PKEY_INDEX)
1402 if (attr->pkey_index >= rvt_get_npkeys(rdi))
1403 goto inval;
1404
1405 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1406 if (attr->min_rnr_timer > 31)
1407 goto inval;
1408
1409 if (attr_mask & IB_QP_PORT)
1410 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1411 qp->ibqp.qp_type == IB_QPT_GSI ||
1412 attr->port_num == 0 ||
1413 attr->port_num > ibqp->device->phys_port_cnt)
1414 goto inval;
1415
1416 if (attr_mask & IB_QP_DEST_QPN)
1417 if (attr->dest_qp_num > RVT_QPN_MASK)
1418 goto inval;
1419
1420 if (attr_mask & IB_QP_RETRY_CNT)
1421 if (attr->retry_cnt > 7)
1422 goto inval;
1423
1424 if (attr_mask & IB_QP_RNR_RETRY)
1425 if (attr->rnr_retry > 7)
1426 goto inval;
1427
1428 /*
1429 * Don't allow invalid path_mtu values. OK to set greater
1430 * than the active mtu (or even the max_cap, if we have tuned
1431 * that to a small mtu). We'll set qp->path_mtu
1432 * to the lesser of requested attribute mtu and active,
1433 * for packetizing messages.
1434 * Note that the QP port has to be set in INIT and MTU in RTR.
1435 */
1436 if (attr_mask & IB_QP_PATH_MTU) {
1437 pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
1438 if (pmtu < 0)
1439 goto inval;
1440 }
1441
1442 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1443 if (attr->path_mig_state == IB_MIG_REARM) {
1444 if (qp->s_mig_state == IB_MIG_ARMED)
1445 goto inval;
1446 if (new_state != IB_QPS_RTS)
1447 goto inval;
1448 } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
1449 if (qp->s_mig_state == IB_MIG_REARM)
1450 goto inval;
1451 if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
1452 goto inval;
1453 if (qp->s_mig_state == IB_MIG_ARMED)
1454 mig = 1;
1455 } else {
1456 goto inval;
1457 }
1458 }
1459
1460 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1461 if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
1462 goto inval;
1463
1464 switch (new_state) {
1465 case IB_QPS_RESET:
1466 if (qp->state != IB_QPS_RESET)
1467 rvt_reset_qp(rdi, qp, ibqp->qp_type);
1468 break;
1469
1470 case IB_QPS_RTR:
1471 /* Allow event to re-trigger if QP set to RTR more than once */
1472 qp->r_flags &= ~RVT_R_COMM_EST;
1473 qp->state = new_state;
1474 break;
1475
1476 case IB_QPS_SQD:
1477 qp->s_draining = qp->s_last != qp->s_cur;
1478 qp->state = new_state;
1479 break;
1480
1481 case IB_QPS_SQE:
1482 if (qp->ibqp.qp_type == IB_QPT_RC)
1483 goto inval;
1484 qp->state = new_state;
1485 break;
1486
1487 case IB_QPS_ERR:
1488 lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1489 break;
1490
1491 default:
1492 qp->state = new_state;
1493 break;
1494 }
1495
1496 if (attr_mask & IB_QP_PKEY_INDEX)
1497 qp->s_pkey_index = attr->pkey_index;
1498
1499 if (attr_mask & IB_QP_PORT)
1500 qp->port_num = attr->port_num;
1501
1502 if (attr_mask & IB_QP_DEST_QPN)
1503 qp->remote_qpn = attr->dest_qp_num;
1504
1505 if (attr_mask & IB_QP_SQ_PSN) {
1506 qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
1507 qp->s_psn = qp->s_next_psn;
1508 qp->s_sending_psn = qp->s_next_psn;
1509 qp->s_last_psn = qp->s_next_psn - 1;
1510 qp->s_sending_hpsn = qp->s_last_psn;
1511 }
1512
1513 if (attr_mask & IB_QP_RQ_PSN)
1514 qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
1515
1516 if (attr_mask & IB_QP_ACCESS_FLAGS)
1517 qp->qp_access_flags = attr->qp_access_flags;
1518
1519 if (attr_mask & IB_QP_AV) {
1520 rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
1521 qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
1522 qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
1523 }
1524
1525 if (attr_mask & IB_QP_ALT_PATH) {
1526 rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
1527 qp->s_alt_pkey_index = attr->alt_pkey_index;
1528 }
1529
1530 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1531 qp->s_mig_state = attr->path_mig_state;
1532 if (mig) {
1533 qp->remote_ah_attr = qp->alt_ah_attr;
1534 qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
1535 qp->s_pkey_index = qp->s_alt_pkey_index;
1536 }
1537 }
1538
1539 if (attr_mask & IB_QP_PATH_MTU) {
1540 qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
1541 qp->log_pmtu = ilog2(qp->pmtu);
1542 }
1543
1544 if (attr_mask & IB_QP_RETRY_CNT) {
1545 qp->s_retry_cnt = attr->retry_cnt;
1546 qp->s_retry = attr->retry_cnt;
1547 }
1548
1549 if (attr_mask & IB_QP_RNR_RETRY) {
1550 qp->s_rnr_retry_cnt = attr->rnr_retry;
1551 qp->s_rnr_retry = attr->rnr_retry;
1552 }
1553
1554 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1555 qp->r_min_rnr_timer = attr->min_rnr_timer;
1556
1557 if (attr_mask & IB_QP_TIMEOUT) {
1558 qp->timeout = attr->timeout;
1559 qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
1560 }
1561
1562 if (attr_mask & IB_QP_QKEY)
1563 qp->qkey = attr->qkey;
1564
1565 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1566 qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
1567
1568 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
1569 qp->s_max_rd_atomic = attr->max_rd_atomic;
1570
1571 if (rdi->driver_f.modify_qp)
1572 rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
1573
1574 spin_unlock(&qp->s_lock);
1575 spin_unlock(&qp->s_hlock);
1576 spin_unlock_irq(&qp->r_lock);
1577
1578 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1579 rvt_insert_qp(rdi, qp);
1580
1581 if (lastwqe) {
1582 ev.device = qp->ibqp.device;
1583 ev.element.qp = &qp->ibqp;
1584 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1585 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1586 }
1587 if (mig) {
1588 ev.device = qp->ibqp.device;
1589 ev.element.qp = &qp->ibqp;
1590 ev.event = IB_EVENT_PATH_MIG;
1591 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1592 }
1593 return 0;
1594
1595 inval:
1596 spin_unlock(&qp->s_lock);
1597 spin_unlock(&qp->s_hlock);
1598 spin_unlock_irq(&qp->r_lock);
1599 return -EINVAL;
1600 }
1601
1602 /**
1603 * rvt_destroy_qp - destroy a queue pair
1604 * @ibqp: the queue pair to destroy
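 * @udata: user data, unused here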
1605 *
1606 * Note that this can be called while the QP is actively sending or
1607 * receiving!
1608 *
1609 * Return: 0 on success.
1610 */
1611 int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
1612 {
1613 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1614 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1615
1616 spin_lock_irq(&qp->r_lock);
1617 spin_lock(&qp->s_hlock);
1618 spin_lock(&qp->s_lock);
1619 rvt_reset_qp(rdi, qp, ibqp->qp_type);
1620 spin_unlock(&qp->s_lock);
1621 spin_unlock(&qp->s_hlock);
1622 spin_unlock_irq(&qp->r_lock);
1623
1624 wait_event(qp->wait, !atomic_read(&qp->refcount));
1625 /* qpn is now available for use again */
1626 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1627
1628 spin_lock(&rdi->n_qps_lock);
1629 rdi->n_qps_allocated--;
1630 if (qp->ibqp.qp_type == IB_QPT_RC) {
1631 rdi->n_rc_qps--;
1632 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1633 }
1634 spin_unlock(&rdi->n_qps_lock);
1635
1636 if (qp->ip)
1637 kref_put(&qp->ip->ref, rvt_release_mmap_info);
1638 else
1639 vfree(qp->r_rq.wq);
1640 rdi->driver_f.qp_priv_free(rdi, qp);
1641 kfree(qp->s_ack_queue);
1642 rdma_destroy_ah_attr(&qp->remote_ah_attr);
1643 rdma_destroy_ah_attr(&qp->alt_ah_attr);
1644 vfree(qp->s_wq);
1645 kfree(qp);
1646 return 0;
1647 }
1648
1649 /**
1650 * rvt_query_qp - query an ibqp
1651 * @ibqp: IB qp to query
1652 * @attr: attr struct to fill in
1653 * @attr_mask: attr mask ignored
1654 * @init_attr: struct to fill in
1655 *
1656 * Return: always 0
1657 */
1658 int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1659 int attr_mask, struct ib_qp_init_attr *init_attr)
1660 {
1661 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1662 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1663
1664 attr->qp_state = qp->state;
1665 attr->cur_qp_state = attr->qp_state;
1666 attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
1667 attr->path_mig_state = qp->s_mig_state;
1668 attr->qkey = qp->qkey;
1669 attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
1670 attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
1671 attr->dest_qp_num = qp->remote_qpn;
1672 attr->qp_access_flags = qp->qp_access_flags;
1673 attr->cap.max_send_wr = qp->s_size - 1 -
1674 rdi->dparms.reserved_operations;
1675 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
1676 attr->cap.max_send_sge = qp->s_max_sge;
1677 attr->cap.max_recv_sge = qp->r_rq.max_sge;
1678 attr->cap.max_inline_data = 0;
1679 attr->ah_attr = qp->remote_ah_attr;
1680 attr->alt_ah_attr = qp->alt_ah_attr;
1681 attr->pkey_index = qp->s_pkey_index;
1682 attr->alt_pkey_index = qp->s_alt_pkey_index;
1683 attr->en_sqd_async_notify = 0;
1684 attr->sq_draining = qp->s_draining;
1685 attr->max_rd_atomic = qp->s_max_rd_atomic;
1686 attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
1687 attr->min_rnr_timer = qp->r_min_rnr_timer;
1688 attr->port_num = qp->port_num;
1689 attr->timeout = qp->timeout;
1690 attr->retry_cnt = qp->s_retry_cnt;
1691 attr->rnr_retry = qp->s_rnr_retry_cnt;
1692 attr->alt_port_num =
1693 rdma_ah_get_port_num(&qp->alt_ah_attr);
1694 attr->alt_timeout = qp->alt_timeout;
1695
1696 init_attr->event_handler = qp->ibqp.event_handler;
1697 init_attr->qp_context = qp->ibqp.qp_context;
1698 init_attr->send_cq = qp->ibqp.send_cq;
1699 init_attr->recv_cq = qp->ibqp.recv_cq;
1700 init_attr->srq = qp->ibqp.srq;
1701 init_attr->cap = attr->cap;
1702 if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
1703 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
1704 else
1705 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
1706 init_attr->qp_type = qp->ibqp.qp_type;
1707 init_attr->port_num = qp->port_num;
1708 return 0;
1709 }
1710
1711 /**
1712 * rvt_post_recv - post a receive on a QP
1713 * @ibqp: the QP to post the receive on
1714 * @wr: the WR to post
1715 * @bad_wr: the first bad WR is put here
1716 *
1717 * This may be called from interrupt context.
1718 *
1719 * Return: 0 on success otherwise errno
1720 */
1721 int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
1722 const struct ib_recv_wr **bad_wr)
1723 {
1724 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1725 struct rvt_rwq *wq = qp->r_rq.wq;
1726 unsigned long flags;
1727 int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
1728 !qp->ibqp.srq;
1729
1730 /* Check that state is OK to post receive. */
1731 if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
1732 *bad_wr = wr;
1733 return -EINVAL;
1734 }
1735
1736 for (; wr; wr = wr->next) {
1737 struct rvt_rwqe *wqe;
1738 u32 next;
1739 int i;
1740
1741 if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
1742 *bad_wr = wr;
1743 return -EINVAL;
1744 }
1745
1746 spin_lock_irqsave(&qp->r_rq.lock, flags);
1747 next = wq->head + 1;
1748 if (next >= qp->r_rq.size)
1749 next = 0;
1750 if (next == wq->tail) {
1751 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1752 *bad_wr = wr;
1753 return -ENOMEM;
1754 }
1755 if (unlikely(qp_err_flush)) {
1756 struct ib_wc wc;
1757
1758 memset(&wc, 0, sizeof(wc));
1759 wc.qp = &qp->ibqp;
1760 wc.opcode = IB_WC_RECV;
1761 wc.wr_id = wr->wr_id;
1762 wc.status = IB_WC_WR_FLUSH_ERR;
1763 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1764 } else {
1765 wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
1766 wqe->wr_id = wr->wr_id;
1767 wqe->num_sge = wr->num_sge;
1768 for (i = 0; i < wr->num_sge; i++)
1769 wqe->sg_list[i] = wr->sg_list[i];
1770 /*
1771 * Make sure queue entry is written
1772 * before the head index.
1773 */
1774 smp_wmb();
1775 wq->head = next;
1776 }
1777 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1778 }
1779 return 0;
1780 }
1781
1782 /**
1783 * rvt_qp_valid_operation - validate post send wr request
1784 * @qp - the qp
1785 * @post_parms - the post send table for the driver
1786 * @wr - the work request
1787 *
1788 * The routine validates the operation based on the
1789 * validation table and returns the length of the operation,
1790 * which can extend beyond the ib_send_wr. Operation-
1791 * dependent flags key atomic operation validation.
1792 *
1793 * There is an exception for UD qps that validates the pd and
1794 * overrides the length to include the additional UD specific
1795 * length.
1796 *
1797 * Returns a negative error or the length of the work request
1798 * for building the swqe.
1799 */
1800 static inline int rvt_qp_valid_operation(
1801 struct rvt_qp *qp,
1802 const struct rvt_operation_params *post_parms,
1803 const struct ib_send_wr *wr)
1804 {
1805 int len;
1806
1807 if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
1808 return -EINVAL;
1809 if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
1810 return -EINVAL;
1811 if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
1812 ibpd_to_rvtpd(qp->ibqp.pd)->user)
1813 return -EINVAL;
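	/* atomic ops need an 8-byte, 8-byte-aligned first SGE */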
1814 if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
1815 (wr->num_sge == 0 ||
1816 wr->sg_list[0].length < sizeof(u64) ||
1817 wr->sg_list[0].addr & (sizeof(u64) - 1)))
1818 return -EINVAL;
1819 if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
1820 !qp->s_max_rd_atomic)
1821 return -EINVAL;
1822 len = post_parms[wr->opcode].length;
1823 /* UD specific */
1824 if (qp->ibqp.qp_type != IB_QPT_UC &&
1825 qp->ibqp.qp_type != IB_QPT_RC) {
1826 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
1827 return -EINVAL;
1828 len = sizeof(struct ib_ud_wr);
1829 }
1830 return len;
1831 }
1832
1833 /**
1834 * rvt_qp_is_avail - determine queue capacity
1835 * @qp: the qp
1836 * @rdi: the rdmavt device
1837 * @reserved_op: is reserved operation
1838 *
1839 * This assumes the s_hlock is held but the s_last
1840 * qp variable is uncontrolled.
1841 *
1842 * For non reserved operations, the qp->s_avail
1843 * may be changed.
1844 *
1845 * The return value is zero or -ENOMEM.
1846 */
1847 static inline int rvt_qp_is_avail(
1848 struct rvt_qp *qp,
1849 struct rvt_dev_info *rdi,
1850 bool reserved_op)
1851 {
1852 u32 slast;
1853 u32 avail;
1854 u32 reserved_used;
1855
1856 /* see rvt_qp_wqe_unreserve() */
1857 smp_mb__before_atomic();
1858 reserved_used = atomic_read(&qp->s_reserved_used);
1859 if (unlikely(reserved_op)) {
1860 /* see rvt_qp_wqe_unreserve() */
1861 smp_mb__before_atomic();
1862 if (reserved_used >= rdi->dparms.reserved_operations)
1863 return -ENOMEM;
1864 return 0;
1865 }
1866 /* non-reserved operations */
1867 if (likely(qp->s_avail))
1868 return 0;
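	/* recompute s_avail: free slots in the ring, less one and the unused reserve */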
1869 slast = READ_ONCE(qp->s_last);
1870 if (qp->s_head >= slast)
1871 avail = qp->s_size - (qp->s_head - slast);
1872 else
1873 avail = slast - qp->s_head;
1874
1875 /* see rvt_qp_wqe_unreserve() */
1876 smp_mb__before_atomic();
1877 reserved_used = atomic_read(&qp->s_reserved_used);
1878 avail = avail - 1 -
1879 (rdi->dparms.reserved_operations - reserved_used);
1880 /* ensure we don't assign a negative s_avail */
1881 if ((s32)avail <= 0)
1882 return -ENOMEM;
1883 qp->s_avail = avail;
1884 if (WARN_ON(qp->s_avail >
1885 (qp->s_size - 1 - rdi->dparms.reserved_operations)))
1886 rvt_pr_err(rdi,
1887 "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
1888 qp->ibqp.qp_num, qp->s_size, qp->s_avail,
1889 qp->s_head, qp->s_tail, qp->s_cur,
1890 qp->s_acked, qp->s_last);
1891 return 0;
1892 }
1893
1894 /**
1895 * rvt_post_one_wr - post one RC, UC, or UD send work request
1896 * @qp: the QP to post on
1897 * @wr: the work request to send
1898 */
1899 static int rvt_post_one_wr(struct rvt_qp *qp,
1900 const struct ib_send_wr *wr,
1901 bool *call_send)
1902 {
1903 struct rvt_swqe *wqe;
1904 u32 next;
1905 int i;
1906 int j;
1907 int acc;
1908 struct rvt_lkey_table *rkt;
1909 struct rvt_pd *pd;
1910 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1911 u8 log_pmtu;
1912 int ret;
1913 size_t cplen;
1914 bool reserved_op;
1915 int local_ops_delayed = 0;
1916
1917 BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
1918
1919 /* IB spec says that num_sge == 0 is OK. */
1920 if (unlikely(wr->num_sge > qp->s_max_sge))
1921 return -EINVAL;
1922
1923 ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
1924 if (ret < 0)
1925 return ret;
1926 cplen = ret;
1927
1928 /*
1929 * Local operations include fast register and local invalidate.
1930 * Fast register needs to be processed immediately because the
1931 * registered lkey may be used by following work requests and the
1932 * lkey needs to be valid at the time those requests are posted.
1933 * Local invalidate can be processed immediately if fencing is
1934 * not required and no previous local invalidate ops are pending.
1935 * Signaled local operations that have been processed immediately
1936 * need to have requests with "completion only" flags set posted
1937 * to the send queue in order to generate completions.
1938 */
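/*
 * Example (editor's note): a signaled IB_WR_REG_MR is executed right
 * here via rvt_fast_reg_mr() and, being signaled, falls through so a
 * completion-only WQE is still queued below; an unsignaled one
 * returns immediately without consuming a send queue slot.
 */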
1939 if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
1940 switch (wr->opcode) {
1941 case IB_WR_REG_MR:
1942 ret = rvt_fast_reg_mr(qp,
1943 reg_wr(wr)->mr,
1944 reg_wr(wr)->key,
1945 reg_wr(wr)->access);
1946 if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
1947 return ret;
1948 break;
1949 case IB_WR_LOCAL_INV:
1950 if ((wr->send_flags & IB_SEND_FENCE) ||
1951 atomic_read(&qp->local_ops_pending)) {
1952 local_ops_delayed = 1;
1953 } else {
1954 ret = rvt_invalidate_rkey(
1955 qp, wr->ex.invalidate_rkey);
1956 if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
1957 return ret;
1958 }
1959 break;
1960 default:
1961 return -EINVAL;
1962 }
1963 }
1964
1965 reserved_op = rdi->post_parms[wr->opcode].flags &
1966 RVT_OPERATION_USE_RESERVE;
1967 /* check for avail */
1968 ret = rvt_qp_is_avail(qp, rdi, reserved_op);
1969 if (ret)
1970 return ret;
1971 next = qp->s_head + 1;
1972 if (next >= qp->s_size)
1973 next = 0;
1974
1975 rkt = &rdi->lkey_table;
1976 pd = ibpd_to_rvtpd(qp->ibqp.pd);
1977 wqe = rvt_get_swqe_ptr(qp, qp->s_head);
1978
1979 /* cplen has length from above */
1980 memcpy(&wqe->wr, wr, cplen);
1981
1982 wqe->length = 0;
1983 j = 0;
1984 if (wr->num_sge) {
1985 struct rvt_sge *last_sge = NULL;
1986
1987 acc = wr->opcode >= IB_WR_RDMA_READ ?
1988 IB_ACCESS_LOCAL_WRITE : 0;
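/*
 * Editor's note on the loop below: a negative return from
 * rvt_lkey_ok() is an error; 0 appears to mean the SGE was coalesced
 * with last_sge (no new sg_list slot consumed, j unchanged); 1
 * consumes a slot, which then becomes the new last_sge candidate.
 */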
1989 for (i = 0; i < wr->num_sge; i++) {
1990 u32 length = wr->sg_list[i].length;
1991
1992 if (length == 0)
1993 continue;
1994 ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
1995 &wr->sg_list[i], acc);
1996 if (unlikely(ret < 0))
1997 goto bail_inval_free;
1998 wqe->length += length;
1999 if (ret)
2000 last_sge = &wqe->sg_list[j];
2001 j += ret;
2002 }
2003 wqe->wr.num_sge = j;
2004 }
2005
2006 /*
2007 * Calculate and set SWQE PSN values prior to handing it off
2008 * to the driver's check routine. This gives the driver the
2009 * opportunity to adjust PSN values based on internal checks.
2010 */
2011 log_pmtu = qp->log_pmtu;
2012 if (qp->allowed_ops == IB_OPCODE_UD) {
2013 struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);
2014
2015 log_pmtu = ah->log_pmtu;
2016 atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
2017 }
2018
2019 if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
2020 if (local_ops_delayed)
2021 atomic_inc(&qp->local_ops_pending);
2022 else
2023 wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
2024 wqe->ssn = 0;
2025 wqe->psn = 0;
2026 wqe->lpsn = 0;
2027 } else {
2028 wqe->ssn = qp->s_ssn++;
2029 wqe->psn = qp->s_next_psn;
2030 wqe->lpsn = wqe->psn +
2031 (wqe->length ?
2032 ((wqe->length - 1) >> log_pmtu) :
2033 0);
2034 }
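/*
 * Worked example (editor's note): a non-local 8192 byte request with
 * a 2048 byte path MTU (log_pmtu = 11) gets lpsn = psn +
 * ((8192 - 1) >> 11) = psn + 3, i.e. the WQE spans four packets and
 * PSNs.  A zero-length request still occupies a single PSN
 * (lpsn == psn).
 */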
2035
2036 /* general part of wqe valid - allow for driver checks */
2037 if (rdi->driver_f.setup_wqe) {
2038 ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
2039 if (ret < 0)
2040 goto bail_inval_free_ref;
2041 }
2042
2043 if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
2044 qp->s_next_psn = wqe->lpsn + 1;
2045
2046 if (unlikely(reserved_op)) {
2047 wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
2048 rvt_qp_wqe_reserve(qp, wqe);
2049 } else {
2050 wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
2051 qp->s_avail--;
2052 }
2053 trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
2054 smp_wmb(); /* see request builders */
2055 qp->s_head = next;
2056
2057 return 0;
2058
2059 bail_inval_free_ref:
2060 if (qp->allowed_ops == IB_OPCODE_UD)
2061 atomic_dec(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
2062 bail_inval_free:
2063 /* release mr holds */
2064 while (j) {
2065 struct rvt_sge *sge = &wqe->sg_list[--j];
2066
2067 rvt_put_mr(sge->mr);
2068 }
2069 return ret;
2070 }
2071
2072 /**
2073 * rvt_post_send - post a send on a QP
2074 * @ibqp: the QP to post the send on
2075 * @wr: the list of work requests to post
2076 * @bad_wr: the first bad WR is put here
2077 *
2078 * This may be called from interrupt context.
2079 *
2080 * Return: 0 on success else errno
2081 */
2082 int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
2083 const struct ib_send_wr **bad_wr)
2084 {
2085 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
2086 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2087 unsigned long flags = 0;
2088 bool call_send;
2089 unsigned nreq = 0;
2090 int err = 0;
2091
2092 spin_lock_irqsave(&qp->s_hlock, flags);
2093
2094 /*
2095 * Ensure QP state is such that we can send. If not bail out early,
2096 * there is no need to do this every time we post a send.
2097 */
2098 if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
2099 spin_unlock_irqrestore(&qp->s_hlock, flags);
2100 return -EINVAL;
2101 }
2102
2103 /*
2104 * If the send queue is empty and we only have a single WR, then just go
2105 * ahead and kick the send engine into gear. Otherwise we will always
2106 * just schedule the send to happen later.
2107 */
2108 call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
2109
2110 for (; wr; wr = wr->next) {
2111 err = rvt_post_one_wr(qp, wr, &call_send);
2112 if (unlikely(err)) {
2113 *bad_wr = wr;
2114 goto bail;
2115 }
2116 nreq++;
2117 }
2118 bail:
2119 spin_unlock_irqrestore(&qp->s_hlock, flags);
2120 if (nreq) {
2121 /*
2122 * Only call do_send if there is exactly one packet, and the
2123 * driver said it was ok.
2124 */
2125 if (nreq == 1 && call_send)
2126 rdi->driver_f.do_send(qp);
2127 else
2128 rdi->driver_f.schedule_send_no_lock(qp);
2129 }
2130 return err;
2131 }
2132
2133 /**
2134 * rvt_post_srq_recv - post a receive on a shared receive queue
2135 * @ibsrq: the SRQ to post the receive on
2136 * @wr: the list of work requests to post
2137 * @bad_wr: A pointer to the first WR to cause a problem is put here
2138 *
2139 * This may be called from interrupt context.
2140 *
2141 * Return: 0 on success else errno
2142 */
2143 int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
2144 const struct ib_recv_wr **bad_wr)
2145 {
2146 struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
2147 struct rvt_rwq *wq;
2148 unsigned long flags;
2149
2150 for (; wr; wr = wr->next) {
2151 struct rvt_rwqe *wqe;
2152 u32 next;
2153 int i;
2154
2155 if ((unsigned)wr->num_sge > srq->rq.max_sge) {
2156 *bad_wr = wr;
2157 return -EINVAL;
2158 }
2159
2160 spin_lock_irqsave(&srq->rq.lock, flags);
2161 wq = srq->rq.wq;
2162 next = wq->head + 1;
2163 if (next >= srq->rq.size)
2164 next = 0;
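/*
 * Editor's note: head only advances to 'next' when it would not
 * catch up with tail, so one ring slot is always left empty and the
 * usable SRQ capacity is rq.size - 1 entries.
 */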
2165 if (next == wq->tail) {
2166 spin_unlock_irqrestore(&srq->rq.lock, flags);
2167 *bad_wr = wr;
2168 return -ENOMEM;
2169 }
2170
2171 wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
2172 wqe->wr_id = wr->wr_id;
2173 wqe->num_sge = wr->num_sge;
2174 for (i = 0; i < wr->num_sge; i++)
2175 wqe->sg_list[i] = wr->sg_list[i];
2176 /* Make sure queue entry is written before the head index. */
2177 smp_wmb();
2178 wq->head = next;
2179 spin_unlock_irqrestore(&srq->rq.lock, flags);
2180 }
2181 return 0;
2182 }
2183
2184 /*
2185 * Validate a RWQE and fill in the SGE state.
2186 * Return 1 if OK.
2187 */
2188 static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
2189 {
2190 int i, j, ret;
2191 struct ib_wc wc;
2192 struct rvt_lkey_table *rkt;
2193 struct rvt_pd *pd;
2194 struct rvt_sge_state *ss;
2195 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2196
2197 rkt = &rdi->lkey_table;
2198 pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
2199 ss = &qp->r_sge;
2200 ss->sg_list = qp->r_sg_list;
2201 qp->r_len = 0;
2202 for (i = j = 0; i < wqe->num_sge; i++) {
2203 if (wqe->sg_list[i].length == 0)
2204 continue;
2205 /* Check LKEY */
2206 ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
2207 NULL, &wqe->sg_list[i],
2208 IB_ACCESS_LOCAL_WRITE);
2209 if (unlikely(ret <= 0))
2210 goto bad_lkey;
2211 qp->r_len += wqe->sg_list[i].length;
2212 j++;
2213 }
2214 ss->num_sge = j;
2215 ss->total_len = qp->r_len;
2216 return 1;
2217
2218 bad_lkey:
2219 while (j) {
2220 struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
2221
2222 rvt_put_mr(sge->mr);
2223 }
2224 ss->num_sge = 0;
2225 memset(&wc, 0, sizeof(wc));
2226 wc.wr_id = wqe->wr_id;
2227 wc.status = IB_WC_LOC_PROT_ERR;
2228 wc.opcode = IB_WC_RECV;
2229 wc.qp = &qp->ibqp;
2230 /* Signal solicited completion event. */
2231 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
2232 return 0;
2233 }
2234
2235 /**
2236 * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
2237 * @qp: the QP
2238 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
2239 *
2240 * Return -1 if there is a local error, 0 if no RWQE is available,
2241 * otherwise return 1.
2242 *
2243 * Can be called from interrupt level.
2244 */
2245 int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
2246 {
2247 unsigned long flags;
2248 struct rvt_rq *rq;
2249 struct rvt_rwq *wq;
2250 struct rvt_srq *srq;
2251 struct rvt_rwqe *wqe;
2252 void (*handler)(struct ib_event *, void *);
2253 u32 tail;
2254 int ret;
2255
2256 if (qp->ibqp.srq) {
2257 srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
2258 handler = srq->ibsrq.event_handler;
2259 rq = &srq->rq;
2260 } else {
2261 srq = NULL;
2262 handler = NULL;
2263 rq = &qp->r_rq;
2264 }
2265
2266 spin_lock_irqsave(&rq->lock, flags);
2267 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
2268 ret = 0;
2269 goto unlock;
2270 }
2271
2272 wq = rq->wq;
2273 tail = wq->tail;
2274 /* Validate tail before using it since it is user writable. */
2275 if (tail >= rq->size)
2276 tail = 0;
2277 if (unlikely(tail == wq->head)) {
2278 ret = 0;
2279 goto unlock;
2280 }
2281 /* Make sure entry is read after head index is read. */
2282 smp_rmb();
2283 wqe = rvt_get_rwqe_ptr(rq, tail);
2284 /*
2285 * Even though we update the tail index in memory, the verbs
2286 * consumer is not supposed to post more entries until a
2287 * completion is generated.
2288 */
2289 if (++tail >= rq->size)
2290 tail = 0;
2291 wq->tail = tail;
2292 if (!wr_id_only && !init_sge(qp, wqe)) {
2293 ret = -1;
2294 goto unlock;
2295 }
2296 qp->r_wr_id = wqe->wr_id;
2297
2298 ret = 1;
2299 set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
2300 if (handler) {
2301 u32 n;
2302
2303 /*
2304 * Validate head pointer value and compute
2305 * the number of remaining WQEs.
2306 */
2307 n = wq->head;
2308 if (n >= rq->size)
2309 n = 0;
2310 if (n < tail)
2311 n += rq->size - tail;
2312 else
2313 n -= tail;
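/*
 * Worked example (editor's note): with rq->size = 32, an updated
 * tail of 30 and head = 2, n = 2 + 32 - 30 = 4 receive WQEs are
 * still posted; if srq->limit were, say, 8, the limit event below
 * fires and the limit is disarmed (set back to 0).
 */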
2314 if (n < srq->limit) {
2315 struct ib_event ev;
2316
2317 srq->limit = 0;
2318 spin_unlock_irqrestore(&rq->lock, flags);
2319 ev.device = qp->ibqp.device;
2320 ev.element.srq = qp->ibqp.srq;
2321 ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
2322 handler(&ev, srq->ibsrq.srq_context);
2323 goto bail;
2324 }
2325 }
2326 unlock:
2327 spin_unlock_irqrestore(&rq->lock, flags);
2328 bail:
2329 return ret;
2330 }
2331 EXPORT_SYMBOL(rvt_get_rwqe);
2332
2333 /**
2334 * rvt_comm_est - handle trap with QP established
2335 * @qp: the QP
2336 */
2337 void rvt_comm_est(struct rvt_qp *qp)
2338 {
2339 qp->r_flags |= RVT_R_COMM_EST;
2340 if (qp->ibqp.event_handler) {
2341 struct ib_event ev;
2342
2343 ev.device = qp->ibqp.device;
2344 ev.element.qp = &qp->ibqp;
2345 ev.event = IB_EVENT_COMM_EST;
2346 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2347 }
2348 }
2349 EXPORT_SYMBOL(rvt_comm_est);
2350
2351 void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
2352 {
2353 unsigned long flags;
2354 int lastwqe;
2355
2356 spin_lock_irqsave(&qp->s_lock, flags);
2357 lastwqe = rvt_error_qp(qp, err);
2358 spin_unlock_irqrestore(&qp->s_lock, flags);
2359
2360 if (lastwqe) {
2361 struct ib_event ev;
2362
2363 ev.device = qp->ibqp.device;
2364 ev.element.qp = &qp->ibqp;
2365 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
2366 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2367 }
2368 }
2369 EXPORT_SYMBOL(rvt_rc_error);
2370
2371 /*
2372 * rvt_rnr_tbl_to_usec - convert an RNR timeout code to microseconds
2373 * @index - the index into ib_rvt_rnr_table
2374 * Return: usec for the given index into ib_rvt_rnr_table
2375 */
2376 unsigned long rvt_rnr_tbl_to_usec(u32 index)
2377 {
2378 return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
2379 }
2380 EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);
2381
2382 static inline unsigned long rvt_aeth_to_usec(u32 aeth)
2383 {
2384 return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
2385 IB_AETH_CREDIT_MASK];
2386 }
2387
2388 /*
2389 * rvt_add_retry_timer_ext - add/start a retry timer
2390 * @qp - the QP
2391 * @shift - timeout shift to wait for multiple packets
2392 * add a retry timer on the QP
2393 */
2394 void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
2395 {
2396 struct ib_qp *ibqp = &qp->ibqp;
2397 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2398
2399 lockdep_assert_held(&qp->s_lock);
2400 qp->s_flags |= RVT_S_TIMER;
2401 /* 4.096 usec. * (1 << qp->timeout) */
2402 qp->s_timer.expires = jiffies + rdi->busy_jiffies +
2403 (qp->timeout_jiffies << shift);
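/*
 * Editor's note: timeout_jiffies already encodes the standard
 * 4.096 usec * (1 << qp->timeout) interval; a non-zero shift
 * multiplies that by 2^shift so one timer can cover several
 * outstanding packets, and busy_jiffies adds an extra
 * device-dependent slack.
 */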
2404 add_timer(&qp->s_timer);
2405 }
2406 EXPORT_SYMBOL(rvt_add_retry_timer_ext);
2407
2408 /**
2409 * rvt_add_rnr_timer - add/start an rnr timer
2410 * @qp - the QP
2411 * @aeth - aeth of RNR timeout, simulated aeth for loopback
2412 * add an rnr timer on the QP
2413 */
2414 void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
2415 {
2416 u32 to;
2417
2418 lockdep_assert_held(&qp->s_lock);
2419 qp->s_flags |= RVT_S_WAIT_RNR;
2420 to = rvt_aeth_to_usec(aeth);
2421 trace_rvt_rnrnak_add(qp, to);
2422 hrtimer_start(&qp->s_rnr_timer,
2423 ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
2424 }
2425 EXPORT_SYMBOL(rvt_add_rnr_timer);
2426
2427 /**
2428 * rvt_stop_rc_timers - stop all timers
2429 * @qp - the QP
2430 * stop any pending timers
2431 */
2432 void rvt_stop_rc_timers(struct rvt_qp *qp)
2433 {
2434 lockdep_assert_held(&qp->s_lock);
2435 /* Remove QP from all timers */
2436 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
2437 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
2438 del_timer(&qp->s_timer);
2439 hrtimer_try_to_cancel(&qp->s_rnr_timer);
2440 }
2441 }
2442 EXPORT_SYMBOL(rvt_stop_rc_timers);
2443
2444 /**
2445 * rvt_stop_rnr_timer - stop an rnr timer
2446 * @qp - the QP
2447 *
2448 * stop an rnr timer, if one is pending, by
2449 * clearing the RVT_S_WAIT_RNR wait state.
2450 */
2451 static void rvt_stop_rnr_timer(struct rvt_qp *qp)
2452 {
2453 lockdep_assert_held(&qp->s_lock);
2454 /* Remove QP from rnr timer */
2455 if (qp->s_flags & RVT_S_WAIT_RNR) {
2456 qp->s_flags &= ~RVT_S_WAIT_RNR;
2457 trace_rvt_rnrnak_stop(qp, 0);
2458 }
2459 }
2460
2461 /**
2462 * rvt_del_timers_sync - wait for any timeout routines to exit
2463 * @qp - the QP
2464 */
2465 void rvt_del_timers_sync(struct rvt_qp *qp)
2466 {
2467 del_timer_sync(&qp->s_timer);
2468 hrtimer_cancel(&qp->s_rnr_timer);
2469 }
2470 EXPORT_SYMBOL(rvt_del_timers_sync);
2471
2472 /*
2473 * This is called from s_timer for missing responses.
2474 */
2475 static void rvt_rc_timeout(struct timer_list *t)
2476 {
2477 struct rvt_qp *qp = from_timer(qp, t, s_timer);
2478 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2479 unsigned long flags;
2480
2481 spin_lock_irqsave(&qp->r_lock, flags);
2482 spin_lock(&qp->s_lock);
2483 if (qp->s_flags & RVT_S_TIMER) {
2484 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
2485
2486 qp->s_flags &= ~RVT_S_TIMER;
2487 rvp->n_rc_timeouts++;
2488 del_timer(&qp->s_timer);
2489 trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
2490 if (rdi->driver_f.notify_restart_rc)
2491 rdi->driver_f.notify_restart_rc(qp,
2492 qp->s_last_psn + 1,
2493 1);
2494 rdi->driver_f.schedule_send(qp);
2495 }
2496 spin_unlock(&qp->s_lock);
2497 spin_unlock_irqrestore(&qp->r_lock, flags);
2498 }
2499
2500 /*
2501 * This is called from s_timer for RNR timeouts.
2502 */
2503 enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
2504 {
2505 struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
2506 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2507 unsigned long flags;
2508
2509 spin_lock_irqsave(&qp->s_lock, flags);
2510 rvt_stop_rnr_timer(qp);
2511 trace_rvt_rnrnak_timeout(qp, 0);
2512 rdi->driver_f.schedule_send(qp);
2513 spin_unlock_irqrestore(&qp->s_lock, flags);
2514 return HRTIMER_NORESTART;
2515 }
2516 EXPORT_SYMBOL(rvt_rc_rnr_retry);
2517
2518 /**
2519 * rvt_qp_iter_init - initialize a QP iterator
2520 * @rdi: rvt devinfo
2521 * @v: u64 value passed to the @cb callback
2522 *
2523 * This returns an iterator suitable for iterating QPs
2524 * in the system.
2525 *
2526 * The @cb is a user defined callback and @v is a 64
2527 * bit value passed to and relevant for processing in the
2528 * @cb. An example use case would be to alter QP processing
2529 * based on criteria not part of the rvt_qp.
2530 *
2531 * Use cases that require memory allocation to succeed
2532 * must preallocate appropriately.
2533 *
2534 * Return: a pointer to an rvt_qp_iter or NULL
2535 */
2536 struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
2537 u64 v,
2538 void (*cb)(struct rvt_qp *qp, u64 v))
2539 {
2540 struct rvt_qp_iter *i;
2541
2542 i = kzalloc(sizeof(*i), GFP_KERNEL);
2543 if (!i)
2544 return NULL;
2545
2546 i->rdi = rdi;
2547 /* number of special QPs (SMI/GSI) for device */
2548 i->specials = rdi->ibdev.phys_port_cnt * 2;
2549 i->v = v;
2550 i->cb = cb;
2551
2552 return i;
2553 }
2554 EXPORT_SYMBOL(rvt_qp_iter_init);
2555
2556 /**
2557 * rvt_qp_iter_next - return the next QP in iter
2558 * @iter - the iterator
2559 *
2560 * Fine grained QP iterator suitable for use
2561 * with debugfs seq_file mechanisms.
2562 *
2563 * Updates iter->qp with the current QP when the return
2564 * value is 0.
2565 *
2566 * Return: 0 - iter->qp is valid, 1 - no more QPs
2567 */
2568 int rvt_qp_iter_next(struct rvt_qp_iter *iter)
2569 __must_hold(RCU)
2570 {
2571 int n = iter->n;
2572 int ret = 1;
2573 struct rvt_qp *pqp = iter->qp;
2574 struct rvt_qp *qp;
2575 struct rvt_dev_info *rdi = iter->rdi;
2576
2577 /*
2578 * The approach is to consider the special qps
2579 * as additional table entries before the
2580 * real hash table. Since the qp code sets
2581 * the qp->next hash link to NULL, this works just fine.
2582 *
2583 * iter->specials is 2 * # ports
2584 *
2585 * n = 0..iter->specials is the special qp indices
2586 *
2587 * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
2588 * the potential hash bucket entries
2589 *
2590 */
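/*
 * Editor's note, single-port example: specials = 2, so n = 0 maps to
 * rdi->ports[0]->qp[0] (QP0) and n = 1 to rdi->ports[0]->qp[1]
 * (QP1); from n = 2 onwards the regular buckets
 * qp_table[n - specials] are walked.
 */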
2591 for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
2592 if (pqp) {
2593 qp = rcu_dereference(pqp->next);
2594 } else {
2595 if (n < iter->specials) {
2596 struct rvt_ibport *rvp;
2597 int pidx;
2598
2599 pidx = n % rdi->ibdev.phys_port_cnt;
2600 rvp = rdi->ports[pidx];
2601 qp = rcu_dereference(rvp->qp[n & 1]);
2602 } else {
2603 qp = rcu_dereference(
2604 rdi->qp_dev->qp_table[
2605 (n - iter->specials)]);
2606 }
2607 }
2608 pqp = qp;
2609 if (qp) {
2610 iter->qp = qp;
2611 iter->n = n;
2612 return 0;
2613 }
2614 }
2615 return ret;
2616 }
2617 EXPORT_SYMBOL(rvt_qp_iter_next);
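/*
 * Hypothetical driver-side sketch (editor's note, not part of this
 * file): walk every QP from a debugfs-style context using the
 * fine-grained iterator.
 *
 *	struct rvt_qp_iter *iter = rvt_qp_iter_init(rdi, 0, NULL);
 *
 *	if (iter) {
 *		rcu_read_lock();
 *		while (!rvt_qp_iter_next(iter))
 *			pr_info("qp %u\n", iter->qp->ibqp.qp_num);
 *		rcu_read_unlock();
 *		kfree(iter);
 *	}
 */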
2618
2619 /**
2620 * rvt_qp_iter - iterate all QPs
2621 * @rdi - rvt devinfo
2622 * @v - a 64 bit value
2623 * @cb - a callback
2624 *
2625 * This provides a way for iterating all QPs.
2626 *
2627 * The @cb is a user defined callback and @v is a 64
2628 * bit value passed to and relevant for processing in the
2629 * cb. An example use case would be to alter QP processing
2630 * based on criteria not part of the rvt_qp.
2631 *
2632 * The code has an internal iterator to simplify
2633 * non seq_file use cases.
2634 */
2635 void rvt_qp_iter(struct rvt_dev_info *rdi,
2636 u64 v,
2637 void (*cb)(struct rvt_qp *qp, u64 v))
2638 {
2639 int ret;
2640 struct rvt_qp_iter i = {
2641 .rdi = rdi,
2642 .specials = rdi->ibdev.phys_port_cnt * 2,
2643 .v = v,
2644 .cb = cb
2645 };
2646
2647 rcu_read_lock();
2648 do {
2649 ret = rvt_qp_iter_next(&i);
2650 if (!ret) {
2651 rvt_get_qp(i.qp);
2652 rcu_read_unlock();
2653 i.cb(i.qp, i.v);
2654 rcu_read_lock();
2655 rvt_put_qp(i.qp);
2656 }
2657 } while (!ret);
2658 rcu_read_unlock();
2659 }
2660 EXPORT_SYMBOL(rvt_qp_iter);
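/*
 * Hypothetical callback sketch (editor's note, not part of this
 * file): count the QPs on a device with the internal iterator.
 *
 *	static void count_qp(struct rvt_qp *qp, u64 v)
 *	{
 *		atomic_inc((atomic_t *)(uintptr_t)v);
 *	}
 *
 *	atomic_t nqps = ATOMIC_INIT(0);
 *
 *	rvt_qp_iter(rdi, (u64)(uintptr_t)&nqps, count_qp);
 */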
2661
2662 /*
2663 * This should be called with s_lock held.
2664 */
2665 void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
2666 enum ib_wc_status status)
2667 {
2668 u32 old_last, last;
2669 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2670
2671 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
2672 return;
2673
2674 last = qp->s_last;
2675 old_last = last;
2676 trace_rvt_qp_send_completion(qp, wqe, last);
2677 if (++last >= qp->s_size)
2678 last = 0;
2679 trace_rvt_qp_send_completion(qp, wqe, last);
2680 qp->s_last = last;
2681 /* See post_send() */
2682 barrier();
2683 rvt_put_qp_swqe(qp, wqe);
2684
2685 rvt_qp_swqe_complete(qp,
2686 wqe,
2687 rdi->wc_opcode[wqe->wr.opcode],
2688 status);
2689
2690 if (qp->s_acked == old_last)
2691 qp->s_acked = last;
2692 if (qp->s_cur == old_last)
2693 qp->s_cur = last;
2694 if (qp->s_tail == old_last)
2695 qp->s_tail = last;
2696 if (qp->state == IB_QPS_SQD && last == qp->s_cur)
2697 qp->s_draining = 0;
2698 }
2699 EXPORT_SYMBOL(rvt_send_complete);
2700
2701 /**
2702 * rvt_copy_sge - copy data to SGE memory
2703 * @qp: associated QP
2704 * @ss: the SGE state
2705 * @data: the data to copy
2706 * @length: the length of the data
2707 * @release: boolean to release MR
2708 * @copy_last: do a separate copy of the last 8 bytes
2709 */
2710 void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
2711 void *data, u32 length,
2712 bool release, bool copy_last)
2713 {
2714 struct rvt_sge *sge = &ss->sge;
2715 int i;
2716 bool in_last = false;
2717 bool cacheless_copy = false;
2718 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2719 struct rvt_wss *wss = rdi->wss;
2720 unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
2721
2722 if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
2723 cacheless_copy = length >= PAGE_SIZE;
2724 } else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
2725 if (length >= PAGE_SIZE) {
2726 /*
2727 * NOTE: this *assumes*:
2728 * o The first vaddr is the dest.
2729 * o If multiple pages, then vaddr is sequential.
2730 */
2731 wss_insert(wss, sge->vaddr);
2732 if (length >= (2 * PAGE_SIZE))
2733 wss_insert(wss, (sge->vaddr + PAGE_SIZE));
2734
2735 cacheless_copy = wss_exceeds_threshold(wss);
2736 } else {
2737 wss_advance_clean_counter(wss);
2738 }
2739 }
2740
2741 if (copy_last) {
2742 if (length > 8) {
2743 length -= 8;
2744 } else {
2745 copy_last = false;
2746 in_last = true;
2747 }
2748 }
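/*
 * Editor's note: when @copy_last is set and more than 8 bytes are
 * being copied, the bulk of the payload is copied first and the
 * final 8 bytes are redone in a second "in_last" pass one byte at a
 * time, so the tail of the buffer is observably written last; for
 * 8 bytes or less the whole transfer is done byte by byte.
 */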
2749
2750 again:
2751 while (length) {
2752 u32 len = rvt_get_sge_length(sge, length);
2753
2754 WARN_ON_ONCE(len == 0);
2755 if (unlikely(in_last)) {
2756 /* enforce byte transfer ordering */
2757 for (i = 0; i < len; i++)
2758 ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
2759 } else if (cacheless_copy) {
2760 cacheless_memcpy(sge->vaddr, data, len);
2761 } else {
2762 memcpy(sge->vaddr, data, len);
2763 }
2764 rvt_update_sge(ss, len, release);
2765 data += len;
2766 length -= len;
2767 }
2768
2769 if (copy_last) {
2770 copy_last = false;
2771 in_last = true;
2772 length = 8;
2773 goto again;
2774 }
2775 }
2776 EXPORT_SYMBOL(rvt_copy_sge);
2777
2778 static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
2779 struct rvt_qp *sqp)
2780 {
2781 rvp->n_pkt_drops++;
2782 /*
2783 * For RC, the requester would timeout and retry so
2784 * shortcut the timeouts and just signal too many retries.
2785 */
2786 return sqp->ibqp.qp_type == IB_QPT_RC ?
2787 IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS;
2788 }
2789
2790 /**
2791 * rvt_ruc_loopback - handle UC and RC loopback requests
2792 * @sqp: the sending QP
2793 *
2794 * This is called from rvt_do_send() to forward a WQE addressed to the same HFI.
2795 * Note that although we are single threaded due to the send engine, we still
2796 * have to protect against post_send(). We don't have to worry about
2797 * receive interrupts since this is a connected protocol and all packets
2798 * will pass through here.
2799 */
2800 void rvt_ruc_loopback(struct rvt_qp *sqp)
2801 {
2802 struct rvt_ibport *rvp = NULL;
2803 struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
2804 struct rvt_qp *qp;
2805 struct rvt_swqe *wqe;
2806 struct rvt_sge *sge;
2807 unsigned long flags;
2808 struct ib_wc wc;
2809 u64 sdata;
2810 atomic64_t *maddr;
2811 enum ib_wc_status send_status;
2812 bool release;
2813 int ret;
2814 bool copy_last = false;
2815 int local_ops = 0;
2816
2817 rcu_read_lock();
2818 rvp = rdi->ports[sqp->port_num - 1];
2819
2820 /*
2821 * Note that we check the responder QP state after
2822 * checking the requester's state.
2823 */
2824
2825 qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
2826 sqp->remote_qpn);
2827
2828 spin_lock_irqsave(&sqp->s_lock, flags);
2829
2830 /* Return if we are already busy processing a work request. */
2831 if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
2832 !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
2833 goto unlock;
2834
2835 sqp->s_flags |= RVT_S_BUSY;
2836
2837 again:
2838 if (sqp->s_last == READ_ONCE(sqp->s_head))
2839 goto clr_busy;
2840 wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
2841
2842 /* Return if it is not OK to start a new work request. */
2843 if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
2844 if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
2845 goto clr_busy;
2846 /* We are in the error state, flush the work request. */
2847 send_status = IB_WC_WR_FLUSH_ERR;
2848 goto flush_send;
2849 }
2850
2851 /*
2852 * We can rely on the entry not changing without the s_lock
2853 * being held until we update s_last.
2854 * We increment s_cur to indicate s_last is in progress.
2855 */
2856 if (sqp->s_last == sqp->s_cur) {
2857 if (++sqp->s_cur >= sqp->s_size)
2858 sqp->s_cur = 0;
2859 }
2860 spin_unlock_irqrestore(&sqp->s_lock, flags);
2861
2862 if (!qp) {
2863 send_status = loopback_qp_drop(rvp, sqp);
2864 goto serr_no_r_lock;
2865 }
2866 spin_lock_irqsave(&qp->r_lock, flags);
2867 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
2868 qp->ibqp.qp_type != sqp->ibqp.qp_type) {
2869 send_status = loopback_qp_drop(rvp, sqp);
2870 goto serr;
2871 }
2872
2873 memset(&wc, 0, sizeof(wc));
2874 send_status = IB_WC_SUCCESS;
2875
2876 release = true;
2877 sqp->s_sge.sge = wqe->sg_list[0];
2878 sqp->s_sge.sg_list = wqe->sg_list + 1;
2879 sqp->s_sge.num_sge = wqe->wr.num_sge;
2880 sqp->s_len = wqe->length;
2881 switch (wqe->wr.opcode) {
2882 case IB_WR_REG_MR:
2883 goto send_comp;
2884
2885 case IB_WR_LOCAL_INV:
2886 if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
2887 if (rvt_invalidate_rkey(sqp,
2888 wqe->wr.ex.invalidate_rkey))
2889 send_status = IB_WC_LOC_PROT_ERR;
2890 local_ops = 1;
2891 }
2892 goto send_comp;
2893
2894 case IB_WR_SEND_WITH_INV:
2895 case IB_WR_SEND_WITH_IMM:
2896 case IB_WR_SEND:
2897 ret = rvt_get_rwqe(qp, false);
2898 if (ret < 0)
2899 goto op_err;
2900 if (!ret)
2901 goto rnr_nak;
2902 if (wqe->length > qp->r_len)
2903 goto inv_err;
2904 switch (wqe->wr.opcode) {
2905 case IB_WR_SEND_WITH_INV:
2906 if (!rvt_invalidate_rkey(qp,
2907 wqe->wr.ex.invalidate_rkey)) {
2908 wc.wc_flags = IB_WC_WITH_INVALIDATE;
2909 wc.ex.invalidate_rkey =
2910 wqe->wr.ex.invalidate_rkey;
2911 }
2912 break;
2913 case IB_WR_SEND_WITH_IMM:
2914 wc.wc_flags = IB_WC_WITH_IMM;
2915 wc.ex.imm_data = wqe->wr.ex.imm_data;
2916 break;
2917 default:
2918 break;
2919 }
2920 break;
2921
2922 case IB_WR_RDMA_WRITE_WITH_IMM:
2923 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
2924 goto inv_err;
2925 wc.wc_flags = IB_WC_WITH_IMM;
2926 wc.ex.imm_data = wqe->wr.ex.imm_data;
2927 ret = rvt_get_rwqe(qp, true);
2928 if (ret < 0)
2929 goto op_err;
2930 if (!ret)
2931 goto rnr_nak;
2932 /* skip copy_last set and qp_access_flags recheck */
2933 goto do_write;
2934 case IB_WR_RDMA_WRITE:
2935 copy_last = rvt_is_user_qp(qp);
2936 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
2937 goto inv_err;
2938 do_write:
2939 if (wqe->length == 0)
2940 break;
2941 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
2942 wqe->rdma_wr.remote_addr,
2943 wqe->rdma_wr.rkey,
2944 IB_ACCESS_REMOTE_WRITE)))
2945 goto acc_err;
2946 qp->r_sge.sg_list = NULL;
2947 qp->r_sge.num_sge = 1;
2948 qp->r_sge.total_len = wqe->length;
2949 break;
2950
2951 case IB_WR_RDMA_READ:
2952 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
2953 goto inv_err;
2954 if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
2955 wqe->rdma_wr.remote_addr,
2956 wqe->rdma_wr.rkey,
2957 IB_ACCESS_REMOTE_READ)))
2958 goto acc_err;
2959 release = false;
2960 sqp->s_sge.sg_list = NULL;
2961 sqp->s_sge.num_sge = 1;
2962 qp->r_sge.sge = wqe->sg_list[0];
2963 qp->r_sge.sg_list = wqe->sg_list + 1;
2964 qp->r_sge.num_sge = wqe->wr.num_sge;
2965 qp->r_sge.total_len = wqe->length;
2966 break;
2967
2968 case IB_WR_ATOMIC_CMP_AND_SWP:
2969 case IB_WR_ATOMIC_FETCH_AND_ADD:
2970 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
2971 goto inv_err;
2972 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
2973 wqe->atomic_wr.remote_addr,
2974 wqe->atomic_wr.rkey,
2975 IB_ACCESS_REMOTE_ATOMIC)))
2976 goto acc_err;
2977 /* Perform atomic OP and save result. */
2978 maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
2979 sdata = wqe->atomic_wr.compare_add;
2980 *(u64 *)sqp->s_sge.sge.vaddr =
2981 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
2982 (u64)atomic64_add_return(sdata, maddr) - sdata :
2983 (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
2984 sdata, wqe->atomic_wr.swap);
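/*
 * Editor's note: either way the requester's local buffer receives
 * the value that was at the remote address before the operation, as
 * IB atomics require: atomic64_add_return() yields the new value so
 * sdata is subtracted back out, and cmpxchg() returns the prior
 * contents directly.
 */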
2985 rvt_put_mr(qp->r_sge.sge.mr);
2986 qp->r_sge.num_sge = 0;
2987 goto send_comp;
2988
2989 default:
2990 send_status = IB_WC_LOC_QP_OP_ERR;
2991 goto serr;
2992 }
2993
2994 sge = &sqp->s_sge.sge;
2995 while (sqp->s_len) {
2996 u32 len = rvt_get_sge_length(sge, sqp->s_len);
2997
2998 WARN_ON_ONCE(len == 0);
2999 rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
3000 len, release, copy_last);
3001 rvt_update_sge(&sqp->s_sge, len, !release);
3002 sqp->s_len -= len;
3003 }
3004 if (release)
3005 rvt_put_ss(&qp->r_sge);
3006
3007 if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
3008 goto send_comp;
3009
3010 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
3011 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
3012 else
3013 wc.opcode = IB_WC_RECV;
3014 wc.wr_id = qp->r_wr_id;
3015 wc.status = IB_WC_SUCCESS;
3016 wc.byte_len = wqe->length;
3017 wc.qp = &qp->ibqp;
3018 wc.src_qp = qp->remote_qpn;
3019 wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
3020 wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
3021 wc.port_num = 1;
3022 /* Signal completion event if the solicited bit is set. */
3023 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
3024 wqe->wr.send_flags & IB_SEND_SOLICITED);
3025
3026 send_comp:
3027 spin_unlock_irqrestore(&qp->r_lock, flags);
3028 spin_lock_irqsave(&sqp->s_lock, flags);
3029 rvp->n_loop_pkts++;
3030 flush_send:
3031 sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
3032 rvt_send_complete(sqp, wqe, send_status);
3033 if (local_ops) {
3034 atomic_dec(&sqp->local_ops_pending);
3035 local_ops = 0;
3036 }
3037 goto again;
3038
3039 rnr_nak:
3040 /* Handle RNR NAK */
3041 if (qp->ibqp.qp_type == IB_QPT_UC)
3042 goto send_comp;
3043 rvp->n_rnr_naks++;
3044 /*
3045 * Note: we don't need the s_lock held since the BUSY flag
3046 * makes this single threaded.
3047 */
3048 if (sqp->s_rnr_retry == 0) {
3049 send_status = IB_WC_RNR_RETRY_EXC_ERR;
3050 goto serr;
3051 }
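/*
 * Editor's note: an RNR retry count of 7 means "retry forever" in
 * IB, so the remaining-retry counter is only decremented for finite
 * counts.
 */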
3052 if (sqp->s_rnr_retry_cnt < 7)
3053 sqp->s_rnr_retry--;
3054 spin_unlock_irqrestore(&qp->r_lock, flags);
3055 spin_lock_irqsave(&sqp->s_lock, flags);
3056 if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
3057 goto clr_busy;
3058 rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
3059 IB_AETH_CREDIT_SHIFT);
3060 goto clr_busy;
3061
3062 op_err:
3063 send_status = IB_WC_REM_OP_ERR;
3064 wc.status = IB_WC_LOC_QP_OP_ERR;
3065 goto err;
3066
3067 inv_err:
3068 send_status =
3069 sqp->ibqp.qp_type == IB_QPT_RC ?
3070 IB_WC_REM_INV_REQ_ERR :
3071 IB_WC_SUCCESS;
3072 wc.status = IB_WC_LOC_QP_OP_ERR;
3073 goto err;
3074
3075 acc_err:
3076 send_status = IB_WC_REM_ACCESS_ERR;
3077 wc.status = IB_WC_LOC_PROT_ERR;
3078 err:
3079 /* responder goes to error state */
3080 rvt_rc_error(qp, wc.status);
3081
3082 serr:
3083 spin_unlock_irqrestore(&qp->r_lock, flags);
3084 serr_no_r_lock:
3085 spin_lock_irqsave(&sqp->s_lock, flags);
3086 rvt_send_complete(sqp, wqe, send_status);
3087 if (sqp->ibqp.qp_type == IB_QPT_RC) {
3088 int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
3089
3090 sqp->s_flags &= ~RVT_S_BUSY;
3091 spin_unlock_irqrestore(&sqp->s_lock, flags);
3092 if (lastwqe) {
3093 struct ib_event ev;
3094
3095 ev.device = sqp->ibqp.device;
3096 ev.element.qp = &sqp->ibqp;
3097 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
3098 sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
3099 }
3100 goto done;
3101 }
3102 clr_busy:
3103 sqp->s_flags &= ~RVT_S_BUSY;
3104 unlock:
3105 spin_unlock_irqrestore(&sqp->s_lock, flags);
3106 done:
3107 rcu_read_unlock();
3108 }
3109 EXPORT_SYMBOL(rvt_ruc_loopback);