/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <rdma/uverbs_ioctl.h>
static void rvt_rc_timeout(struct timer_list *t);
/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
static const u32 ib_rvt_rnr_table[32] = {
	655360, /* 00: 655.36 */
	10,     /* 01: 0.01 */
	20,     /* 02: 0.02 */
	30,     /* 03: 0.03 */
	40,     /* 04: 0.04 */
	60,     /* 05: 0.06 */
	80,     /* 06: 0.08 */
	120,    /* 07: 0.12 */
	160,    /* 08: 0.16 */
	240,    /* 09: 0.24 */
	320,    /* 0A: 0.32 */
	480,    /* 0B: 0.48 */
	640,    /* 0C: 0.64 */
	960,    /* 0D: 0.96 */
	1280,   /* 0E: 1.28 */
	1920,   /* 0F: 1.92 */
	2560,   /* 10: 2.56 */
	3840,   /* 11: 3.84 */
	5120,   /* 12: 5.12 */
	7680,   /* 13: 7.68 */
	10240,  /* 14: 10.24 */
	15360,  /* 15: 15.36 */
	20480,  /* 16: 20.48 */
	30720,  /* 17: 30.72 */
	40960,  /* 18: 40.96 */
	61440,  /* 19: 61.44 */
	81920,  /* 1A: 81.92 */
	122880, /* 1B: 122.88 */
	163840, /* 1C: 163.84 */
	245760, /* 1D: 245.76 */
	327680, /* 1E: 327.68 */
	491520  /* 1F: 491.52 */
};
/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_INIT] = RVT_POST_RECV_OK,
	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
	    RVT_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);
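
/*
 * Illustrative use (a sketch, not taken verbatim from any driver):
 * callers such as rvt_do_send() gate work on this table rather than
 * comparing states directly, e.g.
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK))
 *		return;
 *
 * so a QP in the SQE or ERR state still accepts posts
 * (RVT_POST_SEND_OK) but only flushes them (RVT_FLUSH_SEND).
 */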
/* platform specific: return the last level cache (llc) size, in KiB */
static int rvt_wss_llc_size(void)
{
	/* assume that the boot CPU value is universal for all CPUs */
	return boot_cpu_data.x86_cache_size;
}

/* platform specific: cacheless copy */
static void cacheless_memcpy(void *dst, void *src, size_t n)
{
	/*
	 * Use the only available X64 cacheless copy.  Add a __user cast
	 * to quiet sparse.  The src argument is already in the kernel so
	 * there are no security issues.  The extra fault recovery machinery
	 * is not invoked.
	 */
	__copy_user_nocache(dst, (void __user *)src, n, 0);
}
void rvt_wss_exit(struct rvt_dev_info *rdi)
{
	struct rvt_wss *wss = rdi->wss;

	/* coded to handle partially initialized and repeat callers */
/**
 * rvt_wss_init - Init wss data structures
 *
 * Return: 0 on success
 */
int rvt_wss_init(struct rvt_dev_info *rdi)
{
	unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
	unsigned int wss_threshold = rdi->dparms.wss_threshold;
	unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
	int node = rdi->dparms.node;
172 if (sge_copy_mode
!= RVT_SGE_COPY_ADAPTIVE
) {
177 rdi
->wss
= kzalloc_node(sizeof(*rdi
->wss
), GFP_KERNEL
, node
);
182 /* check for a valid percent range - default to 80 if none or invalid */
183 if (wss_threshold
< 1 || wss_threshold
> 100)
186 /* reject a wildly large period */
187 if (wss_clean_period
> 1000000)
188 wss_clean_period
= 256;
190 /* reject a zero period */
191 if (wss_clean_period
== 0)
192 wss_clean_period
= 1;
	/*
	 * Calculate the table size - the next power of 2 larger than the
	 * LLC size.  LLC size is in KiB.
	 */
	llc_size = rvt_wss_llc_size() * 1024;
	table_size = roundup_pow_of_two(llc_size);

	/* one bit per page in rounded up table */
	llc_bits = llc_size / PAGE_SIZE;
	table_bits = table_size / PAGE_SIZE;
	wss->pages_mask = table_bits - 1;
	wss->num_entries = table_bits / BITS_PER_LONG;

	wss->threshold = (llc_bits * wss_threshold) / 100;
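
	/*
	 * Worked example (illustrative only, assuming a 32 MiB LLC, 4 KiB
	 * pages and the default 80% threshold): llc_size = 32768 KiB * 1024
	 * = 33554432 bytes, already a power of two, so table_size is the
	 * same.  That yields llc_bits = table_bits = 8192 one-bit-per-page
	 * entries, pages_mask = 0x1fff, 128 u64 bitmap words, and
	 * wss->threshold = 8192 * 80 / 100 = 6553 pages.
	 */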
	if (wss->threshold == 0)
		wss->threshold = 1;

	wss->clean_period = wss_clean_period;
	atomic_set(&wss->clean_counter, wss_clean_period);

	wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
/*
 * Advance the clean counter.  When the clean period has expired,
 * clean an entry.
 *
 * This is implemented in atomics to avoid locking.  Because multiple
 * variables are involved, it can be racy which can lead to slightly
 * inaccurate information.  Since this is only a heuristic, this is
 * OK.  Any inaccuracies will clean themselves out as the counter
 * advances.  That said, it is unlikely the entry clean operation will
 * race - the next possible racer will not start until the next clean
 * period.
 *
 * The clean counter is implemented as a decrement to zero.  When zero
 * is reached an entry is cleaned.
 */
static void wss_advance_clean_counter(struct rvt_wss *wss)
	/* become the cleaner if we decrement the counter to zero */
	if (atomic_dec_and_test(&wss->clean_counter)) {
		/*
		 * Set, not add, the clean period.  This avoids an issue
		 * where the counter could decrement below the clean period.
		 * Doing a set can result in lost decrements, slowing the
		 * clean advance.  Since this is a heuristic, this possible
		 * slowdown is OK.
		 *
		 * An alternative is to loop, advancing the counter by a
		 * clean period until the result is > 0.  However, this could
		 * lead to several threads keeping another in the clean loop.
		 * This could be mitigated by limiting the number of times
		 * we stay in the loop.
		 */
		atomic_set(&wss->clean_counter, wss->clean_period);

		/*
		 * Uniquely grab the entry to clean and move to next.
		 * The current entry is always the lower bits of
		 * wss.clean_entry.  The table size, wss.num_entries,
		 * is always a power-of-2.
		 */
		entry = (atomic_inc_return(&wss->clean_entry) - 1) &
			(wss->num_entries - 1);

		/* clear the entry and count the bits */
		bits = xchg(&wss->entries[entry], 0);
		weight = hweight64((u64)bits);
		/* only adjust the contended total count if needed */
		if (weight)
			atomic_sub(weight, &wss->total_count);
	}
/*
 * Insert the given address into the working set array.
 */
static void wss_insert(struct rvt_wss *wss, void *address)
{
	u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
	u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
	u32 nr = page & (BITS_PER_LONG - 1);

	if (!test_and_set_bit(nr, &wss->entries[entry]))
		atomic_inc(&wss->total_count);

	wss_advance_clean_counter(wss);
}

/*
 * Is the working set larger than the threshold?
 */
static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
{
	return atomic_read(&wss->total_count) >= wss->threshold;
}
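
/*
 * Sketch of how these helpers fit together (illustrative only, not
 * necessarily the exact rvt_copy_sge() code): for adaptive SGE copies
 * the destination page is first recorded and the copy method is then
 * chosen from the current working-set size, roughly
 *
 *	wss_insert(wss, dst);
 *	if (wss_exceeds_threshold(wss))
 *		cacheless_memcpy(dst, src, len);
 *	else
 *		memcpy(dst, src, len);
 */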
static void get_map_page(struct rvt_qpn_table *qpt,
			 struct rvt_qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */
	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}
/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt dev struct
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	struct rvt_qpn_map *map;

	if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;
	/*
	 * Drivers may want some QPs beyond what we need for verbs; let them
	 * use our QPN table.  No need for two.  Let's go ahead and mark the
	 * bitmaps for those.  The reserved range must be *after* the range
	 * which verbs allocates from.
	 */

	/* Figure out number of bit maps needed before reserved range */
346 qpt
->nmaps
= rdi
->dparms
.qpn_res_start
/ RVT_BITS_PER_PAGE
;
348 /* This should always be zero */
349 offset
= rdi
->dparms
.qpn_res_start
& RVT_BITS_PER_PAGE_MASK
;
351 /* Starting with the first reserved bit map */
352 map
= &qpt
->map
[qpt
->nmaps
];
354 rvt_pr_info(rdi
, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
355 rdi
->dparms
.qpn_res_start
, rdi
->dparms
.qpn_res_end
);
356 for (i
= rdi
->dparms
.qpn_res_start
; i
<= rdi
->dparms
.qpn_res_end
; i
++) {
358 get_map_page(qpt
, map
);
364 set_bit(offset
, map
->page
);
366 if (offset
== RVT_BITS_PER_PAGE
) {
377 * free_qpn_table - free the QP number table for a device
378 * @qpt: the QPN table
380 static void free_qpn_table(struct rvt_qpn_table
*qpt
)
384 for (i
= 0; i
< ARRAY_SIZE(qpt
->map
); i
++)
385 free_page((unsigned long)qpt
->map
[i
].page
);
/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
399 if (!rdi
->dparms
.qp_table_size
)
403 * If driver is not doing any QP allocation then make sure it is
404 * providing the necessary QP functions.
406 if (!rdi
->driver_f
.free_all_qps
||
407 !rdi
->driver_f
.qp_priv_alloc
||
408 !rdi
->driver_f
.qp_priv_free
||
409 !rdi
->driver_f
.notify_qp_reset
||
410 !rdi
->driver_f
.notify_restart_rc
)
413 /* allocate parent object */
414 rdi
->qp_dev
= kzalloc_node(sizeof(*rdi
->qp_dev
), GFP_KERNEL
,
419 /* allocate hash table */
420 rdi
->qp_dev
->qp_table_size
= rdi
->dparms
.qp_table_size
;
421 rdi
->qp_dev
->qp_table_bits
= ilog2(rdi
->dparms
.qp_table_size
);
422 rdi
->qp_dev
->qp_table
=
423 kmalloc_array_node(rdi
->qp_dev
->qp_table_size
,
424 sizeof(*rdi
->qp_dev
->qp_table
),
425 GFP_KERNEL
, rdi
->dparms
.node
);
426 if (!rdi
->qp_dev
->qp_table
)
429 for (i
= 0; i
< rdi
->qp_dev
->qp_table_size
; i
++)
430 RCU_INIT_POINTER(rdi
->qp_dev
->qp_table
[i
], NULL
);
432 spin_lock_init(&rdi
->qp_dev
->qpt_lock
);
434 /* initialize qpn map */
435 if (init_qpn_table(rdi
, &rdi
->qp_dev
->qpn_table
))
438 spin_lock_init(&rdi
->n_qps_lock
);
443 kfree(rdi
->qp_dev
->qp_table
);
444 free_qpn_table(&rdi
->qp_dev
->qpn_table
);
/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
463 unsigned n
, qp_inuse
= 0;
464 spinlock_t
*ql
; /* work around too long line below */
466 if (rdi
->driver_f
.free_all_qps
)
467 qp_inuse
= rdi
->driver_f
.free_all_qps(rdi
);
469 qp_inuse
+= rvt_mcast_tree_empty(rdi
);
474 ql
= &rdi
->qp_dev
->qpt_lock
;
475 spin_lock_irqsave(ql
, flags
);
476 for (n
= 0; n
< rdi
->qp_dev
->qp_table_size
; n
++) {
477 qp
= rcu_dereference_protected(rdi
->qp_dev
->qp_table
[n
],
478 lockdep_is_held(ql
));
479 RCU_INIT_POINTER(rdi
->qp_dev
->qp_table
[n
], NULL
);
481 for (; qp
; qp
= rcu_dereference_protected(qp
->next
,
482 lockdep_is_held(ql
)))
485 spin_unlock_irqrestore(ql
, flags
);
491 * rvt_qp_exit - clean up qps on device exit
492 * @rdi: rvt dev structure
494 * Check for qp leaks and free resources.
496 void rvt_qp_exit(struct rvt_dev_info
*rdi
)
498 u32 qps_inuse
= rvt_free_all_qps(rdi
);
501 rvt_pr_err(rdi
, "QP memory leak! %u still in use\n",
506 kfree(rdi
->qp_dev
->qp_table
);
507 free_qpn_table(&rdi
->qp_dev
->qpn_table
);
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
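
/*
 * Worked example (illustrative): with RVT_BITS_PER_PAGE bits per map
 * page, the bit at offset 5 of the third map (map - qpt->map == 2)
 * names QPN 2 * RVT_BITS_PER_PAGE + 5.
 */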
/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *	       IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num)
529 u32 i
, offset
, max_scan
, qpn
;
530 struct rvt_qpn_map
*map
;
533 if (rdi
->driver_f
.alloc_qpn
)
534 return rdi
->driver_f
.alloc_qpn(rdi
, qpt
, type
, port_num
);
536 if (type
== IB_QPT_SMI
|| type
== IB_QPT_GSI
) {
539 ret
= type
== IB_QPT_GSI
;
540 n
= 1 << (ret
+ 2 * (port_num
- 1));
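		/*
		 * Illustrative arithmetic: for GSI (ret == 1) on port 1,
		 * n == 1 << 1 == 2; for SMI (ret == 0) on port 2,
		 * n == 1 << 2 == 4.
		 */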
541 spin_lock(&qpt
->lock
);
546 spin_unlock(&qpt
->lock
);
550 qpn
= qpt
->last
+ qpt
->incr
;
551 if (qpn
>= RVT_QPN_MAX
)
552 qpn
= qpt
->incr
| ((qpt
->last
& 1) ^ 1);
553 /* offset carries bit 0 */
554 offset
= qpn
& RVT_BITS_PER_PAGE_MASK
;
555 map
= &qpt
->map
[qpn
/ RVT_BITS_PER_PAGE
];
556 max_scan
= qpt
->nmaps
- !offset
;
558 if (unlikely(!map
->page
)) {
559 get_map_page(qpt
, map
);
560 if (unlikely(!map
->page
))
564 if (!test_and_set_bit(offset
, map
->page
)) {
571 * This qpn might be bogus if offset >= BITS_PER_PAGE.
572 * That is OK. It gets re-assigned below
574 qpn
= mk_qpn(qpt
, map
, offset
);
575 } while (offset
< RVT_BITS_PER_PAGE
&& qpn
< RVT_QPN_MAX
);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
581 if (++i
> max_scan
) {
582 if (qpt
->nmaps
== RVT_QPNMAP_ENTRIES
)
584 map
= &qpt
->map
[qpt
->nmaps
++];
585 /* start at incr with current bit 0 */
586 offset
= qpt
->incr
| (offset
& 1);
587 } else if (map
< &qpt
->map
[qpt
->nmaps
]) {
589 /* start at incr with current bit 0 */
590 offset
= qpt
->incr
| (offset
& 1);
593 /* wrap to first map page, invert bit 0 */
594 offset
= qpt
->incr
| ((offset
& 1) ^ 1);
596 /* there can be no set bits in low-order QoS bits */
597 WARN_ON(offset
& (BIT(rdi
->dparms
.qos_shift
) - 1));
598 qpn
= mk_qpn(qpt
, map
, offset
);
/**
 * rvt_clear_mr_refs - Drop held mr refs
 * @qp: rvt qp data structure
 * @clr_sends: whether to clear the send side or not
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
615 struct rvt_dev_info
*rdi
= ib_to_rvt(qp
->ibqp
.device
);
617 if (test_and_clear_bit(RVT_R_REWIND_SGE
, &qp
->r_aflags
))
618 rvt_put_ss(&qp
->s_rdma_read_sge
);
620 rvt_put_ss(&qp
->r_sge
);
623 while (qp
->s_last
!= qp
->s_head
) {
624 struct rvt_swqe
*wqe
= rvt_get_swqe_ptr(qp
, qp
->s_last
);
626 rvt_put_qp_swqe(qp
, wqe
);
627 if (++qp
->s_last
>= qp
->s_size
)
629 smp_wmb(); /* see qp_set_savail */
632 rvt_put_mr(qp
->s_rdma_mr
);
633 qp
->s_rdma_mr
= NULL
;
637 for (n
= 0; qp
->s_ack_queue
&& n
< rvt_max_atomic(rdi
); n
++) {
638 struct rvt_ack_entry
*e
= &qp
->s_ack_queue
[n
];
640 if (e
->rdma_sge
.mr
) {
641 rvt_put_mr(e
->rdma_sge
.mr
);
642 e
->rdma_sge
.mr
= NULL
;
/**
 * rvt_swqe_has_lkey - return true if lkey is used by swqe
 * @wqe - the send wqe
 * @lkey - the lkey
 *
 * Test the swqe for using lkey
 */
static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
658 for (i
= 0; i
< wqe
->wr
.num_sge
; i
++) {
659 struct rvt_sge
*sge
= &wqe
->sg_list
[i
];
661 if (rvt_mr_has_lkey(sge
->mr
, lkey
))
/**
 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
 */
static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
674 u32 s_last
= qp
->s_last
;
676 while (s_last
!= qp
->s_head
) {
677 struct rvt_swqe
*wqe
= rvt_get_swqe_ptr(qp
, s_last
);
679 if (rvt_swqe_has_lkey(wqe
, lkey
))
682 if (++s_last
>= qp
->s_size
)
686 if (rvt_mr_has_lkey(qp
->s_rdma_mr
, lkey
))
692 * rvt_qp_acks_has_lkey - return true if acks have lkey
696 static bool rvt_qp_acks_has_lkey(struct rvt_qp
*qp
, u32 lkey
)
699 struct rvt_dev_info
*rdi
= ib_to_rvt(qp
->ibqp
.device
);
701 for (i
= 0; qp
->s_ack_queue
&& i
< rvt_max_atomic(rdi
); i
++) {
702 struct rvt_ack_entry
*e
= &qp
->s_ack_queue
[i
];
704 if (rvt_mr_has_lkey(e
->rdma_sge
.mr
, lkey
))
/**
 * rvt_qp_mr_clean - clean up remote ops for lkey
 * @qp: the qp
 * @lkey: the lkey that is being de-registered
 *
 * This routine checks if the lkey is being used by the qp.
 *
 * If so, the qp is put into an error state to eliminate
 * any references from the qp.
 */
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
723 bool lastwqe
= false;
725 if (qp
->ibqp
.qp_type
== IB_QPT_SMI
||
726 qp
->ibqp
.qp_type
== IB_QPT_GSI
)
727 /* avoid special QPs */
729 spin_lock_irq(&qp
->r_lock
);
730 spin_lock(&qp
->s_hlock
);
731 spin_lock(&qp
->s_lock
);
733 if (qp
->state
== IB_QPS_ERR
|| qp
->state
== IB_QPS_RESET
)
736 if (rvt_ss_has_lkey(&qp
->r_sge
, lkey
) ||
737 rvt_qp_sends_has_lkey(qp
, lkey
) ||
738 rvt_qp_acks_has_lkey(qp
, lkey
))
739 lastwqe
= rvt_error_qp(qp
, IB_WC_LOC_PROT_ERR
);
741 spin_unlock(&qp
->s_lock
);
742 spin_unlock(&qp
->s_hlock
);
743 spin_unlock_irq(&qp
->r_lock
);
747 ev
.device
= qp
->ibqp
.device
;
748 ev
.element
.qp
= &qp
->ibqp
;
749 ev
.event
= IB_EVENT_QP_LAST_WQE_REACHED
;
750 qp
->ibqp
.event_handler(&ev
, qp
->ibqp
.qp_context
);
/**
 * rvt_remove_qp - remove qp from the table
 * @rdi: rvt dev struct
 * @qp: qp to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
764 struct rvt_ibport
*rvp
= rdi
->ports
[qp
->port_num
- 1];
765 u32 n
= hash_32(qp
->ibqp
.qp_num
, rdi
->qp_dev
->qp_table_bits
);
769 spin_lock_irqsave(&rdi
->qp_dev
->qpt_lock
, flags
);
771 if (rcu_dereference_protected(rvp
->qp
[0],
772 lockdep_is_held(&rdi
->qp_dev
->qpt_lock
)) == qp
) {
773 RCU_INIT_POINTER(rvp
->qp
[0], NULL
);
774 } else if (rcu_dereference_protected(rvp
->qp
[1],
775 lockdep_is_held(&rdi
->qp_dev
->qpt_lock
)) == qp
) {
776 RCU_INIT_POINTER(rvp
->qp
[1], NULL
);
779 struct rvt_qp __rcu
**qpp
;
782 qpp
= &rdi
->qp_dev
->qp_table
[n
];
783 for (; (q
= rcu_dereference_protected(*qpp
,
784 lockdep_is_held(&rdi
->qp_dev
->qpt_lock
))) != NULL
;
787 RCU_INIT_POINTER(*qpp
,
788 rcu_dereference_protected(qp
->next
,
789 lockdep_is_held(&rdi
->qp_dev
->qpt_lock
)));
791 trace_rvt_qpremove(qp
, n
);
797 spin_unlock_irqrestore(&rdi
->qp_dev
->qpt_lock
, flags
);
/**
 * rvt_init_qp - initialize the QP state to the reset state
 * @rdi: rvt dev struct
 * @qp: the QP to init or reinit
 * @type: the QP type
 *
 * This function is called from both rvt_create_qp() and
 * rvt_reset_qp().  The difference is that the reset path
 * holds the necessary locks to protect against concurrent
 * access.
 */
static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			enum ib_qp_type type)
819 qp
->qp_access_flags
= 0;
820 qp
->s_flags
&= RVT_S_SIGNAL_REQ_WR
;
826 qp
->s_sending_psn
= 0;
827 qp
->s_sending_hpsn
= 0;
831 if (type
== IB_QPT_RC
) {
832 qp
->s_state
= IB_OPCODE_RC_SEND_LAST
;
833 qp
->r_state
= IB_OPCODE_RC_SEND_LAST
;
835 qp
->s_state
= IB_OPCODE_UC_SEND_LAST
;
836 qp
->r_state
= IB_OPCODE_UC_SEND_LAST
;
838 qp
->s_ack_state
= IB_OPCODE_RC_ACKNOWLEDGE
;
849 qp
->s_mig_state
= IB_MIG_MIGRATED
;
850 qp
->r_head_ack_queue
= 0;
851 qp
->s_tail_ack_queue
= 0;
852 qp
->s_acked_ack_queue
= 0;
853 qp
->s_num_rd_atomic
= 0;
855 qp
->r_rq
.wq
->head
= 0;
856 qp
->r_rq
.wq
->tail
= 0;
858 qp
->r_sge
.num_sge
= 0;
859 atomic_set(&qp
->s_reserved_used
, 0);
863 * rvt_reset_qp - initialize the QP state to the reset state
864 * @qp: the QP to reset
867 * r_lock, s_hlock, and s_lock are required to be held by the caller
869 static void rvt_reset_qp(struct rvt_dev_info
*rdi
, struct rvt_qp
*qp
,
870 enum ib_qp_type type
)
871 __must_hold(&qp
->s_lock
)
872 __must_hold(&qp
->s_hlock
)
873 __must_hold(&qp
->r_lock
)
875 lockdep_assert_held(&qp
->r_lock
);
876 lockdep_assert_held(&qp
->s_hlock
);
877 lockdep_assert_held(&qp
->s_lock
);
878 if (qp
->state
!= IB_QPS_RESET
) {
879 qp
->state
= IB_QPS_RESET
;
881 /* Let drivers flush their waitlist */
882 rdi
->driver_f
.flush_qp_waiters(qp
);
883 rvt_stop_rc_timers(qp
);
884 qp
->s_flags
&= ~(RVT_S_TIMER
| RVT_S_ANY_WAIT
);
885 spin_unlock(&qp
->s_lock
);
886 spin_unlock(&qp
->s_hlock
);
887 spin_unlock_irq(&qp
->r_lock
);
889 /* Stop the send queue and the retry timer */
890 rdi
->driver_f
.stop_send_queue(qp
);
891 rvt_del_timers_sync(qp
);
892 /* Wait for things to stop */
893 rdi
->driver_f
.quiesce_qp(qp
);
895 /* take qp out the hash and wait for it to be unused */
896 rvt_remove_qp(rdi
, qp
);
898 /* grab the lock b/c it was locked at call time */
899 spin_lock_irq(&qp
->r_lock
);
900 spin_lock(&qp
->s_hlock
);
901 spin_lock(&qp
->s_lock
);
903 rvt_clear_mr_refs(qp
, 1);
905 * Let the driver do any tear down or re-init it needs to for
906 * a qp that has been reset
908 rdi
->driver_f
.notify_qp_reset(qp
);
910 rvt_init_qp(rdi
, qp
, type
);
911 lockdep_assert_held(&qp
->r_lock
);
912 lockdep_assert_held(&qp
->s_hlock
);
913 lockdep_assert_held(&qp
->s_lock
);
/**
 * rvt_free_qpn - Free a qpn from the bit map
 * @qpt: the QP number table
 * @qpn: queue pair number to free
 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
922 struct rvt_qpn_map
*map
;
924 map
= qpt
->map
+ (qpn
& RVT_QPN_MASK
) / RVT_BITS_PER_PAGE
;
926 clear_bit(qpn
& RVT_BITS_PER_PAGE_MASK
, map
->page
);
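
/*
 * Illustrative inverse of mk_qpn() above: QPN (2 * RVT_BITS_PER_PAGE + 5)
 * selects map[2] and clears bit 5 of that page.
 */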
/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue.  However, drivers have their
 * own unique idea of what queue pair numbers mean.  For instance there is a
 * reserved range for PSM.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
949 struct rvt_swqe
*swq
= NULL
;
952 struct ib_qp
*ret
= ERR_PTR(-ENOMEM
);
953 struct rvt_dev_info
*rdi
= ib_to_rvt(ibpd
->device
);
958 return ERR_PTR(-EINVAL
);
960 if (init_attr
->cap
.max_send_sge
> rdi
->dparms
.props
.max_send_sge
||
961 init_attr
->cap
.max_send_wr
> rdi
->dparms
.props
.max_qp_wr
||
962 init_attr
->create_flags
)
963 return ERR_PTR(-EINVAL
);
965 /* Check receive queue parameters if no SRQ is specified. */
966 if (!init_attr
->srq
) {
967 if (init_attr
->cap
.max_recv_sge
>
968 rdi
->dparms
.props
.max_recv_sge
||
969 init_attr
->cap
.max_recv_wr
> rdi
->dparms
.props
.max_qp_wr
)
970 return ERR_PTR(-EINVAL
);
972 if (init_attr
->cap
.max_send_sge
+
973 init_attr
->cap
.max_send_wr
+
974 init_attr
->cap
.max_recv_sge
+
975 init_attr
->cap
.max_recv_wr
== 0)
976 return ERR_PTR(-EINVAL
);
979 init_attr
->cap
.max_send_wr
+ 1 +
980 rdi
->dparms
.reserved_operations
;
981 switch (init_attr
->qp_type
) {
984 if (init_attr
->port_num
== 0 ||
985 init_attr
->port_num
> ibpd
->device
->phys_port_cnt
)
986 return ERR_PTR(-EINVAL
);
991 sz
= sizeof(struct rvt_sge
) *
992 init_attr
->cap
.max_send_sge
+
993 sizeof(struct rvt_swqe
);
994 swq
= vzalloc_node(array_size(sz
, sqsize
), rdi
->dparms
.node
);
996 return ERR_PTR(-ENOMEM
);
1000 if (init_attr
->srq
) {
1001 struct rvt_srq
*srq
= ibsrq_to_rvtsrq(init_attr
->srq
);
1003 if (srq
->rq
.max_sge
> 1)
1004 sg_list_sz
= sizeof(*qp
->r_sg_list
) *
1005 (srq
->rq
.max_sge
- 1);
1006 } else if (init_attr
->cap
.max_recv_sge
> 1)
1007 sg_list_sz
= sizeof(*qp
->r_sg_list
) *
1008 (init_attr
->cap
.max_recv_sge
- 1);
1009 qp
= kzalloc_node(sz
+ sg_list_sz
, GFP_KERNEL
,
1014 RCU_INIT_POINTER(qp
->next
, NULL
);
1015 if (init_attr
->qp_type
== IB_QPT_RC
) {
1017 kcalloc_node(rvt_max_atomic(rdi
),
1018 sizeof(*qp
->s_ack_queue
),
1021 if (!qp
->s_ack_queue
)
1024 /* initialize timers needed for rc qp */
1025 timer_setup(&qp
->s_timer
, rvt_rc_timeout
, 0);
1026 hrtimer_init(&qp
->s_rnr_timer
, CLOCK_MONOTONIC
,
1028 qp
->s_rnr_timer
.function
= rvt_rc_rnr_retry
;
	/*
	 * Driver needs to set up its private QP structure and do any
	 * initialization that is needed.
	 */
1034 priv
= rdi
->driver_f
.qp_priv_alloc(rdi
, qp
);
1040 qp
->timeout_jiffies
=
1041 usecs_to_jiffies((4096UL * (1UL << qp
->timeout
)) /
1043 if (init_attr
->srq
) {
1046 qp
->r_rq
.size
= init_attr
->cap
.max_recv_wr
+ 1;
1047 qp
->r_rq
.max_sge
= init_attr
->cap
.max_recv_sge
;
1048 sz
= (sizeof(struct ib_sge
) * qp
->r_rq
.max_sge
) +
1049 sizeof(struct rvt_rwqe
);
1051 qp
->r_rq
.wq
= vmalloc_user(
1052 sizeof(struct rvt_rwq
) +
1053 qp
->r_rq
.size
* sz
);
1055 qp
->r_rq
.wq
= vzalloc_node(
1056 sizeof(struct rvt_rwq
) +
1060 goto bail_driver_priv
;
1064 * ib_create_qp() will initialize qp->ibqp
1065 * except for qp->ibqp.qp_num.
1067 spin_lock_init(&qp
->r_lock
);
1068 spin_lock_init(&qp
->s_hlock
);
1069 spin_lock_init(&qp
->s_lock
);
1070 spin_lock_init(&qp
->r_rq
.lock
);
1071 atomic_set(&qp
->refcount
, 0);
1072 atomic_set(&qp
->local_ops_pending
, 0);
1073 init_waitqueue_head(&qp
->wait
);
1074 INIT_LIST_HEAD(&qp
->rspwait
);
1075 qp
->state
= IB_QPS_RESET
;
1077 qp
->s_size
= sqsize
;
1078 qp
->s_avail
= init_attr
->cap
.max_send_wr
;
1079 qp
->s_max_sge
= init_attr
->cap
.max_send_sge
;
1080 if (init_attr
->sq_sig_type
== IB_SIGNAL_REQ_WR
)
1081 qp
->s_flags
= RVT_S_SIGNAL_REQ_WR
;
1083 err
= alloc_qpn(rdi
, &rdi
->qp_dev
->qpn_table
,
1085 init_attr
->port_num
);
1090 qp
->ibqp
.qp_num
= err
;
1091 qp
->port_num
= init_attr
->port_num
;
1092 rvt_init_qp(rdi
, qp
, init_attr
->qp_type
);
1093 if (rdi
->driver_f
.qp_priv_init
) {
1094 err
= rdi
->driver_f
.qp_priv_init(rdi
, qp
, init_attr
);
1103 /* Don't support raw QPs */
1104 return ERR_PTR(-EINVAL
);
1107 init_attr
->cap
.max_inline_data
= 0;
1110 * Return the address of the RWQ as the offset to mmap.
1111 * See rvt_mmap() for details.
1113 if (udata
&& udata
->outlen
>= sizeof(__u64
)) {
1117 err
= ib_copy_to_udata(udata
, &offset
,
1124 u32 s
= sizeof(struct rvt_rwq
) + qp
->r_rq
.size
* sz
;
1126 qp
->ip
= rvt_create_mmap_info(rdi
, s
, udata
,
1129 ret
= ERR_PTR(-ENOMEM
);
1133 err
= ib_copy_to_udata(udata
, &qp
->ip
->offset
,
1134 sizeof(qp
->ip
->offset
));
1140 qp
->pid
= current
->pid
;
1143 spin_lock(&rdi
->n_qps_lock
);
1144 if (rdi
->n_qps_allocated
== rdi
->dparms
.props
.max_qp
) {
1145 spin_unlock(&rdi
->n_qps_lock
);
1146 ret
= ERR_PTR(-ENOMEM
);
1150 rdi
->n_qps_allocated
++;
1152 * Maintain a busy_jiffies variable that will be added to the timeout
1153 * period in mod_retry_timer and add_retry_timer. This busy jiffies
1154 * is scaled by the number of rc qps created for the device to reduce
1155 * the number of timeouts occurring when there is a large number of
1156 * qps. busy_jiffies is incremented every rc qp scaling interval.
1157 * The scaling interval is selected based on extensive performance
1158 * evaluation of targeted workloads.
1160 if (init_attr
->qp_type
== IB_QPT_RC
) {
1162 rdi
->busy_jiffies
= rdi
->n_rc_qps
/ RC_QP_SCALING_INTERVAL
;
1164 spin_unlock(&rdi
->n_qps_lock
);
1167 spin_lock_irq(&rdi
->pending_lock
);
1168 list_add(&qp
->ip
->pending_mmaps
, &rdi
->pending_mmaps
);
1169 spin_unlock_irq(&rdi
->pending_lock
);
	/*
	 * We have our QP and it's good; now keep track of what types of
	 * opcodes can be processed on this QP.  We do this by keeping track
	 * of what the 3 high order bits of the opcode are.
	 */
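	/*
	 * For reference (values from rdma/ib_pack.h): IB_OPCODE_RC is 0x00,
	 * IB_OPCODE_UC is 0x20 and IB_OPCODE_UD is 0x60, so allowed_ops is
	 * effectively the transport prefix carried in bits 7:5 of every
	 * opcode on the wire.
	 */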
1179 switch (init_attr
->qp_type
) {
1183 qp
->allowed_ops
= IB_OPCODE_UD
;
1186 qp
->allowed_ops
= IB_OPCODE_RC
;
1189 qp
->allowed_ops
= IB_OPCODE_UC
;
1192 ret
= ERR_PTR(-EINVAL
);
1200 kref_put(&qp
->ip
->ref
, rvt_release_mmap_info
);
1203 rvt_free_qpn(&rdi
->qp_dev
->qpn_table
, qp
->ibqp
.qp_num
);
1210 rdi
->driver_f
.qp_priv_free(rdi
, qp
);
1213 kfree(qp
->s_ack_queue
);
1223 * rvt_error_qp - put a QP into the error state
1224 * @qp: the QP to put into the error state
1225 * @err: the receive completion error to signal if a RWQE is active
1227 * Flushes both send and receive work queues.
1229 * Return: true if last WQE event should be generated.
1230 * The QP r_lock and s_lock should be held and interrupts disabled.
1231 * If we are already in error state, just return.
1233 int rvt_error_qp(struct rvt_qp
*qp
, enum ib_wc_status err
)
1237 struct rvt_dev_info
*rdi
= ib_to_rvt(qp
->ibqp
.device
);
1239 lockdep_assert_held(&qp
->r_lock
);
1240 lockdep_assert_held(&qp
->s_lock
);
1241 if (qp
->state
== IB_QPS_ERR
|| qp
->state
== IB_QPS_RESET
)
1244 qp
->state
= IB_QPS_ERR
;
1246 if (qp
->s_flags
& (RVT_S_TIMER
| RVT_S_WAIT_RNR
)) {
1247 qp
->s_flags
&= ~(RVT_S_TIMER
| RVT_S_WAIT_RNR
);
1248 del_timer(&qp
->s_timer
);
1251 if (qp
->s_flags
& RVT_S_ANY_WAIT_SEND
)
1252 qp
->s_flags
&= ~RVT_S_ANY_WAIT_SEND
;
1254 rdi
->driver_f
.notify_error_qp(qp
);
1256 /* Schedule the sending tasklet to drain the send work queue. */
1257 if (READ_ONCE(qp
->s_last
) != qp
->s_head
)
1258 rdi
->driver_f
.schedule_send(qp
);
1260 rvt_clear_mr_refs(qp
, 0);
1262 memset(&wc
, 0, sizeof(wc
));
1264 wc
.opcode
= IB_WC_RECV
;
1266 if (test_and_clear_bit(RVT_R_WRID_VALID
, &qp
->r_aflags
)) {
1267 wc
.wr_id
= qp
->r_wr_id
;
1269 rvt_cq_enter(ibcq_to_rvtcq(qp
->ibqp
.recv_cq
), &wc
, 1);
1271 wc
.status
= IB_WC_WR_FLUSH_ERR
;
1278 spin_lock(&qp
->r_rq
.lock
);
1280 /* sanity check pointers before trusting them */
1283 if (head
>= qp
->r_rq
.size
)
1286 if (tail
>= qp
->r_rq
.size
)
1288 while (tail
!= head
) {
1289 wc
.wr_id
= rvt_get_rwqe_ptr(&qp
->r_rq
, tail
)->wr_id
;
1290 if (++tail
>= qp
->r_rq
.size
)
1292 rvt_cq_enter(ibcq_to_rvtcq(qp
->ibqp
.recv_cq
), &wc
, 1);
1296 spin_unlock(&qp
->r_rq
.lock
);
1297 } else if (qp
->ibqp
.event_handler
) {
1304 EXPORT_SYMBOL(rvt_error_qp
);
1307 * Put the QP into the hash table.
1308 * The hash table holds a reference to the QP.
1310 static void rvt_insert_qp(struct rvt_dev_info
*rdi
, struct rvt_qp
*qp
)
1312 struct rvt_ibport
*rvp
= rdi
->ports
[qp
->port_num
- 1];
1313 unsigned long flags
;
1316 spin_lock_irqsave(&rdi
->qp_dev
->qpt_lock
, flags
);
1318 if (qp
->ibqp
.qp_num
<= 1) {
1319 rcu_assign_pointer(rvp
->qp
[qp
->ibqp
.qp_num
], qp
);
1321 u32 n
= hash_32(qp
->ibqp
.qp_num
, rdi
->qp_dev
->qp_table_bits
);
1323 qp
->next
= rdi
->qp_dev
->qp_table
[n
];
1324 rcu_assign_pointer(rdi
->qp_dev
->qp_table
[n
], qp
);
1325 trace_rvt_qpinsert(qp
, n
);
1328 spin_unlock_irqrestore(&rdi
->qp_dev
->qpt_lock
, flags
);
/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
1343 struct rvt_dev_info
*rdi
= ib_to_rvt(ibqp
->device
);
1344 struct rvt_qp
*qp
= ibqp_to_rvtqp(ibqp
);
1345 enum ib_qp_state cur_state
, new_state
;
1349 int pmtu
= 0; /* for gcc warning only */
1352 spin_lock_irq(&qp
->r_lock
);
1353 spin_lock(&qp
->s_hlock
);
1354 spin_lock(&qp
->s_lock
);
1356 cur_state
= attr_mask
& IB_QP_CUR_STATE
?
1357 attr
->cur_qp_state
: qp
->state
;
1358 new_state
= attr_mask
& IB_QP_STATE
? attr
->qp_state
: cur_state
;
1359 opa_ah
= rdma_cap_opa_ah(ibqp
->device
, qp
->port_num
);
1361 if (!ib_modify_qp_is_ok(cur_state
, new_state
, ibqp
->qp_type
,
1365 if (rdi
->driver_f
.check_modify_qp
&&
1366 rdi
->driver_f
.check_modify_qp(qp
, attr
, attr_mask
, udata
))
1369 if (attr_mask
& IB_QP_AV
) {
1371 if (rdma_ah_get_dlid(&attr
->ah_attr
) >=
1372 opa_get_mcast_base(OPA_MCAST_NR
))
1375 if (rdma_ah_get_dlid(&attr
->ah_attr
) >=
1376 be16_to_cpu(IB_MULTICAST_LID_BASE
))
1380 if (rvt_check_ah(qp
->ibqp
.device
, &attr
->ah_attr
))
1384 if (attr_mask
& IB_QP_ALT_PATH
) {
1386 if (rdma_ah_get_dlid(&attr
->alt_ah_attr
) >=
1387 opa_get_mcast_base(OPA_MCAST_NR
))
1390 if (rdma_ah_get_dlid(&attr
->alt_ah_attr
) >=
1391 be16_to_cpu(IB_MULTICAST_LID_BASE
))
1395 if (rvt_check_ah(qp
->ibqp
.device
, &attr
->alt_ah_attr
))
1397 if (attr
->alt_pkey_index
>= rvt_get_npkeys(rdi
))
1401 if (attr_mask
& IB_QP_PKEY_INDEX
)
1402 if (attr
->pkey_index
>= rvt_get_npkeys(rdi
))
1405 if (attr_mask
& IB_QP_MIN_RNR_TIMER
)
1406 if (attr
->min_rnr_timer
> 31)
1409 if (attr_mask
& IB_QP_PORT
)
1410 if (qp
->ibqp
.qp_type
== IB_QPT_SMI
||
1411 qp
->ibqp
.qp_type
== IB_QPT_GSI
||
1412 attr
->port_num
== 0 ||
1413 attr
->port_num
> ibqp
->device
->phys_port_cnt
)
1416 if (attr_mask
& IB_QP_DEST_QPN
)
1417 if (attr
->dest_qp_num
> RVT_QPN_MASK
)
1420 if (attr_mask
& IB_QP_RETRY_CNT
)
1421 if (attr
->retry_cnt
> 7)
1424 if (attr_mask
& IB_QP_RNR_RETRY
)
1425 if (attr
->rnr_retry
> 7)
	/*
	 * Don't allow invalid path_mtu values.  OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu).  We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
1436 if (attr_mask
& IB_QP_PATH_MTU
) {
1437 pmtu
= rdi
->driver_f
.get_pmtu_from_attr(rdi
, qp
, attr
);
1442 if (attr_mask
& IB_QP_PATH_MIG_STATE
) {
1443 if (attr
->path_mig_state
== IB_MIG_REARM
) {
1444 if (qp
->s_mig_state
== IB_MIG_ARMED
)
1446 if (new_state
!= IB_QPS_RTS
)
1448 } else if (attr
->path_mig_state
== IB_MIG_MIGRATED
) {
1449 if (qp
->s_mig_state
== IB_MIG_REARM
)
1451 if (new_state
!= IB_QPS_RTS
&& new_state
!= IB_QPS_SQD
)
1453 if (qp
->s_mig_state
== IB_MIG_ARMED
)
1460 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
)
1461 if (attr
->max_dest_rd_atomic
> rdi
->dparms
.max_rdma_atomic
)
1464 switch (new_state
) {
1466 if (qp
->state
!= IB_QPS_RESET
)
1467 rvt_reset_qp(rdi
, qp
, ibqp
->qp_type
);
1471 /* Allow event to re-trigger if QP set to RTR more than once */
1472 qp
->r_flags
&= ~RVT_R_COMM_EST
;
1473 qp
->state
= new_state
;
1477 qp
->s_draining
= qp
->s_last
!= qp
->s_cur
;
1478 qp
->state
= new_state
;
1482 if (qp
->ibqp
.qp_type
== IB_QPT_RC
)
1484 qp
->state
= new_state
;
1488 lastwqe
= rvt_error_qp(qp
, IB_WC_WR_FLUSH_ERR
);
1492 qp
->state
= new_state
;
1496 if (attr_mask
& IB_QP_PKEY_INDEX
)
1497 qp
->s_pkey_index
= attr
->pkey_index
;
1499 if (attr_mask
& IB_QP_PORT
)
1500 qp
->port_num
= attr
->port_num
;
1502 if (attr_mask
& IB_QP_DEST_QPN
)
1503 qp
->remote_qpn
= attr
->dest_qp_num
;
1505 if (attr_mask
& IB_QP_SQ_PSN
) {
1506 qp
->s_next_psn
= attr
->sq_psn
& rdi
->dparms
.psn_modify_mask
;
1507 qp
->s_psn
= qp
->s_next_psn
;
1508 qp
->s_sending_psn
= qp
->s_next_psn
;
1509 qp
->s_last_psn
= qp
->s_next_psn
- 1;
1510 qp
->s_sending_hpsn
= qp
->s_last_psn
;
1513 if (attr_mask
& IB_QP_RQ_PSN
)
1514 qp
->r_psn
= attr
->rq_psn
& rdi
->dparms
.psn_modify_mask
;
1516 if (attr_mask
& IB_QP_ACCESS_FLAGS
)
1517 qp
->qp_access_flags
= attr
->qp_access_flags
;
1519 if (attr_mask
& IB_QP_AV
) {
1520 rdma_replace_ah_attr(&qp
->remote_ah_attr
, &attr
->ah_attr
);
1521 qp
->s_srate
= rdma_ah_get_static_rate(&attr
->ah_attr
);
1522 qp
->srate_mbps
= ib_rate_to_mbps(qp
->s_srate
);
1525 if (attr_mask
& IB_QP_ALT_PATH
) {
1526 rdma_replace_ah_attr(&qp
->alt_ah_attr
, &attr
->alt_ah_attr
);
1527 qp
->s_alt_pkey_index
= attr
->alt_pkey_index
;
1530 if (attr_mask
& IB_QP_PATH_MIG_STATE
) {
1531 qp
->s_mig_state
= attr
->path_mig_state
;
1533 qp
->remote_ah_attr
= qp
->alt_ah_attr
;
1534 qp
->port_num
= rdma_ah_get_port_num(&qp
->alt_ah_attr
);
1535 qp
->s_pkey_index
= qp
->s_alt_pkey_index
;
1539 if (attr_mask
& IB_QP_PATH_MTU
) {
1540 qp
->pmtu
= rdi
->driver_f
.mtu_from_qp(rdi
, qp
, pmtu
);
1541 qp
->log_pmtu
= ilog2(qp
->pmtu
);
1544 if (attr_mask
& IB_QP_RETRY_CNT
) {
1545 qp
->s_retry_cnt
= attr
->retry_cnt
;
1546 qp
->s_retry
= attr
->retry_cnt
;
1549 if (attr_mask
& IB_QP_RNR_RETRY
) {
1550 qp
->s_rnr_retry_cnt
= attr
->rnr_retry
;
1551 qp
->s_rnr_retry
= attr
->rnr_retry
;
1554 if (attr_mask
& IB_QP_MIN_RNR_TIMER
)
1555 qp
->r_min_rnr_timer
= attr
->min_rnr_timer
;
1557 if (attr_mask
& IB_QP_TIMEOUT
) {
1558 qp
->timeout
= attr
->timeout
;
1559 qp
->timeout_jiffies
= rvt_timeout_to_jiffies(qp
->timeout
);
1562 if (attr_mask
& IB_QP_QKEY
)
1563 qp
->qkey
= attr
->qkey
;
1565 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
)
1566 qp
->r_max_rd_atomic
= attr
->max_dest_rd_atomic
;
1568 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
)
1569 qp
->s_max_rd_atomic
= attr
->max_rd_atomic
;
1571 if (rdi
->driver_f
.modify_qp
)
1572 rdi
->driver_f
.modify_qp(qp
, attr
, attr_mask
, udata
);
1574 spin_unlock(&qp
->s_lock
);
1575 spin_unlock(&qp
->s_hlock
);
1576 spin_unlock_irq(&qp
->r_lock
);
1578 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
1579 rvt_insert_qp(rdi
, qp
);
1582 ev
.device
= qp
->ibqp
.device
;
1583 ev
.element
.qp
= &qp
->ibqp
;
1584 ev
.event
= IB_EVENT_QP_LAST_WQE_REACHED
;
1585 qp
->ibqp
.event_handler(&ev
, qp
->ibqp
.qp_context
);
1588 ev
.device
= qp
->ibqp
.device
;
1589 ev
.element
.qp
= &qp
->ibqp
;
1590 ev
.event
= IB_EVENT_PATH_MIG
;
1591 qp
->ibqp
.event_handler(&ev
, qp
->ibqp
.qp_context
);
1596 spin_unlock(&qp
->s_lock
);
1597 spin_unlock(&qp
->s_hlock
);
1598 spin_unlock_irq(&qp
->r_lock
);
1603 * rvt_destroy_qp - destroy a queue pair
1604 * @ibqp: the queue pair to destroy
1606 * Note that this can be called while the QP is actively sending or
1609 * Return: 0 on success.
1611 int rvt_destroy_qp(struct ib_qp
*ibqp
, struct ib_udata
*udata
)
1613 struct rvt_qp
*qp
= ibqp_to_rvtqp(ibqp
);
1614 struct rvt_dev_info
*rdi
= ib_to_rvt(ibqp
->device
);
1616 spin_lock_irq(&qp
->r_lock
);
1617 spin_lock(&qp
->s_hlock
);
1618 spin_lock(&qp
->s_lock
);
1619 rvt_reset_qp(rdi
, qp
, ibqp
->qp_type
);
1620 spin_unlock(&qp
->s_lock
);
1621 spin_unlock(&qp
->s_hlock
);
1622 spin_unlock_irq(&qp
->r_lock
);
1624 wait_event(qp
->wait
, !atomic_read(&qp
->refcount
));
1625 /* qpn is now available for use again */
1626 rvt_free_qpn(&rdi
->qp_dev
->qpn_table
, qp
->ibqp
.qp_num
);
1628 spin_lock(&rdi
->n_qps_lock
);
1629 rdi
->n_qps_allocated
--;
1630 if (qp
->ibqp
.qp_type
== IB_QPT_RC
) {
1632 rdi
->busy_jiffies
= rdi
->n_rc_qps
/ RC_QP_SCALING_INTERVAL
;
1634 spin_unlock(&rdi
->n_qps_lock
);
1637 kref_put(&qp
->ip
->ref
, rvt_release_mmap_info
);
1640 rdi
->driver_f
.qp_priv_free(rdi
, qp
);
1641 kfree(qp
->s_ack_queue
);
1642 rdma_destroy_ah_attr(&qp
->remote_ah_attr
);
1643 rdma_destroy_ah_attr(&qp
->alt_ah_attr
);
/**
 * rvt_query_qp - query an ibqp
 * @ibqp: IB qp to query
 * @attr: attr struct to fill in
 * @attr_mask: attr mask ignored
 * @init_attr: struct to fill in
 *
 * Return: always 0
 */
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
1661 struct rvt_qp
*qp
= ibqp_to_rvtqp(ibqp
);
1662 struct rvt_dev_info
*rdi
= ib_to_rvt(ibqp
->device
);
1664 attr
->qp_state
= qp
->state
;
1665 attr
->cur_qp_state
= attr
->qp_state
;
1666 attr
->path_mtu
= rdi
->driver_f
.mtu_to_path_mtu(qp
->pmtu
);
1667 attr
->path_mig_state
= qp
->s_mig_state
;
1668 attr
->qkey
= qp
->qkey
;
1669 attr
->rq_psn
= qp
->r_psn
& rdi
->dparms
.psn_mask
;
1670 attr
->sq_psn
= qp
->s_next_psn
& rdi
->dparms
.psn_mask
;
1671 attr
->dest_qp_num
= qp
->remote_qpn
;
1672 attr
->qp_access_flags
= qp
->qp_access_flags
;
1673 attr
->cap
.max_send_wr
= qp
->s_size
- 1 -
1674 rdi
->dparms
.reserved_operations
;
1675 attr
->cap
.max_recv_wr
= qp
->ibqp
.srq
? 0 : qp
->r_rq
.size
- 1;
1676 attr
->cap
.max_send_sge
= qp
->s_max_sge
;
1677 attr
->cap
.max_recv_sge
= qp
->r_rq
.max_sge
;
1678 attr
->cap
.max_inline_data
= 0;
1679 attr
->ah_attr
= qp
->remote_ah_attr
;
1680 attr
->alt_ah_attr
= qp
->alt_ah_attr
;
1681 attr
->pkey_index
= qp
->s_pkey_index
;
1682 attr
->alt_pkey_index
= qp
->s_alt_pkey_index
;
1683 attr
->en_sqd_async_notify
= 0;
1684 attr
->sq_draining
= qp
->s_draining
;
1685 attr
->max_rd_atomic
= qp
->s_max_rd_atomic
;
1686 attr
->max_dest_rd_atomic
= qp
->r_max_rd_atomic
;
1687 attr
->min_rnr_timer
= qp
->r_min_rnr_timer
;
1688 attr
->port_num
= qp
->port_num
;
1689 attr
->timeout
= qp
->timeout
;
1690 attr
->retry_cnt
= qp
->s_retry_cnt
;
1691 attr
->rnr_retry
= qp
->s_rnr_retry_cnt
;
1692 attr
->alt_port_num
=
1693 rdma_ah_get_port_num(&qp
->alt_ah_attr
);
1694 attr
->alt_timeout
= qp
->alt_timeout
;
1696 init_attr
->event_handler
= qp
->ibqp
.event_handler
;
1697 init_attr
->qp_context
= qp
->ibqp
.qp_context
;
1698 init_attr
->send_cq
= qp
->ibqp
.send_cq
;
1699 init_attr
->recv_cq
= qp
->ibqp
.recv_cq
;
1700 init_attr
->srq
= qp
->ibqp
.srq
;
1701 init_attr
->cap
= attr
->cap
;
1702 if (qp
->s_flags
& RVT_S_SIGNAL_REQ_WR
)
1703 init_attr
->sq_sig_type
= IB_SIGNAL_REQ_WR
;
1705 init_attr
->sq_sig_type
= IB_SIGNAL_ALL_WR
;
1706 init_attr
->qp_type
= qp
->ibqp
.qp_type
;
1707 init_attr
->port_num
= qp
->port_num
;
/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success otherwise errno
 */
int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		  const struct ib_recv_wr **bad_wr)
1724 struct rvt_qp
*qp
= ibqp_to_rvtqp(ibqp
);
1725 struct rvt_rwq
*wq
= qp
->r_rq
.wq
;
1726 unsigned long flags
;
1727 int qp_err_flush
= (ib_rvt_state_ops
[qp
->state
] & RVT_FLUSH_RECV
) &&
1730 /* Check that state is OK to post receive. */
1731 if (!(ib_rvt_state_ops
[qp
->state
] & RVT_POST_RECV_OK
) || !wq
) {
1736 for (; wr
; wr
= wr
->next
) {
1737 struct rvt_rwqe
*wqe
;
1741 if ((unsigned)wr
->num_sge
> qp
->r_rq
.max_sge
) {
1746 spin_lock_irqsave(&qp
->r_rq
.lock
, flags
);
1747 next
= wq
->head
+ 1;
1748 if (next
>= qp
->r_rq
.size
)
1750 if (next
== wq
->tail
) {
1751 spin_unlock_irqrestore(&qp
->r_rq
.lock
, flags
);
1755 if (unlikely(qp_err_flush
)) {
1758 memset(&wc
, 0, sizeof(wc
));
1760 wc
.opcode
= IB_WC_RECV
;
1761 wc
.wr_id
= wr
->wr_id
;
1762 wc
.status
= IB_WC_WR_FLUSH_ERR
;
1763 rvt_cq_enter(ibcq_to_rvtcq(qp
->ibqp
.recv_cq
), &wc
, 1);
1765 wqe
= rvt_get_rwqe_ptr(&qp
->r_rq
, wq
->head
);
1766 wqe
->wr_id
= wr
->wr_id
;
1767 wqe
->num_sge
= wr
->num_sge
;
1768 for (i
= 0; i
< wr
->num_sge
; i
++)
1769 wqe
->sg_list
[i
] = wr
->sg_list
[i
];
1771 * Make sure queue entry is written
1772 * before the head index.
1777 spin_unlock_irqrestore(&qp
->r_rq
.lock
, flags
);
/**
 * rvt_qp_valid_operation - validate post send wr request
 * @qp - the qp
 * @post_parms - the post send table for the driver
 * @wr - the work request
 *
 * The routine validates the operation based on the
 * validation table and returns the length of the operation
 * which can extend beyond the ib_send_wr.  Operation
 * dependent flags key atomic operation validation.
 *
 * There is an exception for UD qps that validates the pd and
 * overrides the length to include the additional UD specific
 * length.
 *
 * Returns a negative error or the length of the work request
 * for building the swqe.
 */
static inline int rvt_qp_valid_operation(
	struct rvt_qp *qp,
	const struct rvt_operation_params *post_parms,
	const struct ib_send_wr *wr)
1807 if (wr
->opcode
>= RVT_OPERATION_MAX
|| !post_parms
[wr
->opcode
].length
)
1809 if (!(post_parms
[wr
->opcode
].qpt_support
& BIT(qp
->ibqp
.qp_type
)))
1811 if ((post_parms
[wr
->opcode
].flags
& RVT_OPERATION_PRIV
) &&
1812 ibpd_to_rvtpd(qp
->ibqp
.pd
)->user
)
1814 if (post_parms
[wr
->opcode
].flags
& RVT_OPERATION_ATOMIC_SGE
&&
1815 (wr
->num_sge
== 0 ||
1816 wr
->sg_list
[0].length
< sizeof(u64
) ||
1817 wr
->sg_list
[0].addr
& (sizeof(u64
) - 1)))
1819 if (post_parms
[wr
->opcode
].flags
& RVT_OPERATION_ATOMIC
&&
1820 !qp
->s_max_rd_atomic
)
1822 len
= post_parms
[wr
->opcode
].length
;
1824 if (qp
->ibqp
.qp_type
!= IB_QPT_UC
&&
1825 qp
->ibqp
.qp_type
!= IB_QPT_RC
) {
1826 if (qp
->ibqp
.pd
!= ud_wr(wr
)->ah
->pd
)
1828 len
= sizeof(struct ib_ud_wr
);
/**
 * rvt_qp_is_avail - determine queue capacity
 * @qp: the qp
 * @rdi: the rdmavt device
 * @reserved_op: is reserved operation
 *
 * This assumes the s_hlock is held but the s_last
 * qp variable is uncontrolled.
 *
 * For non reserved operations, the qp->s_avail
 * may be changed.
 *
 * The return value is zero or a -ENOMEM.
 */
static inline int rvt_qp_is_avail(
	struct rvt_qp *qp,
	struct rvt_dev_info *rdi,
	bool reserved_op)
1856 /* see rvt_qp_wqe_unreserve() */
1857 smp_mb__before_atomic();
1858 reserved_used
= atomic_read(&qp
->s_reserved_used
);
1859 if (unlikely(reserved_op
)) {
1860 /* see rvt_qp_wqe_unreserve() */
1861 smp_mb__before_atomic();
1862 if (reserved_used
>= rdi
->dparms
.reserved_operations
)
1866 /* non-reserved operations */
1867 if (likely(qp
->s_avail
))
1869 slast
= READ_ONCE(qp
->s_last
);
1870 if (qp
->s_head
>= slast
)
1871 avail
= qp
->s_size
- (qp
->s_head
- slast
);
1873 avail
= slast
- qp
->s_head
;
1875 /* see rvt_qp_wqe_unreserve() */
1876 smp_mb__before_atomic();
1877 reserved_used
= atomic_read(&qp
->s_reserved_used
);
1879 (rdi
->dparms
.reserved_operations
- reserved_used
);
	/* ensure we don't assign a negative s_avail */
1881 if ((s32
)avail
<= 0)
1883 qp
->s_avail
= avail
;
1884 if (WARN_ON(qp
->s_avail
>
1885 (qp
->s_size
- 1 - rdi
->dparms
.reserved_operations
)))
1887 "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
1888 qp
->ibqp
.qp_num
, qp
->s_size
, qp
->s_avail
,
1889 qp
->s_head
, qp
->s_tail
, qp
->s_cur
,
1890 qp
->s_acked
, qp
->s_last
);
1895 * rvt_post_one_wr - post one RC, UC, or UD send work request
1896 * @qp: the QP to post on
1897 * @wr: the work request to send
1899 static int rvt_post_one_wr(struct rvt_qp
*qp
,
1900 const struct ib_send_wr
*wr
,
1903 struct rvt_swqe
*wqe
;
1908 struct rvt_lkey_table
*rkt
;
1910 struct rvt_dev_info
*rdi
= ib_to_rvt(qp
->ibqp
.device
);
1915 int local_ops_delayed
= 0;
1917 BUILD_BUG_ON(IB_QPT_MAX
>= (sizeof(u32
) * BITS_PER_BYTE
));
1919 /* IB spec says that num_sge == 0 is OK. */
1920 if (unlikely(wr
->num_sge
> qp
->s_max_sge
))
1923 ret
= rvt_qp_valid_operation(qp
, rdi
->post_parms
, wr
);
1929 * Local operations include fast register and local invalidate.
1930 * Fast register needs to be processed immediately because the
1931 * registered lkey may be used by following work requests and the
1932 * lkey needs to be valid at the time those requests are posted.
1933 * Local invalidate can be processed immediately if fencing is
1934 * not required and no previous local invalidate ops are pending.
1935 * Signaled local operations that have been processed immediately
1936 * need to have requests with "completion only" flags set posted
1937 * to the send queue in order to generate completions.
1939 if ((rdi
->post_parms
[wr
->opcode
].flags
& RVT_OPERATION_LOCAL
)) {
1940 switch (wr
->opcode
) {
1942 ret
= rvt_fast_reg_mr(qp
,
1945 reg_wr(wr
)->access
);
1946 if (ret
|| !(wr
->send_flags
& IB_SEND_SIGNALED
))
1949 case IB_WR_LOCAL_INV
:
1950 if ((wr
->send_flags
& IB_SEND_FENCE
) ||
1951 atomic_read(&qp
->local_ops_pending
)) {
1952 local_ops_delayed
= 1;
1954 ret
= rvt_invalidate_rkey(
1955 qp
, wr
->ex
.invalidate_rkey
);
1956 if (ret
|| !(wr
->send_flags
& IB_SEND_SIGNALED
))
1965 reserved_op
= rdi
->post_parms
[wr
->opcode
].flags
&
1966 RVT_OPERATION_USE_RESERVE
;
1967 /* check for avail */
1968 ret
= rvt_qp_is_avail(qp
, rdi
, reserved_op
);
1971 next
= qp
->s_head
+ 1;
1972 if (next
>= qp
->s_size
)
1975 rkt
= &rdi
->lkey_table
;
1976 pd
= ibpd_to_rvtpd(qp
->ibqp
.pd
);
1977 wqe
= rvt_get_swqe_ptr(qp
, qp
->s_head
);
1979 /* cplen has length from above */
1980 memcpy(&wqe
->wr
, wr
, cplen
);
1985 struct rvt_sge
*last_sge
= NULL
;
1987 acc
= wr
->opcode
>= IB_WR_RDMA_READ
?
1988 IB_ACCESS_LOCAL_WRITE
: 0;
1989 for (i
= 0; i
< wr
->num_sge
; i
++) {
1990 u32 length
= wr
->sg_list
[i
].length
;
1994 ret
= rvt_lkey_ok(rkt
, pd
, &wqe
->sg_list
[j
], last_sge
,
1995 &wr
->sg_list
[i
], acc
);
1996 if (unlikely(ret
< 0))
1997 goto bail_inval_free
;
1998 wqe
->length
+= length
;
2000 last_sge
= &wqe
->sg_list
[j
];
2003 wqe
->wr
.num_sge
= j
;
	/*
	 * Calculate and set SWQE PSN values prior to handing it off
	 * to the driver's check routine.  This gives the driver the
	 * opportunity to adjust PSN values based on internal checks.
	 */
2011 log_pmtu
= qp
->log_pmtu
;
2012 if (qp
->allowed_ops
== IB_OPCODE_UD
) {
2013 struct rvt_ah
*ah
= ibah_to_rvtah(wqe
->ud_wr
.ah
);
2015 log_pmtu
= ah
->log_pmtu
;
2016 atomic_inc(&ibah_to_rvtah(ud_wr(wr
)->ah
)->refcount
);
2019 if (rdi
->post_parms
[wr
->opcode
].flags
& RVT_OPERATION_LOCAL
) {
2020 if (local_ops_delayed
)
2021 atomic_inc(&qp
->local_ops_pending
);
2023 wqe
->wr
.send_flags
|= RVT_SEND_COMPLETION_ONLY
;
2028 wqe
->ssn
= qp
->s_ssn
++;
2029 wqe
->psn
= qp
->s_next_psn
;
2030 wqe
->lpsn
= wqe
->psn
+
2032 ((wqe
->length
- 1) >> log_pmtu
) :
2036 /* general part of wqe valid - allow for driver checks */
2037 if (rdi
->driver_f
.setup_wqe
) {
2038 ret
= rdi
->driver_f
.setup_wqe(qp
, wqe
, call_send
);
2040 goto bail_inval_free_ref
;
2043 if (!(rdi
->post_parms
[wr
->opcode
].flags
& RVT_OPERATION_LOCAL
))
2044 qp
->s_next_psn
= wqe
->lpsn
+ 1;
2046 if (unlikely(reserved_op
)) {
2047 wqe
->wr
.send_flags
|= RVT_SEND_RESERVE_USED
;
2048 rvt_qp_wqe_reserve(qp
, wqe
);
2050 wqe
->wr
.send_flags
&= ~RVT_SEND_RESERVE_USED
;
2053 trace_rvt_post_one_wr(qp
, wqe
, wr
->num_sge
);
2054 smp_wmb(); /* see request builders */
2059 bail_inval_free_ref
:
2060 if (qp
->allowed_ops
== IB_OPCODE_UD
)
2061 atomic_dec(&ibah_to_rvtah(ud_wr(wr
)->ah
)->refcount
);
2063 /* release mr holds */
2065 struct rvt_sge
*sge
= &wqe
->sg_list
[--j
];
2067 rvt_put_mr(sge
->mr
);
2073 * rvt_post_send - post a send on a QP
2074 * @ibqp: the QP to post the send on
2075 * @wr: the list of work requests to post
2076 * @bad_wr: the first bad WR is put here
2078 * This may be called from interrupt context.
2080 * Return: 0 on success else errno
2082 int rvt_post_send(struct ib_qp
*ibqp
, const struct ib_send_wr
*wr
,
2083 const struct ib_send_wr
**bad_wr
)
2085 struct rvt_qp
*qp
= ibqp_to_rvtqp(ibqp
);
2086 struct rvt_dev_info
*rdi
= ib_to_rvt(ibqp
->device
);
2087 unsigned long flags
= 0;
2092 spin_lock_irqsave(&qp
->s_hlock
, flags
);
2095 * Ensure QP state is such that we can send. If not bail out early,
2096 * there is no need to do this every time we post a send.
2098 if (unlikely(!(ib_rvt_state_ops
[qp
->state
] & RVT_POST_SEND_OK
))) {
2099 spin_unlock_irqrestore(&qp
->s_hlock
, flags
);
2104 * If the send queue is empty, and we only have a single WR then just go
2105 * ahead and kick the send engine into gear. Otherwise we will always
2106 * just schedule the send to happen later.
2108 call_send
= qp
->s_head
== READ_ONCE(qp
->s_last
) && !wr
->next
;
2110 for (; wr
; wr
= wr
->next
) {
2111 err
= rvt_post_one_wr(qp
, wr
, &call_send
);
2112 if (unlikely(err
)) {
2119 spin_unlock_irqrestore(&qp
->s_hlock
, flags
);
2122 * Only call do_send if there is exactly one packet, and the
2123 * driver said it was ok.
2125 if (nreq
== 1 && call_send
)
2126 rdi
->driver_f
.do_send(qp
);
2128 rdi
->driver_f
.schedule_send_no_lock(qp
);
2134 * rvt_post_srq_receive - post a receive on a shared receive queue
2135 * @ibsrq: the SRQ to post the receive on
2136 * @wr: the list of work requests to post
2137 * @bad_wr: A pointer to the first WR to cause a problem is put here
2139 * This may be called from interrupt context.
2141 * Return: 0 on success else errno
2143 int rvt_post_srq_recv(struct ib_srq
*ibsrq
, const struct ib_recv_wr
*wr
,
2144 const struct ib_recv_wr
**bad_wr
)
2146 struct rvt_srq
*srq
= ibsrq_to_rvtsrq(ibsrq
);
2148 unsigned long flags
;
2150 for (; wr
; wr
= wr
->next
) {
2151 struct rvt_rwqe
*wqe
;
2155 if ((unsigned)wr
->num_sge
> srq
->rq
.max_sge
) {
2160 spin_lock_irqsave(&srq
->rq
.lock
, flags
);
2162 next
= wq
->head
+ 1;
2163 if (next
>= srq
->rq
.size
)
2165 if (next
== wq
->tail
) {
2166 spin_unlock_irqrestore(&srq
->rq
.lock
, flags
);
2171 wqe
= rvt_get_rwqe_ptr(&srq
->rq
, wq
->head
);
2172 wqe
->wr_id
= wr
->wr_id
;
2173 wqe
->num_sge
= wr
->num_sge
;
2174 for (i
= 0; i
< wr
->num_sge
; i
++)
2175 wqe
->sg_list
[i
] = wr
->sg_list
[i
];
2176 /* Make sure queue entry is written before the head index. */
2179 spin_unlock_irqrestore(&srq
->rq
.lock
, flags
);
2185 * Validate a RWQE and fill in the SGE state.
2188 static int init_sge(struct rvt_qp
*qp
, struct rvt_rwqe
*wqe
)
2192 struct rvt_lkey_table
*rkt
;
2194 struct rvt_sge_state
*ss
;
2195 struct rvt_dev_info
*rdi
= ib_to_rvt(qp
->ibqp
.device
);
2197 rkt
= &rdi
->lkey_table
;
2198 pd
= ibpd_to_rvtpd(qp
->ibqp
.srq
? qp
->ibqp
.srq
->pd
: qp
->ibqp
.pd
);
2200 ss
->sg_list
= qp
->r_sg_list
;
2202 for (i
= j
= 0; i
< wqe
->num_sge
; i
++) {
2203 if (wqe
->sg_list
[i
].length
== 0)
2206 ret
= rvt_lkey_ok(rkt
, pd
, j
? &ss
->sg_list
[j
- 1] : &ss
->sge
,
2207 NULL
, &wqe
->sg_list
[i
],
2208 IB_ACCESS_LOCAL_WRITE
);
2209 if (unlikely(ret
<= 0))
2211 qp
->r_len
+= wqe
->sg_list
[i
].length
;
2215 ss
->total_len
= qp
->r_len
;
2220 struct rvt_sge
*sge
= --j
? &ss
->sg_list
[j
- 1] : &ss
->sge
;
2222 rvt_put_mr(sge
->mr
);
2225 memset(&wc
, 0, sizeof(wc
));
2226 wc
.wr_id
= wqe
->wr_id
;
2227 wc
.status
= IB_WC_LOC_PROT_ERR
;
2228 wc
.opcode
= IB_WC_RECV
;
2230 /* Signal solicited completion event. */
2231 rvt_cq_enter(ibcq_to_rvtcq(qp
->ibqp
.recv_cq
), &wc
, 1);
2236 * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
2238 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
2240 * Return -1 if there is a local error, 0 if no RWQE is available,
2241 * otherwise return 1.
2243 * Can be called from interrupt level.
2245 int rvt_get_rwqe(struct rvt_qp
*qp
, bool wr_id_only
)
2247 unsigned long flags
;
2250 struct rvt_srq
*srq
;
2251 struct rvt_rwqe
*wqe
;
2252 void (*handler
)(struct ib_event
*, void *);
2257 srq
= ibsrq_to_rvtsrq(qp
->ibqp
.srq
);
2258 handler
= srq
->ibsrq
.event_handler
;
2266 spin_lock_irqsave(&rq
->lock
, flags
);
2267 if (!(ib_rvt_state_ops
[qp
->state
] & RVT_PROCESS_RECV_OK
)) {
2274 /* Validate tail before using it since it is user writable. */
2275 if (tail
>= rq
->size
)
2277 if (unlikely(tail
== wq
->head
)) {
2281 /* Make sure entry is read after head index is read. */
2283 wqe
= rvt_get_rwqe_ptr(rq
, tail
);
2285 * Even though we update the tail index in memory, the verbs
2286 * consumer is not supposed to post more entries until a
2287 * completion is generated.
2289 if (++tail
>= rq
->size
)
2292 if (!wr_id_only
&& !init_sge(qp
, wqe
)) {
2296 qp
->r_wr_id
= wqe
->wr_id
;
2299 set_bit(RVT_R_WRID_VALID
, &qp
->r_aflags
);
2304 * Validate head pointer value and compute
2305 * the number of remaining WQEs.
2311 n
+= rq
->size
- tail
;
2314 if (n
< srq
->limit
) {
2318 spin_unlock_irqrestore(&rq
->lock
, flags
);
2319 ev
.device
= qp
->ibqp
.device
;
2320 ev
.element
.srq
= qp
->ibqp
.srq
;
2321 ev
.event
= IB_EVENT_SRQ_LIMIT_REACHED
;
2322 handler(&ev
, srq
->ibsrq
.srq_context
);
2327 spin_unlock_irqrestore(&rq
->lock
, flags
);
2331 EXPORT_SYMBOL(rvt_get_rwqe
);
 * rvt_comm_est - handle trap with QP established
2337 void rvt_comm_est(struct rvt_qp
*qp
)
2339 qp
->r_flags
|= RVT_R_COMM_EST
;
2340 if (qp
->ibqp
.event_handler
) {
2343 ev
.device
= qp
->ibqp
.device
;
2344 ev
.element
.qp
= &qp
->ibqp
;
2345 ev
.event
= IB_EVENT_COMM_EST
;
2346 qp
->ibqp
.event_handler(&ev
, qp
->ibqp
.qp_context
);
2349 EXPORT_SYMBOL(rvt_comm_est
);
2351 void rvt_rc_error(struct rvt_qp
*qp
, enum ib_wc_status err
)
2353 unsigned long flags
;
2356 spin_lock_irqsave(&qp
->s_lock
, flags
);
2357 lastwqe
= rvt_error_qp(qp
, err
);
2358 spin_unlock_irqrestore(&qp
->s_lock
, flags
);
2363 ev
.device
= qp
->ibqp
.device
;
2364 ev
.element
.qp
= &qp
->ibqp
;
2365 ev
.event
= IB_EVENT_QP_LAST_WQE_REACHED
;
2366 qp
->ibqp
.event_handler(&ev
, qp
->ibqp
.qp_context
);
2369 EXPORT_SYMBOL(rvt_rc_error
);
/**
 * rvt_rnr_tbl_to_usec - return usec for an index into ib_rvt_rnr_table
 * @index - the index
 *
 * return usec from an index into ib_rvt_rnr_table
 */
unsigned long rvt_rnr_tbl_to_usec(u32 index)
2378 return ib_rvt_rnr_table
[(index
& IB_AETH_CREDIT_MASK
)];
2380 EXPORT_SYMBOL(rvt_rnr_tbl_to_usec
);
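
/*
 * Worked example: an RNR timeout code of 0x14 selects the 10240 entry of
 * ib_rvt_rnr_table[], i.e. rvt_rnr_tbl_to_usec(0x14) == 10240 usec (10.24 ms).
 * Since the index is masked with IB_AETH_CREDIT_MASK and the table has 32
 * entries, only the low 5 bits of the code are significant.
 */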
static inline unsigned long rvt_aeth_to_usec(u32 aeth)
{
	return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
				IB_AETH_CREDIT_MASK];
}
/*
 * rvt_add_retry_timer_ext - add/start a retry timer
 * @qp - the QP
 * @shift - timeout shift to wait for multiple packets
 *
 * add a retry timer on the QP
 */
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + rdi->busy_jiffies +
			      (qp->timeout_jiffies << shift);
	add_timer(&qp->s_timer);
}
EXPORT_SYMBOL(rvt_add_retry_timer_ext);
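
/*
 * Worked example of the expiry computation above (a sketch, assuming HZ == 250
 * and a local ACK timeout code of 14, i.e. 4.096 usec * 2^14 ~= 67 ms, so
 * qp->timeout_jiffies is roughly 17 jiffies):
 *
 *	shift == 0: expires ~= jiffies + rdi->busy_jiffies + 17
 *	shift == 2: expires ~= jiffies + rdi->busy_jiffies + 68
 *
 * In other words, @shift scales the single-packet timeout so the timer covers
 * several outstanding packets.
 */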
/**
 * rvt_add_rnr_timer - add/start an rnr timer
 * @qp - the QP
 * @aeth - aeth of RNR timeout, simulated aeth for loopback
 *
 * add an rnr timer on the QP
 */
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
{
	u32 to;

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_WAIT_RNR;
	to = rvt_aeth_to_usec(aeth);
	trace_rvt_rnrnak_add(qp, to);
	hrtimer_start(&qp->s_rnr_timer,
		      ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
}
EXPORT_SYMBOL(rvt_add_rnr_timer);
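
/*
 * Example of the "simulated aeth for loopback" mentioned above: no real AETH
 * is received on a loopback path, so the caller builds one from the peer's
 * minimum RNR timer, as rvt_ruc_loopback() does further down in this file:
 *
 *	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer << IB_AETH_CREDIT_SHIFT);
 *
 * rvt_aeth_to_usec() then extracts the same 5-bit code and maps it through
 * ib_rvt_rnr_table[].
 */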
/**
 * rvt_stop_rc_timers - stop all timers
 * @qp - the QP
 *
 * stop any pending timers
 */
void rvt_stop_rc_timers(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from all timers */
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
		hrtimer_try_to_cancel(&qp->s_rnr_timer);
	}
}
EXPORT_SYMBOL(rvt_stop_rc_timers);
/**
 * rvt_stop_rnr_timer - stop an rnr timer
 * @qp - the QP
 *
 * stop the rnr timer if it is pending
 */
static void rvt_stop_rnr_timer(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from rnr timer */
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		trace_rvt_rnrnak_stop(qp, 0);
	}
}
/**
 * rvt_del_timers_sync - wait for any timeout routines to exit
 * @qp - the QP
 */
void rvt_del_timers_sync(struct rvt_qp *qp)
{
	del_timer_sync(&qp->s_timer);
	hrtimer_cancel(&qp->s_rnr_timer);
}
EXPORT_SYMBOL(rvt_del_timers_sync);
/*
 * This is called from s_timer for missing responses.
 */
static void rvt_rc_timeout(struct timer_list *t)
{
	struct rvt_qp *qp = from_timer(qp, t, s_timer);
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & RVT_S_TIMER) {
		struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];

		qp->s_flags &= ~RVT_S_TIMER;
		rvp->n_rc_timeouts++;
		del_timer(&qp->s_timer);
		trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
		if (rdi->driver_f.notify_restart_rc)
			rdi->driver_f.notify_restart_rc(qp,
							qp->s_last_psn + 1,
							1);
		rdi->driver_f.schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}
/*
 * This is called from s_timer for RNR timeouts.
 */
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
{
	struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	rvt_stop_rnr_timer(qp);
	trace_rvt_rnrnak_timeout(qp, 0);
	rdi->driver_f.schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return HRTIMER_NORESTART;
}
EXPORT_SYMBOL(rvt_rc_rnr_retry);
/**
 * rvt_qp_iter_init - initialize an iterator for QP iteration
 * @rdi: rvt devinfo
 * @v: a 64 bit value
 * @cb: a callback
 *
 * This returns an iterator suitable for iterating QPs
 * in the system.
 *
 * The @cb is a user defined callback and @v is a 64
 * bit value passed to and relevant for processing in the
 * @cb.  An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * Use cases that require memory allocation to succeed
 * must preallocate appropriately.
 *
 * Return: a pointer to an rvt_qp_iter or NULL
 */
struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v))
{
	struct rvt_qp_iter *i;

	i = kzalloc(sizeof(*i), GFP_KERNEL);
	if (!i)
		return NULL;

	i->rdi = rdi;
	/* number of special QPs (SMI/GSI) for device */
	i->specials = rdi->ibdev.phys_port_cnt * 2;
	i->v = v;
	i->cb = cb;

	return i;
}
EXPORT_SYMBOL(rvt_qp_iter_init);
/**
 * rvt_qp_iter_next - return the next QP in iter
 * @iter - the iterator
 *
 * Fine grained QP iterator suitable for use
 * with debugfs seq_file mechanisms.
 *
 * Updates iter->qp with the current QP when the return
 * value is 0.
 *
 * Return: 0 - iter->qp is valid 1 - no more QPs
 */
int rvt_qp_iter_next(struct rvt_qp_iter *iter)
	__must_hold(RCU)
{
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;
	struct rvt_dev_info *rdi = iter->rdi;

	/*
	 * The approach is to consider the special qps
	 * as additional table entries before the
	 * real hash table.  Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials is the special qp indices
	 *
	 * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
	 * the potential hash bucket entries
	 */
	for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct rvt_ibport *rvp;
				int pidx;

				pidx = n % rdi->ibdev.phys_port_cnt;
				rvp = rdi->ports[pidx];
				qp = rcu_dereference(rvp->qp[n & 1]);
			} else {
				qp = rcu_dereference(
					rdi->qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}
EXPORT_SYMBOL(rvt_qp_iter_next);
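
/*
 * Illustrative sketch of the "fine grained" use mentioned above (for instance
 * from a debugfs dump path).  rvt_qp_iter_next() expects RCU to be held by the
 * caller; dump_one_qp() is a hypothetical helper:
 *
 *	struct rvt_qp_iter *iter = rvt_qp_iter_init(rdi, 0, NULL);
 *
 *	if (!iter)
 *		return -ENOMEM;
 *	rcu_read_lock();
 *	while (!rvt_qp_iter_next(iter))
 *		dump_one_qp(iter->qp);
 *	rcu_read_unlock();
 *	kfree(iter);
 */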
/**
 * rvt_qp_iter - iterate all QPs
 * @rdi - rvt devinfo
 * @v - a 64 bit value
 * @cb - a callback
 *
 * This provides a way for iterating all QPs.
 *
 * The @cb is a user defined callback and @v is a 64
 * bit value passed to and relevant for processing in the
 * cb.  An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * The code has an internal iterator to simplify
 * non seq_file use cases.
 */
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v))
{
	int ret;
	struct rvt_qp_iter i = {
		.rdi = rdi,
		.specials = rdi->ibdev.phys_port_cnt * 2,
		.v = v,
		.cb = cb
	};

	rcu_read_lock();
	do {
		ret = rvt_qp_iter_next(&i);
		if (!ret) {
			rvt_get_qp(i.qp);
			rcu_read_unlock();
			i.cb(i.qp, i.v);
			rcu_read_lock();
			rvt_put_qp(i.qp);
		}
	} while (!ret);
	rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_qp_iter);
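
/*
 * Illustrative sketch of the callback-driven form: a hypothetical callback
 * that counts QPs in the error state, invoked once per QP by rvt_qp_iter():
 *
 *	static void count_error_qps(struct rvt_qp *qp, u64 v)
 *	{
 *		atomic_t *n_err = (atomic_t *)(uintptr_t)v;
 *
 *		if (qp->state == IB_QPS_ERR)
 *			atomic_inc(n_err);
 *	}
 *
 *	atomic_t n_err = ATOMIC_INIT(0);
 *
 *	rvt_qp_iter(rdi, (u64)(uintptr_t)&n_err, count_error_qps);
 */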
/*
 * This should be called with s_lock held.
 */
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	last = qp->s_last;
	old_last = last;
	trace_rvt_qp_send_completion(qp, wqe, last);
	if (++last >= qp->s_size)
		last = 0;
	trace_rvt_qp_send_completion(qp, wqe, last);
	qp->s_last = last;
	/* See post_send() */
	barrier();
	rvt_put_qp_swqe(qp, wqe);

	rvt_qp_swqe_complete(qp,
			     wqe,
			     rdi->wc_opcode[wqe->wr.opcode],
			     status);

	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}
EXPORT_SYMBOL(rvt_send_complete);
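
/*
 * Illustrative call pattern (a sketch; it mirrors how this file retires
 * loopback sends in rvt_ruc_loopback() below): once the final status of the
 * head-of-queue WQE is known, the driver completes it under the s_lock:
 *
 *	spin_lock_irqsave(&qp->s_lock, flags);
 *	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
 *	rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
 *	spin_unlock_irqrestore(&qp->s_lock, flags);
 */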
/**
 * rvt_copy_sge - copy data to SGE memory
 * @qp: associated QP
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: boolean to release MR
 * @copy_last: do a separate copy of the last 8 bytes
 */
void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length,
		  bool release, bool copy_last)
{
	struct rvt_sge *sge = &ss->sge;
	int i;
	bool in_last = false;
	bool cacheless_copy = false;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	struct rvt_wss *wss = rdi->wss;
	unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;

	if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
		cacheless_copy = length >= PAGE_SIZE;
	} else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
		if (length >= PAGE_SIZE) {
			/*
			 * NOTE: this *assumes*:
			 * o The first vaddr is the dest.
			 * o If multiple pages, then vaddr is sequential.
			 */
			wss_insert(wss, sge->vaddr);
			if (length >= (2 * PAGE_SIZE))
				wss_insert(wss, (sge->vaddr + PAGE_SIZE));

			cacheless_copy = wss_exceeds_threshold(wss);
		} else {
			wss_advance_clean_counter(wss);
		}
	}

	if (copy_last) {
		if (length > 8) {
			length -= 8;
		} else {
			copy_last = false;
			in_last = true;
		}
	}

again:
	while (length) {
		u32 len = rvt_get_sge_length(sge, length);

		WARN_ON_ONCE(len == 0);
		if (unlikely(in_last)) {
			/* enforce byte transfer ordering */
			for (i = 0; i < len; i++)
				((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
		} else if (cacheless_copy) {
			cacheless_memcpy(sge->vaddr, data, len);
		} else {
			memcpy(sge->vaddr, data, len);
		}
		rvt_update_sge(ss, len, release);
		data += len;
		length -= len;
	}

	if (copy_last) {
		copy_last = false;
		in_last = true;
		length = 8;
		goto again;
	}
}
EXPORT_SYMBOL(rvt_copy_sge);
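
/*
 * Example of how the copy is driven (taken from the loopback path below): the
 * caller walks the sender's SGE state and lets rvt_copy_sge() scatter each
 * chunk into the receiver's r_sge, with @copy_last requesting the ordered
 * byte-by-byte copy of the final 8 bytes for user QPs:
 *
 *	u32 len = rvt_get_sge_length(sge, sqp->s_len);
 *
 *	rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, release, copy_last);
 *	rvt_update_sge(&sqp->s_sge, len, !release);
 *	sqp->s_len -= len;
 */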
static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
					  struct rvt_qp *sqp)
{
	rvp->n_pkt_drops++;
	/*
	 * For RC, the requester would timeout and retry so
	 * shortcut the timeouts and just signal too many retries.
	 */
	return sqp->ibqp.qp_type == IB_QPT_RC ?
		IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS;
}
/**
 * rvt_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from rvt_do_send() to forward a WQE addressed to the same HFI.
 * Note that although we are single threaded due to the send engine, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
void rvt_ruc_loopback(struct rvt_qp *sqp)
{
	struct rvt_ibport *rvp = NULL;
	struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	bool release;
	int ret;
	bool copy_last = false;
	int local_ops = 0;

	rcu_read_lock();
	rvp = rdi->ports[sqp->port_num - 1];

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */

	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
			    sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	if (sqp->s_last == READ_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp) {
		send_status = loopback_qp_drop(rvp, sqp);
		goto serr_no_r_lock;
	}
	spin_lock_irqsave(&qp->r_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		send_status = loopback_qp_drop(rvp, sqp);
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = true;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_REG_MR:
		goto send_comp;

	case IB_WR_LOCAL_INV:
		if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
			if (rvt_invalidate_rkey(sqp,
						wqe->wr.ex.invalidate_rkey))
				send_status = IB_WC_LOC_PROT_ERR;
			local_ops = 1;
		}
		goto send_comp;

	case IB_WR_SEND_WITH_INV:
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_SEND:
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		if (wqe->length > qp->r_len)
			goto inv_err;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND_WITH_INV:
			if (!rvt_invalidate_rkey(qp,
						 wqe->wr.ex.invalidate_rkey)) {
				wc.wc_flags = IB_WC_WITH_INVALIDATE;
				wc.ex.invalidate_rkey =
					wqe->wr.ex.invalidate_rkey;
			}
			break;
		case IB_WR_SEND_WITH_IMM:
			wc.wc_flags = IB_WC_WITH_IMM;
			wc.ex.imm_data = wqe->wr.ex.imm_data;
			break;
		default:
			break;
		}
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* skip copy_last set and qp_access_flags recheck */
		goto do_write;
	case IB_WR_RDMA_WRITE:
		copy_last = rvt_is_user_qp(qp);
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
do_write:
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = false;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
		*(u64 *)sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = rvt_get_sge_length(sge, sqp->s_len);

		WARN_ON_ONCE(len == 0);
		rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
			     len, release, copy_last);
		rvt_update_sge(&sqp->s_sge, len, !release);
		sqp->s_len -= len;
	}
	if (release)
		rvt_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_unlock_irqrestore(&qp->r_lock, flags);
	spin_lock_irqsave(&sqp->s_lock, flags);
	rvp->n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	rvt_send_complete(sqp, wqe, send_status);
	if (local_ops) {
		atomic_dec(&sqp->local_ops_pending);
		local_ops = 0;
	}
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	rvp->n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_unlock_irqrestore(&qp->r_lock, flags);
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
			       IB_AETH_CREDIT_SHIFT);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status =
		sqp->ibqp.qp_type == IB_QPT_RC ?
			IB_WC_REM_INV_REQ_ERR :
			IB_WC_SUCCESS;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	rvt_rc_error(qp, wc.status);

serr:
	spin_unlock_irqrestore(&qp->r_lock, flags);
serr_no_r_lock:
	spin_lock_irqsave(&sqp->s_lock, flags);
	rvt_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_ruc_loopback);
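
/*
 * Worked example of the atomic cases handled above (values are hypothetical):
 * with the responder's target word *vaddr == 5,
 *
 *	IB_WR_ATOMIC_FETCH_AND_ADD, compare_add = 3:
 *		*vaddr becomes 8, the requester's buffer receives 5.
 *	IB_WR_ATOMIC_CMP_AND_SWP, compare_add = 5, swap = 9:
 *		*vaddr becomes 9, the requester's buffer receives 5.
 *	IB_WR_ATOMIC_CMP_AND_SWP, compare_add = 7, swap = 9:
 *		*vaddr is unchanged (5), the requester's buffer receives 5.
 *
 * In every case the original value is returned to the requester, which is what
 * the atomic64_add_return()/cmpxchg() expressions in the switch above compute.
 */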