/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/log2.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/uverbs_ioctl.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
                 struct mlx4_ib_cq *recv_cq);
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
                   struct mlx4_ib_cq *recv_cq);
static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state,
                  struct ib_udata *udata);
enum {
    MLX4_IB_ACK_REQ_FREQ            = 8,
};

enum {
    MLX4_IB_DEFAULT_SCHED_QUEUE     = 0x83,
    MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
    MLX4_IB_LINK_TYPE_IB            = 0,
    MLX4_IB_LINK_TYPE_ETH           = 1
};

enum {
    /*
     * Largest possible UD header: send with GRH and immediate
     * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
     * tag.  (LRH would only use 8 bytes, so Ethernet is the
     * biggest case)
     */
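    /*
     * A plausible breakdown of the 82 bytes below (added for clarity, not
     * from the original source): 14 (Ethernet) + 4 (VLAN) + 40 (GRH) +
     * 12 (BTH) + 8 (DETH) + 4 (immediate data) = 82.
     */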
    MLX4_IB_UD_HEADER_SIZE      = 82,
    MLX4_IB_LSO_HEADER_SPARE    = 128,
};

struct mlx4_ib_sqp {
    struct mlx4_ib_qp   qp;
    int                 pkey_index;
    u32                 qkey;
    u32                 send_psn;
    struct ib_ud_header ud_header;
    u8                  header_buf[MLX4_IB_UD_HEADER_SIZE];
    struct ib_qp        *roce_v2_gsi;
};
enum {
    MLX4_IB_MIN_SQ_STRIDE   = 6,
    MLX4_IB_CACHE_LINE_SIZE = 64,
};

enum {
    MLX4_RAW_QP_MSGMAX  = 31,
};
static const __be32 mlx4_ib_opcode[] = {
    [IB_WR_SEND]                        = cpu_to_be32(MLX4_OPCODE_SEND),
    [IB_WR_LSO]                         = cpu_to_be32(MLX4_OPCODE_LSO),
    [IB_WR_SEND_WITH_IMM]               = cpu_to_be32(MLX4_OPCODE_SEND_IMM),
    [IB_WR_RDMA_WRITE]                  = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
    [IB_WR_RDMA_WRITE_WITH_IMM]         = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
    [IB_WR_RDMA_READ]                   = cpu_to_be32(MLX4_OPCODE_RDMA_READ),
    [IB_WR_ATOMIC_CMP_AND_SWP]          = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
    [IB_WR_ATOMIC_FETCH_AND_ADD]        = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
    [IB_WR_SEND_WITH_INV]               = cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
    [IB_WR_LOCAL_INV]                   = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
    [IB_WR_REG_MR]                      = cpu_to_be32(MLX4_OPCODE_FMR),
    [IB_WR_MASKED_ATOMIC_CMP_AND_SWP]   = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),
    [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
};
enum mlx4_ib_source_type {
    MLX4_IB_QP_SRC  = 0,
    MLX4_IB_RWQ_SRC = 1,
};

static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
{
    return container_of(mqp, struct mlx4_ib_sqp, qp);
}
static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
    if (!mlx4_is_master(dev->dev))
        return 0;

    return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
           qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
           8 * MLX4_MFUNC_MAX;
}
static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
    int proxy_sqp = 0;
    int real_sqp = 0;
    int i;

    /* PPF or Native -- real SQP */
    real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
            qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
            qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
    if (real_sqp)
        return 1;
    /* VF or PF -- proxy SQP */
    if (mlx4_is_mfunc(dev->dev)) {
        for (i = 0; i < dev->dev->caps.num_ports; i++) {
            if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy ||
                qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp1_proxy) {
                proxy_sqp = 1;
                break;
            }
        }
    }
    if (proxy_sqp)
        return 1;

    return !!(qp->flags & MLX4_IB_ROCE_V2_GSI_QP);
}
/* used for INIT/CLOSE port logic */
static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
    int proxy_qp0 = 0;
    int real_qp0 = 0;
    int i;

    /* PPF or Native -- real QP0 */
    real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) &&
            qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
            qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
    if (real_qp0)
        return 1;
    /* VF or PF -- proxy QP0 */
    if (mlx4_is_mfunc(dev->dev)) {
        for (i = 0; i < dev->dev->caps.num_ports; i++) {
            if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy) {
                proxy_qp0 = 1;
                break;
            }
        }
    }
    return proxy_qp0;
}
static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
{
    return mlx4_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
{
    return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
{
    return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
}
/*
 * Stamp a SQ WQE so that it is invalid if prefetched by marking the
 * first four bytes of every 64 byte chunk with 0xffffffff, except for
 * the very first chunk of the WQE.
 */
static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
{
    __be32 *wqe;
    int i;
    int s;
    void *buf;
    struct mlx4_wqe_ctrl_seg *ctrl;

    buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
    ctrl = (struct mlx4_wqe_ctrl_seg *)buf;
    s = (ctrl->qpn_vlan.fence_size & 0x3f) << 4;
    for (i = 64; i < s; i += 64) {
        wqe = buf + i;
        *wqe = cpu_to_be32(0xffffffff);
    }
}
static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
    struct ib_event event;
    struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;

    if (type == MLX4_EVENT_TYPE_PATH_MIG)
        to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

    if (ibqp->event_handler) {
        event.device     = ibqp->device;
        event.element.qp = ibqp;
        switch (type) {
        case MLX4_EVENT_TYPE_PATH_MIG:
            event.event = IB_EVENT_PATH_MIG;
            break;
        case MLX4_EVENT_TYPE_COMM_EST:
            event.event = IB_EVENT_COMM_EST;
            break;
        case MLX4_EVENT_TYPE_SQ_DRAINED:
            event.event = IB_EVENT_SQ_DRAINED;
            break;
        case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
            event.event = IB_EVENT_QP_LAST_WQE_REACHED;
            break;
        case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
            event.event = IB_EVENT_QP_FATAL;
            break;
        case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
            event.event = IB_EVENT_PATH_MIG_ERR;
            break;
        case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
            event.event = IB_EVENT_QP_REQ_ERR;
            break;
        case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
            event.event = IB_EVENT_QP_ACCESS_ERR;
            break;
        default:
            pr_warn("Unexpected event type %d on QP %06x\n",
                type, qp->qpn);
            return;
        }

        ibqp->event_handler(&event, ibqp->qp_context);
    }
}
static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type)
{
    pr_warn_ratelimited("Unexpected event type %d on WQ 0x%06x. Events are not supported for WQs\n",
                type, qp->qpn);
}
static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
{
    /*
     * UD WQEs must have a datagram segment.
     * RC and UC WQEs might have a remote address segment.
     * MLX WQEs need two extra inline data segments (for the UD
     * header and space for the ICRC).
     */
    switch (type) {
    case MLX4_IB_QPT_UD:
        return sizeof (struct mlx4_wqe_ctrl_seg) +
            sizeof (struct mlx4_wqe_datagram_seg) +
            ((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
    case MLX4_IB_QPT_PROXY_SMI_OWNER:
    case MLX4_IB_QPT_PROXY_SMI:
    case MLX4_IB_QPT_PROXY_GSI:
        return sizeof (struct mlx4_wqe_ctrl_seg) +
            sizeof (struct mlx4_wqe_datagram_seg) + 64;
    case MLX4_IB_QPT_TUN_SMI_OWNER:
    case MLX4_IB_QPT_TUN_GSI:
        return sizeof (struct mlx4_wqe_ctrl_seg) +
            sizeof (struct mlx4_wqe_datagram_seg);

    case MLX4_IB_QPT_UC:
        return sizeof (struct mlx4_wqe_ctrl_seg) +
            sizeof (struct mlx4_wqe_raddr_seg);
    case MLX4_IB_QPT_RC:
        return sizeof (struct mlx4_wqe_ctrl_seg) +
            sizeof (struct mlx4_wqe_masked_atomic_seg) +
            sizeof (struct mlx4_wqe_raddr_seg);
    case MLX4_IB_QPT_SMI:
    case MLX4_IB_QPT_GSI:
        return sizeof (struct mlx4_wqe_ctrl_seg) +
            ALIGN(MLX4_IB_UD_HEADER_SIZE +
                  DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE,
                       MLX4_INLINE_ALIGN) *
                  sizeof (struct mlx4_wqe_inline_seg),
                  sizeof (struct mlx4_wqe_data_seg)) +
            ALIGN(4 +
                  sizeof (struct mlx4_wqe_inline_seg),
                  sizeof (struct mlx4_wqe_data_seg));
    default:
        return sizeof (struct mlx4_wqe_ctrl_seg);
    }
}
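/*
 * Worked example for send_wqe_overhead() above (added for clarity; segment
 * sizes are assumed from the mlx4 WQE layout): a plain UD WQE needs a control
 * segment (16 bytes) plus a datagram segment (48 bytes), i.e. 64 bytes of
 * overhead, with an extra MLX4_IB_LSO_HEADER_SPARE (128) reserved for LSO QPs.
 */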
static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
               bool is_user, bool has_rq, struct mlx4_ib_qp *qp,
               u32 inl_recv_sz)
{
    /* Sanity check RQ size before proceeding */
    if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
        cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
        return -EINVAL;

    if (!has_rq) {
        if (cap->max_recv_wr || inl_recv_sz)
            return -EINVAL;

        qp->rq.wqe_cnt = qp->rq.max_gs = 0;
    } else {
        u32 max_inl_recv_sz = dev->dev->caps.max_rq_sg *
            sizeof(struct mlx4_wqe_data_seg);
        u32 wqe_size;

        /* HW requires >= 1 RQ entry with >= 1 gather entry */
        if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge ||
                inl_recv_sz > max_inl_recv_sz))
            return -EINVAL;

        qp->rq.wqe_cnt   = roundup_pow_of_two(max(1U, cap->max_recv_wr));
        qp->rq.max_gs    = roundup_pow_of_two(max(1U, cap->max_recv_sge));
        wqe_size = qp->rq.max_gs * sizeof(struct mlx4_wqe_data_seg);
        qp->rq.wqe_shift = ilog2(max_t(u32, wqe_size, inl_recv_sz));
    }

    /* leave userspace return values as they were, so as not to break ABI */
    if (is_user) {
        cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
        cap->max_recv_sge = qp->rq.max_gs;
    } else {
        cap->max_recv_wr  = qp->rq.max_post =
            min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
        cap->max_recv_sge = min(qp->rq.max_gs,
                    min(dev->dev->caps.max_sq_sg,
                        dev->dev->caps.max_rq_sg));
    }

    return 0;
}
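/*
 * Example for set_rq_size() above (added for clarity): a request for
 * max_recv_sge = 3 is rounded up to max_gs = 4, i.e. four 16-byte scatter
 * entries, giving a 64-byte RQ WQE stride (wqe_shift = 6) unless the inline
 * receive size forces a larger stride.
 */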
static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
                  enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
{
    int s;

    /* Sanity check SQ size before proceeding */
    if (cap->max_send_wr  > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
        cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
        cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
        sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
        return -EINVAL;

    /*
     * For MLX transport we need 2 extra S/G entries:
     * one for the header and one for the checksum at the end
     */
    if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI ||
         type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) &&
        cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
        return -EINVAL;

    s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
        cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
        send_wqe_overhead(type, qp->flags);

    if (s > dev->dev->caps.max_sq_desc_sz)
        return -EINVAL;

    qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));

    /*
     * We need to leave 2 KB + 1 WR of headroom in the SQ to
     * allow HW to prefetch.
     */
    qp->sq_spare_wqes = MLX4_IB_SQ_HEADROOM(qp->sq.wqe_shift);
    qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr +
                        qp->sq_spare_wqes);

    qp->sq.max_gs =
        (min(dev->dev->caps.max_sq_desc_sz,
             (1 << qp->sq.wqe_shift)) -
         send_wqe_overhead(type, qp->flags)) /
        sizeof (struct mlx4_wqe_data_seg);

    qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
        (qp->sq.wqe_cnt << qp->sq.wqe_shift);
    if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
        qp->rq.offset = 0;
        qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
    } else {
        qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
        qp->sq.offset = 0;
    }

    cap->max_send_wr  = qp->sq.max_post =
        qp->sq.wqe_cnt - qp->sq_spare_wqes;
    cap->max_send_sge = min(qp->sq.max_gs,
                min(dev->dev->caps.max_sq_sg,
                    dev->dev->caps.max_rq_sg));
    /* We don't support inline sends for kernel QPs (yet) */
    cap->max_inline_data = 0;

    return 0;
}
static int set_user_sq_size(struct mlx4_ib_dev *dev,
                struct mlx4_ib_qp *qp,
                struct mlx4_ib_create_qp *ucmd)
{
    /* Sanity check SQ size before proceeding */
    if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes ||
        ucmd->log_sq_stride >
            ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
        ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
        return -EINVAL;

    qp->sq.wqe_cnt   = 1 << ucmd->log_sq_bb_count;
    qp->sq.wqe_shift = ucmd->log_sq_stride;

    qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
        (qp->sq.wqe_cnt << qp->sq.wqe_shift);

    return 0;
}
static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
    int i;

    qp->sqp_proxy_rcv =
        kmalloc_array(qp->rq.wqe_cnt, sizeof(struct mlx4_ib_buf),
                  GFP_KERNEL);
    if (!qp->sqp_proxy_rcv)
        return -ENOMEM;
    for (i = 0; i < qp->rq.wqe_cnt; i++) {
        qp->sqp_proxy_rcv[i].addr =
            kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr),
                GFP_KERNEL);
        if (!qp->sqp_proxy_rcv[i].addr)
            goto err;
        qp->sqp_proxy_rcv[i].map =
            ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
                      sizeof (struct mlx4_ib_proxy_sqp_hdr),
                      DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) {
            kfree(qp->sqp_proxy_rcv[i].addr);
            goto err;
        }
    }
    return 0;

err:
    while (i > 0) {
        --i;
        ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
                    sizeof (struct mlx4_ib_proxy_sqp_hdr),
                    DMA_FROM_DEVICE);
        kfree(qp->sqp_proxy_rcv[i].addr);
    }
    kfree(qp->sqp_proxy_rcv);
    qp->sqp_proxy_rcv = NULL;
    return -ENOMEM;
}
static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
{
    int i;

    for (i = 0; i < qp->rq.wqe_cnt; i++) {
        ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
                    sizeof (struct mlx4_ib_proxy_sqp_hdr),
                    DMA_FROM_DEVICE);
        kfree(qp->sqp_proxy_rcv[i].addr);
    }
    kfree(qp->sqp_proxy_rcv);
}
static bool qp_has_rq(struct ib_qp_init_attr *attr)
{
    if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
        return false;

    return !attr->srq;
}
static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn)
{
    int i;

    for (i = 0; i < dev->caps.num_ports; i++) {
        if (qpn == dev->caps.spec_qps[i].qp0_proxy)
            return !!dev->caps.spec_qps[i].qp0_qkey;
    }
    return 0;
}
static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
                    struct mlx4_ib_qp *qp)
{
    mutex_lock(&dev->counters_table[qp->port - 1].mutex);
    mlx4_counter_free(dev->dev, qp->counter_index->index);
    list_del(&qp->counter_index->list);
    mutex_unlock(&dev->counters_table[qp->port - 1].mutex);

    kfree(qp->counter_index);
    qp->counter_index = NULL;
}
static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
              struct ib_qp_init_attr *init_attr,
              struct mlx4_ib_create_qp_rss *ucmd)
{
    rss_ctx->base_qpn_tbl_sz = init_attr->rwq_ind_tbl->ind_tbl[0]->wq_num |
        (init_attr->rwq_ind_tbl->log_ind_tbl_size << 24);

    if ((ucmd->rx_hash_function == MLX4_IB_RX_HASH_FUNC_TOEPLITZ) &&
        (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) {
        memcpy(rss_ctx->rss_key, ucmd->rx_hash_key,
               MLX4_EN_RSS_KEY_SIZE);
    } else {
        pr_debug("RX Hash function is not supported\n");
        return (-EOPNOTSUPP);
    }

    if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4      |
                      MLX4_IB_RX_HASH_DST_IPV4      |
                      MLX4_IB_RX_HASH_SRC_IPV6      |
                      MLX4_IB_RX_HASH_DST_IPV6      |
                      MLX4_IB_RX_HASH_SRC_PORT_TCP  |
                      MLX4_IB_RX_HASH_DST_PORT_TCP  |
                      MLX4_IB_RX_HASH_SRC_PORT_UDP  |
                      MLX4_IB_RX_HASH_DST_PORT_UDP  |
                      MLX4_IB_RX_HASH_INNER)) {
        pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
             ucmd->rx_hash_fields_mask);
        return (-EOPNOTSUPP);
    }

    if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
        (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
        rss_ctx->flags = MLX4_RSS_IPV4;
    } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) ||
           (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
        pr_debug("RX Hash fields_mask is not supported - both IPv4 SRC and DST must be set\n");
        return (-EOPNOTSUPP);
    }

    if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) &&
        (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) {
        rss_ctx->flags |= MLX4_RSS_IPV6;
    } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) ||
           (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) {
        pr_debug("RX Hash fields_mask is not supported - both IPv6 SRC and DST must be set\n");
        return (-EOPNOTSUPP);
    }

    if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) &&
        (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) {
        if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
            pr_debug("RX Hash fields_mask for UDP is not supported\n");
            return (-EOPNOTSUPP);
        }

        if (rss_ctx->flags & MLX4_RSS_IPV4)
            rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
        if (rss_ctx->flags & MLX4_RSS_IPV6)
            rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
        if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
            pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
            return (-EOPNOTSUPP);
        }
    } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) ||
           (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) {
        pr_debug("RX Hash fields_mask is not supported - both UDP SRC and DST must be set\n");
        return (-EOPNOTSUPP);
    }

    if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
        (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
        if (rss_ctx->flags & MLX4_RSS_IPV4)
            rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
        if (rss_ctx->flags & MLX4_RSS_IPV6)
            rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
        if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
            pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
            return (-EOPNOTSUPP);
        }
    } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
           (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
        pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
        return (-EOPNOTSUPP);
    }

    if (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_INNER) {
        if (dev->dev->caps.tunnel_offload_mode ==
            MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
            /*
             * Hash according to inner headers if exist, otherwise
             * according to outer headers.
             */
            rss_ctx->flags |= MLX4_RSS_BY_INNER_HEADERS_IPONLY;
        } else {
            pr_debug("RSS Hash for inner headers isn't supported\n");
            return (-EOPNOTSUPP);
        }
    }

    return 0;
}
static int create_qp_rss(struct mlx4_ib_dev *dev,
             struct ib_qp_init_attr *init_attr,
             struct mlx4_ib_create_qp_rss *ucmd,
             struct mlx4_ib_qp *qp)
{
    int qpn;
    int err;

    qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;

    err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, 0, qp->mqp.usage);
    if (err)
        return err;

    err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
    if (err)
        goto err_qpn;

    mutex_init(&qp->mutex);

    INIT_LIST_HEAD(&qp->gid_list);
    INIT_LIST_HEAD(&qp->steering_rules);

    qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
    qp->state = IB_QPS_RESET;

    /* Set dummy send resources to be compatible with HV and PRM */
    qp->sq_no_prefetch = 1;
    qp->sq.wqe_cnt = 1;
    qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
    qp->buf_size = qp->sq.wqe_cnt << MLX4_IB_MIN_SQ_STRIDE;
    qp->mtt = (to_mqp(
           (struct ib_qp *)init_attr->rwq_ind_tbl->ind_tbl[0]))->mtt;

    qp->rss_ctx = kzalloc(sizeof(*qp->rss_ctx), GFP_KERNEL);
    if (!qp->rss_ctx) {
        err = -ENOMEM;
        goto err_qp_alloc;
    }

    err = set_qp_rss(dev, qp->rss_ctx, init_attr, ucmd);
    if (err)
        goto err;

    return 0;

err:
    kfree(qp->rss_ctx);

err_qp_alloc:
    mlx4_qp_remove(dev->dev, &qp->mqp);
    mlx4_qp_free(dev->dev, &qp->mqp);

err_qpn:
    mlx4_qp_release_range(dev->dev, qpn, 1);
    return err;
}
static struct ib_qp *_mlx4_ib_create_qp_rss(struct ib_pd *pd,
                        struct ib_qp_init_attr *init_attr,
                        struct ib_udata *udata)
{
    struct mlx4_ib_qp *qp;
    struct mlx4_ib_create_qp_rss ucmd = {};
    size_t required_cmd_sz;
    int err;

    if (!udata) {
        pr_debug("RSS QP with NULL udata\n");
        return ERR_PTR(-EINVAL);
    }

    if (udata->outlen)
        return ERR_PTR(-EOPNOTSUPP);

    required_cmd_sz = offsetof(typeof(ucmd), reserved1) +
                    sizeof(ucmd.reserved1);
    if (udata->inlen < required_cmd_sz) {
        pr_debug("invalid inlen\n");
        return ERR_PTR(-EINVAL);
    }

    if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
        pr_debug("copy failed\n");
        return ERR_PTR(-EFAULT);
    }

    if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)))
        return ERR_PTR(-EOPNOTSUPP);

    if (ucmd.comp_mask || ucmd.reserved1)
        return ERR_PTR(-EOPNOTSUPP);

    if (udata->inlen > sizeof(ucmd) &&
        !ib_is_udata_cleared(udata, sizeof(ucmd),
                 udata->inlen - sizeof(ucmd))) {
        pr_debug("inlen is not supported\n");
        return ERR_PTR(-EOPNOTSUPP);
    }

    if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
        pr_debug("RSS QP with unsupported QP type %d\n",
             init_attr->qp_type);
        return ERR_PTR(-EOPNOTSUPP);
    }

    if (init_attr->create_flags) {
        pr_debug("RSS QP doesn't support create flags\n");
        return ERR_PTR(-EOPNOTSUPP);
    }

    if (init_attr->send_cq || init_attr->cap.max_send_wr) {
        pr_debug("RSS QP with unsupported send attributes\n");
        return ERR_PTR(-EOPNOTSUPP);
    }

    qp = kzalloc(sizeof(*qp), GFP_KERNEL);
    if (!qp)
        return ERR_PTR(-ENOMEM);

    qp->pri.vid = 0xFFFF;
    qp->alt.vid = 0xFFFF;

    err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp);
    if (err) {
        kfree(qp);
        return ERR_PTR(err);
    }

    qp->ibqp.qp_num = qp->mqp.qpn;

    return &qp->ibqp;
}
/*
 * This function allocates a WQN from a range which is consecutive and aligned
 * to its size. In case the range is full, then it creates a new range and
 * allocates WQN from it. The new range will be used for following allocations.
 */
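/*
 * Illustrative example (not from the original source): a WQ created with
 * log_range_size = 3 reserves a range of 8 consecutive WQNs; subsequent WQs
 * are handed base_wqn + 0..7 until the range is exhausted or marked dirty,
 * after which a fresh range is reserved.
 */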
static int mlx4_ib_alloc_wqn(struct mlx4_ib_ucontext *context,
                 struct mlx4_ib_qp *qp, int range_size, int *wqn)
{
    struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device);
    struct mlx4_wqn_range *range;
    int err = 0;

    mutex_lock(&context->wqn_ranges_mutex);

    range = list_first_entry_or_null(&context->wqn_ranges_list,
                     struct mlx4_wqn_range, list);

    if (!range || (range->refcount == range->size) || range->dirty) {
        range = kzalloc(sizeof(*range), GFP_KERNEL);
        if (!range) {
            err = -ENOMEM;
            goto out;
        }

        err = mlx4_qp_reserve_range(dev->dev, range_size,
                        range_size, &range->base_wqn, 0,
                        qp->mqp.usage);
        if (err) {
            kfree(range);
            goto out;
        }

        range->size = range_size;
        list_add(&range->list, &context->wqn_ranges_list);
    } else if (range_size != 1) {
        /*
         * Requesting a new range (>1) when last range is still open, is
         * not valid.
         */
        err = -EINVAL;
        goto out;
    }

    qp->wqn_range = range;

    *wqn = range->base_wqn + range->refcount;

    range->refcount++;

out:
    mutex_unlock(&context->wqn_ranges_mutex);

    return err;
}
static void mlx4_ib_release_wqn(struct mlx4_ib_ucontext *context,
                struct mlx4_ib_qp *qp, bool dirty_release)
{
    struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device);
    struct mlx4_wqn_range *range;

    mutex_lock(&context->wqn_ranges_mutex);

    range = qp->wqn_range;

    range->refcount--;
    if (!range->refcount) {
        mlx4_qp_release_range(dev->dev, range->base_wqn,
                      range->size);
        list_del(&range->list);
        kfree(range);
    } else if (dirty_release) {
        /*
         * A range which one of its WQNs is destroyed, won't be able to be
         * reused for further WQN allocations.
         * The next created WQ will allocate a new range.
         */
        range->dirty = true;
    }

    mutex_unlock(&context->wqn_ranges_mutex);
}
static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
             struct ib_udata *udata, struct mlx4_ib_qp *qp)
{
    struct mlx4_ib_dev *dev = to_mdev(pd->device);
    int qpn;
    int err;
    struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
        udata, struct mlx4_ib_ucontext, ibucontext);
    struct mlx4_ib_cq *mcq;
    unsigned long flags;
    int range_size;
    struct mlx4_ib_create_wq wq;
    size_t copy_len;
    int shift;
    int n;

    qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;

    mutex_init(&qp->mutex);
    spin_lock_init(&qp->sq.lock);
    spin_lock_init(&qp->rq.lock);
    INIT_LIST_HEAD(&qp->gid_list);
    INIT_LIST_HEAD(&qp->steering_rules);

    qp->state = IB_QPS_RESET;

    copy_len = min(sizeof(struct mlx4_ib_create_wq), udata->inlen);

    if (ib_copy_from_udata(&wq, udata, copy_len)) {
        err = -EFAULT;
        goto err;
    }

    if (wq.comp_mask || wq.reserved[0] || wq.reserved[1] ||
        wq.reserved[2]) {
        pr_debug("user command isn't supported\n");
        err = -EOPNOTSUPP;
        goto err;
    }

    if (wq.log_range_size > ilog2(dev->dev->caps.max_rss_tbl_sz)) {
        pr_debug("WQN range size must be equal or smaller than %d\n",
             dev->dev->caps.max_rss_tbl_sz);
        err = -EOPNOTSUPP;
        goto err;
    }
    range_size = 1 << wq.log_range_size;

    if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS)
        qp->flags |= MLX4_IB_QP_SCATTER_FCS;

    err = set_rq_size(dev, &init_attr->cap, true, true, qp, qp->inl_recv_sz);
    if (err)
        goto err;

    qp->sq_no_prefetch = 1;
    qp->sq.wqe_cnt = 1;
    qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
    qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
               (qp->sq.wqe_cnt << qp->sq.wqe_shift);

    qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0);
    if (IS_ERR(qp->umem)) {
        err = PTR_ERR(qp->umem);
        goto err;
    }

    n = ib_umem_page_count(qp->umem);
    shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
    err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);

    if (err)
        goto err_buf;

    err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
    if (err)
        goto err_mtt;

    err = mlx4_ib_db_map_user(udata, wq.db_addr, &qp->db);
    if (err)
        goto err_mtt;
    qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;

    err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn);
    if (err)
        goto err_wrid;

    err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
    if (err)
        goto err_qpn;

    /*
     * Hardware wants QPN written in big-endian order (after
     * shifting) for send doorbell.  Precompute this value to save
     * a little bit when posting sends.
     */
    qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
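    /*
     * Added for clarity: shifting the 24-bit QPN left by 8 and byte-swapping
     * leaves the QPN in the first three bytes of the 32-bit doorbell value,
     * which is the layout the hardware expects when the value is written to
     * the send doorbell.
     */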
    qp->mqp.event = mlx4_ib_wq_event;

    spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
    mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
             to_mcq(init_attr->recv_cq));
    /* Maintain device to QPs access, needed for further handling
     * via reset flow
     */
    list_add_tail(&qp->qps_list, &dev->qp_list);
    /* Maintain CQ to QPs access, needed for further handling
     * via reset flow
     */
    mcq = to_mcq(init_attr->send_cq);
    list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
    mcq = to_mcq(init_attr->recv_cq);
    list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
    mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
               to_mcq(init_attr->recv_cq));
    spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

    return 0;

err_qpn:
    mlx4_ib_release_wqn(context, qp, 0);
err_wrid:
    mlx4_ib_db_unmap_user(context, &qp->db);

err_mtt:
    mlx4_mtt_cleanup(dev->dev, &qp->mtt);

err_buf:
    ib_umem_release(qp->umem);

err:
    return err;
}
990 static int create_qp_common(struct ib_pd
*pd
, struct ib_qp_init_attr
*init_attr
,
991 struct ib_udata
*udata
, int sqpn
,
992 struct mlx4_ib_qp
**caller_qp
)
994 struct mlx4_ib_dev
*dev
= to_mdev(pd
->device
);
997 struct mlx4_ib_sqp
*sqp
= NULL
;
998 struct mlx4_ib_qp
*qp
;
999 struct mlx4_ib_ucontext
*context
= rdma_udata_to_drv_context(
1000 udata
, struct mlx4_ib_ucontext
, ibucontext
);
1001 enum mlx4_ib_qp_type qp_type
= (enum mlx4_ib_qp_type
) init_attr
->qp_type
;
1002 struct mlx4_ib_cq
*mcq
;
1003 unsigned long flags
;
1005 /* When tunneling special qps, we use a plain UD qp */
1007 if (mlx4_is_mfunc(dev
->dev
) &&
1008 (!mlx4_is_master(dev
->dev
) ||
1009 !(init_attr
->create_flags
& MLX4_IB_SRIOV_SQP
))) {
1010 if (init_attr
->qp_type
== IB_QPT_GSI
)
1011 qp_type
= MLX4_IB_QPT_PROXY_GSI
;
1013 if (mlx4_is_master(dev
->dev
) ||
1014 qp0_enabled_vf(dev
->dev
, sqpn
))
1015 qp_type
= MLX4_IB_QPT_PROXY_SMI_OWNER
;
1017 qp_type
= MLX4_IB_QPT_PROXY_SMI
;
1021 /* add extra sg entry for tunneling */
1022 init_attr
->cap
.max_recv_sge
++;
1023 } else if (init_attr
->create_flags
& MLX4_IB_SRIOV_TUNNEL_QP
) {
1024 struct mlx4_ib_qp_tunnel_init_attr
*tnl_init
=
1025 container_of(init_attr
,
1026 struct mlx4_ib_qp_tunnel_init_attr
, init_attr
);
1027 if ((tnl_init
->proxy_qp_type
!= IB_QPT_SMI
&&
1028 tnl_init
->proxy_qp_type
!= IB_QPT_GSI
) ||
1029 !mlx4_is_master(dev
->dev
))
1031 if (tnl_init
->proxy_qp_type
== IB_QPT_GSI
)
1032 qp_type
= MLX4_IB_QPT_TUN_GSI
;
1033 else if (tnl_init
->slave
== mlx4_master_func_num(dev
->dev
) ||
1034 mlx4_vf_smi_enabled(dev
->dev
, tnl_init
->slave
,
1036 qp_type
= MLX4_IB_QPT_TUN_SMI_OWNER
;
1038 qp_type
= MLX4_IB_QPT_TUN_SMI
;
1039 /* we are definitely in the PPF here, since we are creating
1040 * tunnel QPs. base_tunnel_sqpn is therefore valid. */
1041 qpn
= dev
->dev
->phys_caps
.base_tunnel_sqpn
+ 8 * tnl_init
->slave
1042 + tnl_init
->proxy_qp_type
* 2 + tnl_init
->port
- 1;
1047 if (qp_type
== MLX4_IB_QPT_SMI
|| qp_type
== MLX4_IB_QPT_GSI
||
1048 (qp_type
& (MLX4_IB_QPT_PROXY_SMI
| MLX4_IB_QPT_PROXY_SMI_OWNER
|
1049 MLX4_IB_QPT_PROXY_GSI
| MLX4_IB_QPT_TUN_SMI_OWNER
))) {
1050 sqp
= kzalloc(sizeof(struct mlx4_ib_sqp
), GFP_KERNEL
);
1055 qp
= kzalloc(sizeof(struct mlx4_ib_qp
), GFP_KERNEL
);
1059 qp
->pri
.vid
= 0xFFFF;
1060 qp
->alt
.vid
= 0xFFFF;
1064 qp
->mlx4_ib_qp_type
= qp_type
;
1066 mutex_init(&qp
->mutex
);
1067 spin_lock_init(&qp
->sq
.lock
);
1068 spin_lock_init(&qp
->rq
.lock
);
1069 INIT_LIST_HEAD(&qp
->gid_list
);
1070 INIT_LIST_HEAD(&qp
->steering_rules
);
1072 qp
->state
= IB_QPS_RESET
;
1073 if (init_attr
->sq_sig_type
== IB_SIGNAL_ALL_WR
)
1074 qp
->sq_signal_bits
= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE
);
1077 struct mlx4_ib_create_qp ucmd
;
1082 copy_len
= sizeof(struct mlx4_ib_create_qp
);
1084 if (ib_copy_from_udata(&ucmd
, udata
, copy_len
)) {
1089 qp
->inl_recv_sz
= ucmd
.inl_recv_sz
;
1091 if (init_attr
->create_flags
& IB_QP_CREATE_SCATTER_FCS
) {
1092 if (!(dev
->dev
->caps
.flags
&
1093 MLX4_DEV_CAP_FLAG_FCS_KEEP
)) {
1094 pr_debug("scatter FCS is unsupported\n");
1099 qp
->flags
|= MLX4_IB_QP_SCATTER_FCS
;
1102 err
= set_rq_size(dev
, &init_attr
->cap
, udata
,
1103 qp_has_rq(init_attr
), qp
, qp
->inl_recv_sz
);
1107 qp
->sq_no_prefetch
= ucmd
.sq_no_prefetch
;
1109 err
= set_user_sq_size(dev
, qp
, &ucmd
);
1114 ib_umem_get(pd
->device
, ucmd
.buf_addr
, qp
->buf_size
, 0);
1115 if (IS_ERR(qp
->umem
)) {
1116 err
= PTR_ERR(qp
->umem
);
1120 n
= ib_umem_page_count(qp
->umem
);
1121 shift
= mlx4_ib_umem_calc_optimal_mtt_size(qp
->umem
, 0, &n
);
1122 err
= mlx4_mtt_init(dev
->dev
, n
, shift
, &qp
->mtt
);
1127 err
= mlx4_ib_umem_write_mtt(dev
, &qp
->mtt
, qp
->umem
);
1131 if (qp_has_rq(init_attr
)) {
1132 err
= mlx4_ib_db_map_user(udata
, ucmd
.db_addr
, &qp
->db
);
1136 qp
->mqp
.usage
= MLX4_RES_USAGE_USER_VERBS
;
1138 err
= set_rq_size(dev
, &init_attr
->cap
, udata
,
1139 qp_has_rq(init_attr
), qp
, 0);
1143 qp
->sq_no_prefetch
= 0;
1145 if (init_attr
->create_flags
& IB_QP_CREATE_IPOIB_UD_LSO
)
1146 qp
->flags
|= MLX4_IB_QP_LSO
;
1148 if (init_attr
->create_flags
& IB_QP_CREATE_NETIF_QP
) {
1149 if (dev
->steering_support
==
1150 MLX4_STEERING_MODE_DEVICE_MANAGED
)
1151 qp
->flags
|= MLX4_IB_QP_NETIF
;
1156 err
= set_kernel_sq_size(dev
, &init_attr
->cap
, qp_type
, qp
);
1160 if (qp_has_rq(init_attr
)) {
1161 err
= mlx4_db_alloc(dev
->dev
, &qp
->db
, 0);
1168 if (mlx4_buf_alloc(dev
->dev
, qp
->buf_size
, PAGE_SIZE
* 2,
1174 err
= mlx4_mtt_init(dev
->dev
, qp
->buf
.npages
, qp
->buf
.page_shift
,
1179 err
= mlx4_buf_write_mtt(dev
->dev
, &qp
->mtt
, &qp
->buf
);
1183 qp
->sq
.wrid
= kvmalloc_array(qp
->sq
.wqe_cnt
,
1184 sizeof(u64
), GFP_KERNEL
);
1185 qp
->rq
.wrid
= kvmalloc_array(qp
->rq
.wqe_cnt
,
1186 sizeof(u64
), GFP_KERNEL
);
1187 if (!qp
->sq
.wrid
|| !qp
->rq
.wrid
) {
1191 qp
->mqp
.usage
= MLX4_RES_USAGE_DRIVER
;
1195 if (qp
->mlx4_ib_qp_type
& (MLX4_IB_QPT_PROXY_SMI_OWNER
|
1196 MLX4_IB_QPT_PROXY_SMI
| MLX4_IB_QPT_PROXY_GSI
)) {
1197 if (alloc_proxy_bufs(pd
->device
, qp
)) {
1203 /* Raw packet QPNs may not have bits 6,7 set in their qp_num;
1204 * otherwise, the WQE BlueFlame setup flow wrongly causes
1205 * VLAN insertion. */
1206 if (init_attr
->qp_type
== IB_QPT_RAW_PACKET
)
1207 err
= mlx4_qp_reserve_range(dev
->dev
, 1, 1, &qpn
,
1208 (init_attr
->cap
.max_send_wr
?
1209 MLX4_RESERVE_ETH_BF_QP
: 0) |
1210 (init_attr
->cap
.max_recv_wr
?
1211 MLX4_RESERVE_A0_QP
: 0),
1214 if (qp
->flags
& MLX4_IB_QP_NETIF
)
1215 err
= mlx4_ib_steer_qp_alloc(dev
, 1, &qpn
);
1217 err
= mlx4_qp_reserve_range(dev
->dev
, 1, 1,
1218 &qpn
, 0, qp
->mqp
.usage
);
1223 if (init_attr
->create_flags
& IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
)
1224 qp
->flags
|= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK
;
1226 err
= mlx4_qp_alloc(dev
->dev
, qpn
, &qp
->mqp
);
1230 if (init_attr
->qp_type
== IB_QPT_XRC_TGT
)
1231 qp
->mqp
.qpn
|= (1 << 23);
1234 * Hardware wants QPN written in big-endian order (after
1235 * shifting) for send doorbell. Precompute this value to save
1236 * a little bit when posting sends.
1238 qp
->doorbell_qpn
= swab32(qp
->mqp
.qpn
<< 8);
1240 qp
->mqp
.event
= mlx4_ib_qp_event
;
1245 spin_lock_irqsave(&dev
->reset_flow_resource_lock
, flags
);
1246 mlx4_ib_lock_cqs(to_mcq(init_attr
->send_cq
),
1247 to_mcq(init_attr
->recv_cq
));
1248 /* Maintain device to QPs access, needed for further handling
1251 list_add_tail(&qp
->qps_list
, &dev
->qp_list
);
1252 /* Maintain CQ to QPs access, needed for further handling
1255 mcq
= to_mcq(init_attr
->send_cq
);
1256 list_add_tail(&qp
->cq_send_list
, &mcq
->send_qp_list
);
1257 mcq
= to_mcq(init_attr
->recv_cq
);
1258 list_add_tail(&qp
->cq_recv_list
, &mcq
->recv_qp_list
);
1259 mlx4_ib_unlock_cqs(to_mcq(init_attr
->send_cq
),
1260 to_mcq(init_attr
->recv_cq
));
1261 spin_unlock_irqrestore(&dev
->reset_flow_resource_lock
, flags
);
1266 if (qp
->flags
& MLX4_IB_QP_NETIF
)
1267 mlx4_ib_steer_qp_free(dev
, qpn
, 1);
1269 mlx4_qp_release_range(dev
->dev
, qpn
, 1);
1272 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_PROXY_GSI
)
1273 free_proxy_bufs(pd
->device
, qp
);
1276 if (qp_has_rq(init_attr
))
1277 mlx4_ib_db_unmap_user(context
, &qp
->db
);
1279 kvfree(qp
->sq
.wrid
);
1280 kvfree(qp
->rq
.wrid
);
1284 mlx4_mtt_cleanup(dev
->dev
, &qp
->mtt
);
1288 mlx4_buf_free(dev
->dev
, qp
->buf_size
, &qp
->buf
);
1289 ib_umem_release(qp
->umem
);
1292 if (!udata
&& qp_has_rq(init_attr
))
1293 mlx4_db_free(dev
->dev
, &qp
->db
);
1296 if (!sqp
&& !*caller_qp
)
static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
{
    switch (state) {
    case IB_QPS_RESET:  return MLX4_QP_STATE_RST;
    case IB_QPS_INIT:   return MLX4_QP_STATE_INIT;
    case IB_QPS_RTR:    return MLX4_QP_STATE_RTR;
    case IB_QPS_RTS:    return MLX4_QP_STATE_RTS;
    case IB_QPS_SQD:    return MLX4_QP_STATE_SQD;
    case IB_QPS_SQE:    return MLX4_QP_STATE_SQER;
    case IB_QPS_ERR:    return MLX4_QP_STATE_ERR;
    default:            return -1;
    }
}
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
    __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
    if (send_cq == recv_cq) {
        spin_lock(&send_cq->lock);
        __acquire(&recv_cq->lock);
    } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
        spin_lock(&send_cq->lock);
        spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
    } else {
        spin_lock(&recv_cq->lock);
        spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
    }
}
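/*
 * Note (added for clarity): when the two CQs differ, the locking above is
 * ordered by ascending CQN, so any two paths that lock the same pair of CQs
 * take the locks in the same order and cannot deadlock against each other.
 */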
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
    __releases(&send_cq->lock) __releases(&recv_cq->lock)
{
    if (send_cq == recv_cq) {
        __release(&recv_cq->lock);
        spin_unlock(&send_cq->lock);
    } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
        spin_unlock(&recv_cq->lock);
        spin_unlock(&send_cq->lock);
    } else {
        spin_unlock(&send_cq->lock);
        spin_unlock(&recv_cq->lock);
    }
}
static void del_gid_entries(struct mlx4_ib_qp *qp)
{
    struct mlx4_ib_gid_entry *ge, *tmp;

    list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
        list_del(&ge->list);
        kfree(ge);
    }
}
static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
{
    if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
        return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
    else
        return to_mpd(qp->ibqp.pd);
}
static void get_cqs(struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src,
            struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
{
    switch (qp->ibqp.qp_type) {
    case IB_QPT_XRC_TGT:
        *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
        *recv_cq = *send_cq;
        break;
    case IB_QPT_XRC_INI:
        *send_cq = to_mcq(qp->ibqp.send_cq);
        *recv_cq = *send_cq;
        break;
    default:
        *recv_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.recv_cq) :
                             to_mcq(qp->ibwq.cq);
        *send_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.send_cq) :
                             *recv_cq;
        break;
    }
}
1386 static void destroy_qp_rss(struct mlx4_ib_dev
*dev
, struct mlx4_ib_qp
*qp
)
1388 if (qp
->state
!= IB_QPS_RESET
) {
1391 for (i
= 0; i
< (1 << qp
->ibqp
.rwq_ind_tbl
->log_ind_tbl_size
);
1393 struct ib_wq
*ibwq
= qp
->ibqp
.rwq_ind_tbl
->ind_tbl
[i
];
1394 struct mlx4_ib_qp
*wq
= to_mqp((struct ib_qp
*)ibwq
);
1396 mutex_lock(&wq
->mutex
);
1400 mutex_unlock(&wq
->mutex
);
1403 if (mlx4_qp_modify(dev
->dev
, NULL
, to_mlx4_state(qp
->state
),
1404 MLX4_QP_STATE_RST
, NULL
, 0, 0, &qp
->mqp
))
1405 pr_warn("modify QP %06x to RESET failed.\n",
1409 mlx4_qp_remove(dev
->dev
, &qp
->mqp
);
1410 mlx4_qp_free(dev
->dev
, &qp
->mqp
);
1411 mlx4_qp_release_range(dev
->dev
, qp
->mqp
.qpn
, 1);
1412 del_gid_entries(qp
);
1416 static void destroy_qp_common(struct mlx4_ib_dev
*dev
, struct mlx4_ib_qp
*qp
,
1417 enum mlx4_ib_source_type src
,
1418 struct ib_udata
*udata
)
1420 struct mlx4_ib_cq
*send_cq
, *recv_cq
;
1421 unsigned long flags
;
1423 if (qp
->state
!= IB_QPS_RESET
) {
1424 if (mlx4_qp_modify(dev
->dev
, NULL
, to_mlx4_state(qp
->state
),
1425 MLX4_QP_STATE_RST
, NULL
, 0, 0, &qp
->mqp
))
1426 pr_warn("modify QP %06x to RESET failed.\n",
1428 if (qp
->pri
.smac
|| (!qp
->pri
.smac
&& qp
->pri
.smac_port
)) {
1429 mlx4_unregister_mac(dev
->dev
, qp
->pri
.smac_port
, qp
->pri
.smac
);
1431 qp
->pri
.smac_port
= 0;
1434 mlx4_unregister_mac(dev
->dev
, qp
->alt
.smac_port
, qp
->alt
.smac
);
1437 if (qp
->pri
.vid
< 0x1000) {
1438 mlx4_unregister_vlan(dev
->dev
, qp
->pri
.vlan_port
, qp
->pri
.vid
);
1439 qp
->pri
.vid
= 0xFFFF;
1440 qp
->pri
.candidate_vid
= 0xFFFF;
1441 qp
->pri
.update_vid
= 0;
1443 if (qp
->alt
.vid
< 0x1000) {
1444 mlx4_unregister_vlan(dev
->dev
, qp
->alt
.vlan_port
, qp
->alt
.vid
);
1445 qp
->alt
.vid
= 0xFFFF;
1446 qp
->alt
.candidate_vid
= 0xFFFF;
1447 qp
->alt
.update_vid
= 0;
1451 get_cqs(qp
, src
, &send_cq
, &recv_cq
);
1453 spin_lock_irqsave(&dev
->reset_flow_resource_lock
, flags
);
1454 mlx4_ib_lock_cqs(send_cq
, recv_cq
);
1456 /* del from lists under both locks above to protect reset flow paths */
1457 list_del(&qp
->qps_list
);
1458 list_del(&qp
->cq_send_list
);
1459 list_del(&qp
->cq_recv_list
);
1461 __mlx4_ib_cq_clean(recv_cq
, qp
->mqp
.qpn
,
1462 qp
->ibqp
.srq
? to_msrq(qp
->ibqp
.srq
): NULL
);
1463 if (send_cq
!= recv_cq
)
1464 __mlx4_ib_cq_clean(send_cq
, qp
->mqp
.qpn
, NULL
);
1467 mlx4_qp_remove(dev
->dev
, &qp
->mqp
);
1469 mlx4_ib_unlock_cqs(send_cq
, recv_cq
);
1470 spin_unlock_irqrestore(&dev
->reset_flow_resource_lock
, flags
);
1472 mlx4_qp_free(dev
->dev
, &qp
->mqp
);
1474 if (!is_sqp(dev
, qp
) && !is_tunnel_qp(dev
, qp
)) {
1475 if (qp
->flags
& MLX4_IB_QP_NETIF
)
1476 mlx4_ib_steer_qp_free(dev
, qp
->mqp
.qpn
, 1);
1477 else if (src
== MLX4_IB_RWQ_SRC
)
1478 mlx4_ib_release_wqn(
1479 rdma_udata_to_drv_context(
1481 struct mlx4_ib_ucontext
,
1485 mlx4_qp_release_range(dev
->dev
, qp
->mqp
.qpn
, 1);
1488 mlx4_mtt_cleanup(dev
->dev
, &qp
->mtt
);
1491 if (qp
->rq
.wqe_cnt
) {
1492 struct mlx4_ib_ucontext
*mcontext
=
1493 rdma_udata_to_drv_context(
1495 struct mlx4_ib_ucontext
,
1498 mlx4_ib_db_unmap_user(mcontext
, &qp
->db
);
1501 kvfree(qp
->sq
.wrid
);
1502 kvfree(qp
->rq
.wrid
);
1503 if (qp
->mlx4_ib_qp_type
& (MLX4_IB_QPT_PROXY_SMI_OWNER
|
1504 MLX4_IB_QPT_PROXY_SMI
| MLX4_IB_QPT_PROXY_GSI
))
1505 free_proxy_bufs(&dev
->ib_dev
, qp
);
1506 mlx4_buf_free(dev
->dev
, qp
->buf_size
, &qp
->buf
);
1508 mlx4_db_free(dev
->dev
, &qp
->db
);
1510 ib_umem_release(qp
->umem
);
1512 del_gid_entries(qp
);
1515 static u32
get_sqp_num(struct mlx4_ib_dev
*dev
, struct ib_qp_init_attr
*attr
)
1518 if (!mlx4_is_mfunc(dev
->dev
) ||
1519 (mlx4_is_master(dev
->dev
) &&
1520 attr
->create_flags
& MLX4_IB_SRIOV_SQP
)) {
1521 return dev
->dev
->phys_caps
.base_sqpn
+
1522 (attr
->qp_type
== IB_QPT_SMI
? 0 : 2) +
1525 /* PF or VF -- creating proxies */
1526 if (attr
->qp_type
== IB_QPT_SMI
)
1527 return dev
->dev
->caps
.spec_qps
[attr
->port_num
- 1].qp0_proxy
;
1529 return dev
->dev
->caps
.spec_qps
[attr
->port_num
- 1].qp1_proxy
;
1532 static struct ib_qp
*_mlx4_ib_create_qp(struct ib_pd
*pd
,
1533 struct ib_qp_init_attr
*init_attr
,
1534 struct ib_udata
*udata
)
1536 struct mlx4_ib_qp
*qp
= NULL
;
1538 int sup_u_create_flags
= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK
;
1541 if (init_attr
->rwq_ind_tbl
)
1542 return _mlx4_ib_create_qp_rss(pd
, init_attr
, udata
);
1545 * We only support LSO, vendor flag1, and multicast loopback blocking,
1546 * and only for kernel UD QPs.
1548 if (init_attr
->create_flags
& ~(MLX4_IB_QP_LSO
|
1549 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK
|
1550 MLX4_IB_SRIOV_TUNNEL_QP
|
1553 MLX4_IB_QP_CREATE_ROCE_V2_GSI
))
1554 return ERR_PTR(-EINVAL
);
1556 if (init_attr
->create_flags
& IB_QP_CREATE_NETIF_QP
) {
1557 if (init_attr
->qp_type
!= IB_QPT_UD
)
1558 return ERR_PTR(-EINVAL
);
1561 if (init_attr
->create_flags
) {
1562 if (udata
&& init_attr
->create_flags
& ~(sup_u_create_flags
))
1563 return ERR_PTR(-EINVAL
);
1565 if ((init_attr
->create_flags
& ~(MLX4_IB_SRIOV_SQP
|
1566 MLX4_IB_QP_CREATE_ROCE_V2_GSI
|
1567 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK
) &&
1568 init_attr
->qp_type
!= IB_QPT_UD
) ||
1569 (init_attr
->create_flags
& MLX4_IB_SRIOV_SQP
&&
1570 init_attr
->qp_type
> IB_QPT_GSI
) ||
1571 (init_attr
->create_flags
& MLX4_IB_QP_CREATE_ROCE_V2_GSI
&&
1572 init_attr
->qp_type
!= IB_QPT_GSI
))
1573 return ERR_PTR(-EINVAL
);
1576 switch (init_attr
->qp_type
) {
1577 case IB_QPT_XRC_TGT
:
1578 pd
= to_mxrcd(init_attr
->xrcd
)->pd
;
1579 xrcdn
= to_mxrcd(init_attr
->xrcd
)->xrcdn
;
1580 init_attr
->send_cq
= to_mxrcd(init_attr
->xrcd
)->cq
;
1582 case IB_QPT_XRC_INI
:
1583 if (!(to_mdev(pd
->device
)->dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_XRC
))
1584 return ERR_PTR(-ENOSYS
);
1585 init_attr
->recv_cq
= init_attr
->send_cq
;
1589 case IB_QPT_RAW_PACKET
:
1590 qp
= kzalloc(sizeof(*qp
), GFP_KERNEL
);
1592 return ERR_PTR(-ENOMEM
);
1593 qp
->pri
.vid
= 0xFFFF;
1594 qp
->alt
.vid
= 0xFFFF;
1598 err
= create_qp_common(pd
, init_attr
, udata
, 0, &qp
);
1601 return ERR_PTR(err
);
1604 qp
->ibqp
.qp_num
= qp
->mqp
.qpn
;
1614 /* Userspace is not allowed to create special QPs: */
1616 return ERR_PTR(-EINVAL
);
1617 if (init_attr
->create_flags
& MLX4_IB_QP_CREATE_ROCE_V2_GSI
) {
1618 int res
= mlx4_qp_reserve_range(to_mdev(pd
->device
)->dev
,
1620 MLX4_RES_USAGE_DRIVER
);
1623 return ERR_PTR(res
);
1625 sqpn
= get_sqp_num(to_mdev(pd
->device
), init_attr
);
1628 err
= create_qp_common(pd
, init_attr
, udata
, sqpn
, &qp
);
1630 return ERR_PTR(err
);
1632 qp
->port
= init_attr
->port_num
;
1633 qp
->ibqp
.qp_num
= init_attr
->qp_type
== IB_QPT_SMI
? 0 :
1634 init_attr
->create_flags
& MLX4_IB_QP_CREATE_ROCE_V2_GSI
? sqpn
: 1;
1638 /* Don't support raw QPs */
1639 return ERR_PTR(-EOPNOTSUPP
);
1645 struct ib_qp
*mlx4_ib_create_qp(struct ib_pd
*pd
,
1646 struct ib_qp_init_attr
*init_attr
,
1647 struct ib_udata
*udata
) {
1648 struct ib_device
*device
= pd
? pd
->device
: init_attr
->xrcd
->device
;
1650 struct mlx4_ib_dev
*dev
= to_mdev(device
);
1652 ibqp
= _mlx4_ib_create_qp(pd
, init_attr
, udata
);
1654 if (!IS_ERR(ibqp
) &&
1655 (init_attr
->qp_type
== IB_QPT_GSI
) &&
1656 !(init_attr
->create_flags
& MLX4_IB_QP_CREATE_ROCE_V2_GSI
)) {
1657 struct mlx4_ib_sqp
*sqp
= to_msqp((to_mqp(ibqp
)));
1658 int is_eth
= rdma_cap_eth_ah(&dev
->ib_dev
, init_attr
->port_num
);
1661 dev
->dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_ROCE_V1_V2
) {
1662 init_attr
->create_flags
|= MLX4_IB_QP_CREATE_ROCE_V2_GSI
;
1663 sqp
->roce_v2_gsi
= ib_create_qp(pd
, init_attr
);
1665 if (IS_ERR(sqp
->roce_v2_gsi
)) {
1666 pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp
->roce_v2_gsi
));
1667 sqp
->roce_v2_gsi
= NULL
;
1669 sqp
= to_msqp(to_mqp(sqp
->roce_v2_gsi
));
1670 sqp
->qp
.flags
|= MLX4_IB_ROCE_V2_GSI_QP
;
1673 init_attr
->create_flags
&= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI
;
1679 static int _mlx4_ib_destroy_qp(struct ib_qp
*qp
, struct ib_udata
*udata
)
1681 struct mlx4_ib_dev
*dev
= to_mdev(qp
->device
);
1682 struct mlx4_ib_qp
*mqp
= to_mqp(qp
);
1684 if (is_qp0(dev
, mqp
))
1685 mlx4_CLOSE_PORT(dev
->dev
, mqp
->port
);
1687 if (mqp
->mlx4_ib_qp_type
== MLX4_IB_QPT_PROXY_GSI
&&
1688 dev
->qp1_proxy
[mqp
->port
- 1] == mqp
) {
1689 mutex_lock(&dev
->qp1_proxy_lock
[mqp
->port
- 1]);
1690 dev
->qp1_proxy
[mqp
->port
- 1] = NULL
;
1691 mutex_unlock(&dev
->qp1_proxy_lock
[mqp
->port
- 1]);
1694 if (mqp
->counter_index
)
1695 mlx4_ib_free_qp_counter(dev
, mqp
);
1697 if (qp
->rwq_ind_tbl
) {
1698 destroy_qp_rss(dev
, mqp
);
1700 destroy_qp_common(dev
, mqp
, MLX4_IB_QP_SRC
, udata
);
1703 if (is_sqp(dev
, mqp
))
1704 kfree(to_msqp(mqp
));
1711 int mlx4_ib_destroy_qp(struct ib_qp
*qp
, struct ib_udata
*udata
)
1713 struct mlx4_ib_qp
*mqp
= to_mqp(qp
);
1715 if (mqp
->mlx4_ib_qp_type
== MLX4_IB_QPT_GSI
) {
1716 struct mlx4_ib_sqp
*sqp
= to_msqp(mqp
);
1718 if (sqp
->roce_v2_gsi
)
1719 ib_destroy_qp(sqp
->roce_v2_gsi
);
1722 return _mlx4_ib_destroy_qp(qp
, udata
);
1725 static int to_mlx4_st(struct mlx4_ib_dev
*dev
, enum mlx4_ib_qp_type type
)
1728 case MLX4_IB_QPT_RC
: return MLX4_QP_ST_RC
;
1729 case MLX4_IB_QPT_UC
: return MLX4_QP_ST_UC
;
1730 case MLX4_IB_QPT_UD
: return MLX4_QP_ST_UD
;
1731 case MLX4_IB_QPT_XRC_INI
:
1732 case MLX4_IB_QPT_XRC_TGT
: return MLX4_QP_ST_XRC
;
1733 case MLX4_IB_QPT_SMI
:
1734 case MLX4_IB_QPT_GSI
:
1735 case MLX4_IB_QPT_RAW_PACKET
: return MLX4_QP_ST_MLX
;
1737 case MLX4_IB_QPT_PROXY_SMI_OWNER
:
1738 case MLX4_IB_QPT_TUN_SMI_OWNER
: return (mlx4_is_mfunc(dev
->dev
) ?
1739 MLX4_QP_ST_MLX
: -1);
1740 case MLX4_IB_QPT_PROXY_SMI
:
1741 case MLX4_IB_QPT_TUN_SMI
:
1742 case MLX4_IB_QPT_PROXY_GSI
:
1743 case MLX4_IB_QPT_TUN_GSI
: return (mlx4_is_mfunc(dev
->dev
) ?
1744 MLX4_QP_ST_UD
: -1);
static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
                   int attr_mask)
{
    u8 dest_rd_atomic;
    u32 access_flags;
    u32 hw_access_flags = 0;

    if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
        dest_rd_atomic = attr->max_dest_rd_atomic;
    else
        dest_rd_atomic = qp->resp_depth;

    if (attr_mask & IB_QP_ACCESS_FLAGS)
        access_flags = attr->qp_access_flags;
    else
        access_flags = qp->atomic_rd_en;

    if (!dest_rd_atomic)
        access_flags &= IB_ACCESS_REMOTE_WRITE;

    if (access_flags & IB_ACCESS_REMOTE_READ)
        hw_access_flags |= MLX4_QP_BIT_RRE;
    if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
        hw_access_flags |= MLX4_QP_BIT_RAE;
    if (access_flags & IB_ACCESS_REMOTE_WRITE)
        hw_access_flags |= MLX4_QP_BIT_RWE;

    return cpu_to_be32(hw_access_flags);
}
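/*
 * Note (added for clarity): when the destination RD-atomic count is zero the
 * QP cannot be the target of RDMA reads or atomics, so only REMOTE_WRITE
 * survives the masking above.
 */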
static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
                int attr_mask)
{
    if (attr_mask & IB_QP_PKEY_INDEX)
        sqp->pkey_index = attr->pkey_index;
    if (attr_mask & IB_QP_QKEY)
        sqp->qkey = attr->qkey;
    if (attr_mask & IB_QP_SQ_PSN)
        sqp->send_psn = attr->sq_psn;
}
static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
{
    path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
}
1795 static int _mlx4_set_path(struct mlx4_ib_dev
*dev
,
1796 const struct rdma_ah_attr
*ah
,
1797 u64 smac
, u16 vlan_tag
, struct mlx4_qp_path
*path
,
1798 struct mlx4_roce_smac_vlan_info
*smac_info
, u8 port
)
1804 path
->grh_mylmc
= rdma_ah_get_path_bits(ah
) & 0x7f;
1805 path
->rlid
= cpu_to_be16(rdma_ah_get_dlid(ah
));
1806 if (rdma_ah_get_static_rate(ah
)) {
1807 path
->static_rate
= rdma_ah_get_static_rate(ah
) +
1808 MLX4_STAT_RATE_OFFSET
;
1809 while (path
->static_rate
> IB_RATE_2_5_GBPS
+ MLX4_STAT_RATE_OFFSET
&&
1810 !(1 << path
->static_rate
& dev
->dev
->caps
.stat_rate_support
))
1811 --path
->static_rate
;
1813 path
->static_rate
= 0;
1815 if (rdma_ah_get_ah_flags(ah
) & IB_AH_GRH
) {
1816 const struct ib_global_route
*grh
= rdma_ah_read_grh(ah
);
1817 int real_sgid_index
=
1818 mlx4_ib_gid_index_to_real_index(dev
, grh
->sgid_attr
);
1820 if (real_sgid_index
< 0)
1821 return real_sgid_index
;
1822 if (real_sgid_index
>= dev
->dev
->caps
.gid_table_len
[port
]) {
1823 pr_err("sgid_index (%u) too large. max is %d\n",
1824 real_sgid_index
, dev
->dev
->caps
.gid_table_len
[port
] - 1);
1828 path
->grh_mylmc
|= 1 << 7;
1829 path
->mgid_index
= real_sgid_index
;
1830 path
->hop_limit
= grh
->hop_limit
;
1831 path
->tclass_flowlabel
=
1832 cpu_to_be32((grh
->traffic_class
<< 20) |
1834 memcpy(path
->rgid
, grh
->dgid
.raw
, 16);
1837 if (ah
->type
== RDMA_AH_ATTR_TYPE_ROCE
) {
1838 if (!(rdma_ah_get_ah_flags(ah
) & IB_AH_GRH
))
1841 path
->sched_queue
= MLX4_IB_DEFAULT_SCHED_QUEUE
|
1842 ((port
- 1) << 6) | ((rdma_ah_get_sl(ah
) & 7) << 3);
1844 path
->feup
|= MLX4_FEUP_FORCE_ETH_UP
;
1845 if (vlan_tag
< 0x1000) {
1846 if (smac_info
->vid
< 0x1000) {
1847 /* both valid vlan ids */
1848 if (smac_info
->vid
!= vlan_tag
) {
1849 /* different VIDs. unreg old and reg new */
1850 err
= mlx4_register_vlan(dev
->dev
, port
, vlan_tag
, &vidx
);
1853 smac_info
->candidate_vid
= vlan_tag
;
1854 smac_info
->candidate_vlan_index
= vidx
;
1855 smac_info
->candidate_vlan_port
= port
;
1856 smac_info
->update_vid
= 1;
1857 path
->vlan_index
= vidx
;
1859 path
->vlan_index
= smac_info
->vlan_index
;
1862 /* no current vlan tag in qp */
1863 err
= mlx4_register_vlan(dev
->dev
, port
, vlan_tag
, &vidx
);
1866 smac_info
->candidate_vid
= vlan_tag
;
1867 smac_info
->candidate_vlan_index
= vidx
;
1868 smac_info
->candidate_vlan_port
= port
;
1869 smac_info
->update_vid
= 1;
1870 path
->vlan_index
= vidx
;
1872 path
->feup
|= MLX4_FVL_FORCE_ETH_VLAN
;
1875 /* have current vlan tag. unregister it at modify-qp success */
1876 if (smac_info
->vid
< 0x1000) {
1877 smac_info
->candidate_vid
= 0xFFFF;
1878 smac_info
->update_vid
= 1;
1882 /* get smac_index for RoCE use.
1883 * If no smac was yet assigned, register one.
1884 * If one was already assigned, but the new mac differs,
1885 * unregister the old one and register the new one.
1887 if ((!smac_info
->smac
&& !smac_info
->smac_port
) ||
1888 smac_info
->smac
!= smac
) {
1889 /* register candidate now, unreg if needed, after success */
1890 smac_index
= mlx4_register_mac(dev
->dev
, port
, smac
);
1891 if (smac_index
>= 0) {
1892 smac_info
->candidate_smac_index
= smac_index
;
1893 smac_info
->candidate_smac
= smac
;
1894 smac_info
->candidate_smac_port
= port
;
1899 smac_index
= smac_info
->smac_index
;
1901 memcpy(path
->dmac
, ah
->roce
.dmac
, 6);
1902 path
->ackto
= MLX4_IB_LINK_TYPE_ETH
;
1903 /* put MAC table smac index for IBoE */
1904 path
->grh_mylmc
= (u8
) (smac_index
) | 0x80;
1906 path
->sched_queue
= MLX4_IB_DEFAULT_SCHED_QUEUE
|
1907 ((port
- 1) << 6) | ((rdma_ah_get_sl(ah
) & 0xf) << 2);
static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
             enum ib_qp_attr_mask qp_attr_mask,
             struct mlx4_ib_qp *mqp,
             struct mlx4_qp_path *path, u8 port,
             u16 vlan_id, u8 *smac)
{
    return _mlx4_set_path(dev, &qp->ah_attr,
                  mlx4_mac_to_u64(smac),
                  vlan_id,
                  path, &mqp->pri, port);
}

static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
                 const struct ib_qp_attr *qp,
                 enum ib_qp_attr_mask qp_attr_mask,
                 struct mlx4_ib_qp *mqp,
                 struct mlx4_qp_path *path, u8 port)
{
    return _mlx4_set_path(dev, &qp->alt_ah_attr,
                  0,
                  0xFFFF,
                  path, &mqp->alt, port);
}
static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
    struct mlx4_ib_gid_entry *ge, *tmp;

    list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
        if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
            ge->added = 1;
            ge->port = qp->port;
        }
    }
}
static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev,
                    struct mlx4_ib_qp *qp,
                    struct mlx4_qp_context *context)
{
    u64 u64_mac;
    int smac_index;

    u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);

    context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
    if (!qp->pri.smac && !qp->pri.smac_port) {
        smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
        if (smac_index >= 0) {
            qp->pri.candidate_smac_index = smac_index;
            qp->pri.candidate_smac = u64_mac;
            qp->pri.candidate_smac_port = qp->port;
            context->pri_path.grh_mylmc = 0x80 | (u8) smac_index;
        } else {
            return -ENOENT;
        }
    }
    return 0;
}
static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
    struct counter_index *new_counter_index;
    int err;
    u32 tmp_idx;

    if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) !=
        IB_LINK_LAYER_ETHERNET ||
        !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) ||
        !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK))
        return 0;

    err = mlx4_counter_alloc(dev->dev, &tmp_idx, MLX4_RES_USAGE_DRIVER);
    if (err)
        return err;

    new_counter_index = kmalloc(sizeof(*new_counter_index), GFP_KERNEL);
    if (!new_counter_index) {
        mlx4_counter_free(dev->dev, tmp_idx);
        return -ENOMEM;
    }
    new_counter_index->index = tmp_idx;
    new_counter_index->allocated = 1;
    qp->counter_index = new_counter_index;

    mutex_lock(&dev->counters_table[qp->port - 1].mutex);
    list_add_tail(&new_counter_index->list,
              &dev->counters_table[qp->port - 1].counters_list);
    mutex_unlock(&dev->counters_table[qp->port - 1].mutex);

    return 0;
}
enum {
    MLX4_QPC_ROCE_MODE_1 = 0,
    MLX4_QPC_ROCE_MODE_2 = 2,
    MLX4_QPC_ROCE_MODE_UNDEFINED = 0xff
};

static u8 gid_type_to_qpc(enum ib_gid_type gid_type)
{
    switch (gid_type) {
    case IB_GID_TYPE_ROCE:
        return MLX4_QPC_ROCE_MODE_1;
    case IB_GID_TYPE_ROCE_UDP_ENCAP:
        return MLX4_QPC_ROCE_MODE_2;
    default:
        return MLX4_QPC_ROCE_MODE_UNDEFINED;
    }
}
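/*
 * Note (added for clarity): the QPC RoCE mode selects the on-wire format --
 * per the enum above, mode 1 is used for RoCE v1 GIDs and mode 2 for
 * RoCE v2 (UDP-encapsulated) GIDs.
 */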
2026 * Go over all RSS QP's childes (WQs) and apply their HW state according to
2027 * their logic state if the RSS QP is the first RSS QP associated for the WQ.
2029 static int bringup_rss_rwqs(struct ib_rwq_ind_table
*ind_tbl
, u8 port_num
,
2030 struct ib_udata
*udata
)
2035 for (i
= 0; i
< (1 << ind_tbl
->log_ind_tbl_size
); i
++) {
2036 struct ib_wq
*ibwq
= ind_tbl
->ind_tbl
[i
];
2037 struct mlx4_ib_qp
*wq
= to_mqp((struct ib_qp
*)ibwq
);
2039 mutex_lock(&wq
->mutex
);
2041 /* Mlx4_ib restrictions:
2042 * WQ's is associated to a port according to the RSS QP it is
2044 * In case the WQ is associated to a different port by another
2045 * RSS QP, return a failure.
2047 if ((wq
->rss_usecnt
> 0) && (wq
->port
!= port_num
)) {
2049 mutex_unlock(&wq
->mutex
);
2052 wq
->port
= port_num
;
2053 if ((wq
->rss_usecnt
== 0) && (ibwq
->state
== IB_WQS_RDY
)) {
2054 err
= _mlx4_ib_modify_wq(ibwq
, IB_WQS_RDY
, udata
);
2056 mutex_unlock(&wq
->mutex
);
2062 mutex_unlock(&wq
->mutex
);
2068 for (j
= (i
- 1); j
>= 0; j
--) {
2069 struct ib_wq
*ibwq
= ind_tbl
->ind_tbl
[j
];
2070 struct mlx4_ib_qp
*wq
= to_mqp((struct ib_qp
*)ibwq
);
2072 mutex_lock(&wq
->mutex
);
2074 if ((wq
->rss_usecnt
== 1) &&
2075 (ibwq
->state
== IB_WQS_RDY
))
2076 if (_mlx4_ib_modify_wq(ibwq
, IB_WQS_RESET
,
2078 pr_warn("failed to reverse WQN=0x%06x\n",
2082 mutex_unlock(&wq
->mutex
);
static void bring_down_rss_rwqs(struct ib_rwq_ind_table *ind_tbl,
                struct ib_udata *udata)
{
    int i;

    for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
        struct ib_wq *ibwq = ind_tbl->ind_tbl[i];
        struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);

        mutex_lock(&wq->mutex);

        if ((wq->rss_usecnt == 1) && (ibwq->state == IB_WQS_RDY))
            if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET, udata))
                pr_warn("failed to reverse WQN=%x\n",
                    ibwq->wq_num);
        wq->rss_usecnt--;

        mutex_unlock(&wq->mutex);
    }
}
static void fill_qp_rss_context(struct mlx4_qp_context *context,
                struct mlx4_ib_qp *qp)
{
    struct mlx4_rss_context *rss_context;

    rss_context = (void *)context + offsetof(struct mlx4_qp_context,
                         pri_path) + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;

    rss_context->base_qpn = cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz);
    rss_context->default_qpn =
        cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz & 0xffffff);
    if (qp->rss_ctx->flags & (MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6))
        rss_context->base_qpn_udp = rss_context->default_qpn;
    rss_context->flags = qp->rss_ctx->flags;
    /* Currently support just toeplitz */
    rss_context->hash_fn = MLX4_RSS_HASH_TOP;

    memcpy(rss_context->rss_key, qp->rss_ctx->rss_key,
           MLX4_EN_RSS_KEY_SIZE);
}
static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state,
			       enum ib_qp_state new_state,
			       struct ib_udata *udata)
{
	struct ib_srq *ibsrq;
	const struct ib_gid_attr *gid_attr = NULL;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	enum ib_qp_type qp_type;
	struct mlx4_ib_dev *dev;
	struct mlx4_ib_qp *qp;
	struct mlx4_ib_pd *pd;
	struct mlx4_ib_cq *send_cq, *recv_cq;
	struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);
	struct mlx4_qp_context *context;
	enum mlx4_qp_optpar optpar = 0;
	int sqd_event;
	int steer_qp = 0;
	int err = -EINVAL;
	int counter_index;
	if (src_type == MLX4_IB_RWQ_SRC) {
		struct ib_wq *ibwq;

		ibwq = (struct ib_wq *)src;
		ibsrq = NULL;
		rwq_ind_tbl = NULL;
		qp_type = IB_QPT_RAW_PACKET;
		qp = to_mqp((struct ib_qp *)ibwq);
		dev = to_mdev(ibwq->device);
		pd = to_mpd(ibwq->pd);
	} else {
		struct ib_qp *ibqp;

		ibqp = (struct ib_qp *)src;
		ibsrq = ibqp->srq;
		rwq_ind_tbl = ibqp->rwq_ind_tbl;
		qp_type = ibqp->qp_type;
		qp = to_mqp(ibqp);
		dev = to_mdev(ibqp->device);
		pd = get_pd(qp);
	}
	/* APM is not supported under RoCE */
	if (attr_mask & IB_QP_ALT_PATH &&
	    rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
	    IB_LINK_LAYER_ETHERNET)
		return -ENOTSUPP;

	context = kzalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
				     (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));

	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
	else {
		optpar |= MLX4_QP_OPTPAR_PM_STATE;
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
			break;
		}
	}
	if (qp->inl_recv_sz)
		context->param3 |= cpu_to_be32(1 << 25);

	if (qp->flags & MLX4_IB_QP_SCATTER_FCS)
		context->param3 |= cpu_to_be32(1 << 29);

	if (qp_type == IB_QPT_GSI || qp_type == IB_QPT_SMI)
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
	else if (qp_type == IB_QPT_RAW_PACKET)
		context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
	else if (qp_type == IB_QPT_UD) {
		if (qp->flags & MLX4_IB_QP_LSO)
			context->mtu_msgmax = (IB_MTU_4096 << 5) |
					      ilog2(dev->dev->caps.max_gso_sz);
		else
			context->mtu_msgmax = (IB_MTU_4096 << 5) | 13;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
			pr_err("path MTU (%u) is invalid\n",
			       attr->path_mtu);
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) |
				      ilog2(dev->dev->caps.max_msg_sz);
	}
	if (!rwq_ind_tbl) { /* PRM RSS receive side should be left zeros */
		if (qp->rq.wqe_cnt)
			context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
		context->rq_size_stride |= qp->rq.wqe_shift - 4;
	}

	if (qp->sq.wqe_cnt)
		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
	context->sq_size_stride |= qp->sq.wqe_shift - 4;

	if (new_state == IB_QPS_RESET && qp->counter_index)
		mlx4_ib_free_qp_counter(dev, qp);
	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
		context->xrcd = cpu_to_be32((u32) qp->xrcdn);
		if (qp_type == IB_QPT_RAW_PACKET)
			context->param3 |= cpu_to_be32(1 << 30);
	}

	if (ucontext)
		context->usr_page = cpu_to_be32(
			mlx4_to_hw_uar_index(dev->dev, ucontext->uar.index));
	else
		context->usr_page = cpu_to_be32(
			mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));
	if (attr_mask & IB_QP_DEST_QPN)
		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PORT) {
		if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
		    !(attr_mask & IB_QP_AV)) {
			mlx4_set_sched(&context->pri_path, attr->port_num);
			optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
		}
	}
2270 if (cur_state
== IB_QPS_INIT
&& new_state
== IB_QPS_RTR
) {
2271 err
= create_qp_lb_counter(dev
, qp
);
2276 dev
->counters_table
[qp
->port
- 1].default_counter
;
2277 if (qp
->counter_index
)
2278 counter_index
= qp
->counter_index
->index
;
2280 if (counter_index
!= -1) {
2281 context
->pri_path
.counter_index
= counter_index
;
2282 optpar
|= MLX4_QP_OPTPAR_COUNTER_INDEX
;
2283 if (qp
->counter_index
) {
2284 context
->pri_path
.fl
|=
2285 MLX4_FL_ETH_SRC_CHECK_MC_LB
;
2286 context
->pri_path
.vlan_control
|=
2287 MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER
;
2290 context
->pri_path
.counter_index
=
2291 MLX4_SINK_COUNTER_INDEX(dev
->dev
);
2293 if (qp
->flags
& MLX4_IB_QP_NETIF
) {
2294 mlx4_ib_steer_qp_reg(dev
, qp
, 1);
2298 if (qp_type
== IB_QPT_GSI
) {
2299 enum ib_gid_type gid_type
= qp
->flags
& MLX4_IB_ROCE_V2_GSI_QP
?
2300 IB_GID_TYPE_ROCE_UDP_ENCAP
: IB_GID_TYPE_ROCE
;
2301 u8 qpc_roce_mode
= gid_type_to_qpc(gid_type
);
2303 context
->rlkey_roce_mode
|= (qpc_roce_mode
<< 6);
2307 if (attr_mask
& IB_QP_PKEY_INDEX
) {
2308 if (qp
->mlx4_ib_qp_type
& MLX4_IB_QPT_ANY_SRIOV
)
2309 context
->pri_path
.disable_pkey_check
= 0x40;
2310 context
->pri_path
.pkey_index
= attr
->pkey_index
;
2311 optpar
|= MLX4_QP_OPTPAR_PKEY_INDEX
;
2314 if (attr_mask
& IB_QP_AV
) {
2315 u8 port_num
= mlx4_is_bonded(dev
->dev
) ? 1 :
2316 attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
;
2320 rdma_cap_eth_ah(&dev
->ib_dev
, port_num
) &&
2321 rdma_ah_get_ah_flags(&attr
->ah_attr
) & IB_AH_GRH
;
2324 gid_attr
= attr
->ah_attr
.grh
.sgid_attr
;
2325 err
= rdma_read_gid_l2_fields(gid_attr
, &vlan
,
2331 if (mlx4_set_path(dev
, attr
, attr_mask
, qp
, &context
->pri_path
,
2332 port_num
, vlan
, smac
))
2335 optpar
|= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH
|
2336 MLX4_QP_OPTPAR_SCHED_QUEUE
);
2339 (cur_state
== IB_QPS_INIT
&& new_state
== IB_QPS_RTR
)) {
2340 u8 qpc_roce_mode
= gid_type_to_qpc(gid_attr
->gid_type
);
2342 if (qpc_roce_mode
== MLX4_QPC_ROCE_MODE_UNDEFINED
) {
2346 context
->rlkey_roce_mode
|= (qpc_roce_mode
<< 6);
2351 if (attr_mask
& IB_QP_TIMEOUT
) {
2352 context
->pri_path
.ackto
|= attr
->timeout
<< 3;
2353 optpar
|= MLX4_QP_OPTPAR_ACK_TIMEOUT
;
2356 if (attr_mask
& IB_QP_ALT_PATH
) {
2357 if (attr
->alt_port_num
== 0 ||
2358 attr
->alt_port_num
> dev
->dev
->caps
.num_ports
)
2361 if (attr
->alt_pkey_index
>=
2362 dev
->dev
->caps
.pkey_table_len
[attr
->alt_port_num
])
2365 if (mlx4_set_alt_path(dev
, attr
, attr_mask
, qp
,
2367 attr
->alt_port_num
))
2370 context
->alt_path
.pkey_index
= attr
->alt_pkey_index
;
2371 context
->alt_path
.ackto
= attr
->alt_timeout
<< 3;
2372 optpar
|= MLX4_QP_OPTPAR_ALT_ADDR_PATH
;
2375 context
->pd
= cpu_to_be32(pd
->pdn
);
2378 context
->params1
= cpu_to_be32(MLX4_IB_ACK_REQ_FREQ
<< 28);
2379 get_cqs(qp
, src_type
, &send_cq
, &recv_cq
);
2380 } else { /* Set dummy CQs to be compatible with HV and PRM */
2381 send_cq
= to_mcq(rwq_ind_tbl
->ind_tbl
[0]->cq
);
2384 context
->cqn_send
= cpu_to_be32(send_cq
->mcq
.cqn
);
2385 context
->cqn_recv
= cpu_to_be32(recv_cq
->mcq
.cqn
);
2387 /* Set "fast registration enabled" for all kernel QPs */
2389 context
->params1
|= cpu_to_be32(1 << 11);
2391 if (attr_mask
& IB_QP_RNR_RETRY
) {
2392 context
->params1
|= cpu_to_be32(attr
->rnr_retry
<< 13);
2393 optpar
|= MLX4_QP_OPTPAR_RNR_RETRY
;
2396 if (attr_mask
& IB_QP_RETRY_CNT
) {
2397 context
->params1
|= cpu_to_be32(attr
->retry_cnt
<< 16);
2398 optpar
|= MLX4_QP_OPTPAR_RETRY_COUNT
;
2401 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
) {
2402 if (attr
->max_rd_atomic
)
2404 cpu_to_be32(fls(attr
->max_rd_atomic
- 1) << 21);
2405 optpar
|= MLX4_QP_OPTPAR_SRA_MAX
;
2408 if (attr_mask
& IB_QP_SQ_PSN
)
2409 context
->next_send_psn
= cpu_to_be32(attr
->sq_psn
);
2411 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
) {
2412 if (attr
->max_dest_rd_atomic
)
2414 cpu_to_be32(fls(attr
->max_dest_rd_atomic
- 1) << 21);
2415 optpar
|= MLX4_QP_OPTPAR_RRA_MAX
;
2418 if (attr_mask
& (IB_QP_ACCESS_FLAGS
| IB_QP_MAX_DEST_RD_ATOMIC
)) {
2419 context
->params2
|= to_mlx4_access_flags(qp
, attr
, attr_mask
);
2420 optpar
|= MLX4_QP_OPTPAR_RWE
| MLX4_QP_OPTPAR_RRE
| MLX4_QP_OPTPAR_RAE
;
2424 context
->params2
|= cpu_to_be32(MLX4_QP_BIT_RIC
);
2426 if (attr_mask
& IB_QP_MIN_RNR_TIMER
) {
2427 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->min_rnr_timer
<< 24);
2428 optpar
|= MLX4_QP_OPTPAR_RNR_TIMEOUT
;
2430 if (attr_mask
& IB_QP_RQ_PSN
)
2431 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->rq_psn
);
2433 /* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */
2434 if (attr_mask
& IB_QP_QKEY
) {
2435 if (qp
->mlx4_ib_qp_type
&
2436 (MLX4_IB_QPT_PROXY_SMI_OWNER
| MLX4_IB_QPT_TUN_SMI_OWNER
))
2437 context
->qkey
= cpu_to_be32(IB_QP_SET_QKEY
);
2439 if (mlx4_is_mfunc(dev
->dev
) &&
2440 !(qp
->mlx4_ib_qp_type
& MLX4_IB_QPT_ANY_SRIOV
) &&
2441 (attr
->qkey
& MLX4_RESERVED_QKEY_MASK
) ==
2442 MLX4_RESERVED_QKEY_BASE
) {
2443 pr_err("Cannot use reserved QKEY"
2444 " 0x%x (range 0xffff0000..0xffffffff"
2445 " is reserved)\n", attr
->qkey
);
2449 context
->qkey
= cpu_to_be32(attr
->qkey
);
2451 optpar
|= MLX4_QP_OPTPAR_Q_KEY
;
2455 context
->srqn
= cpu_to_be32(1 << 24 |
2456 to_msrq(ibsrq
)->msrq
.srqn
);
2458 if (qp
->rq
.wqe_cnt
&&
2459 cur_state
== IB_QPS_RESET
&&
2460 new_state
== IB_QPS_INIT
)
2461 context
->db_rec_addr
= cpu_to_be64(qp
->db
.dma
);
2463 if (cur_state
== IB_QPS_INIT
&&
2464 new_state
== IB_QPS_RTR
&&
2465 (qp_type
== IB_QPT_GSI
|| qp_type
== IB_QPT_SMI
||
2466 qp_type
== IB_QPT_UD
|| qp_type
== IB_QPT_RAW_PACKET
)) {
2467 context
->pri_path
.sched_queue
= (qp
->port
- 1) << 6;
2468 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_SMI
||
2469 qp
->mlx4_ib_qp_type
&
2470 (MLX4_IB_QPT_PROXY_SMI_OWNER
| MLX4_IB_QPT_TUN_SMI_OWNER
)) {
2471 context
->pri_path
.sched_queue
|= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE
;
2472 if (qp
->mlx4_ib_qp_type
!= MLX4_IB_QPT_SMI
)
2473 context
->pri_path
.fl
= 0x80;
2475 if (qp
->mlx4_ib_qp_type
& MLX4_IB_QPT_ANY_SRIOV
)
2476 context
->pri_path
.fl
= 0x80;
2477 context
->pri_path
.sched_queue
|= MLX4_IB_DEFAULT_SCHED_QUEUE
;
2479 if (rdma_port_get_link_layer(&dev
->ib_dev
, qp
->port
) ==
2480 IB_LINK_LAYER_ETHERNET
) {
2481 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_TUN_GSI
||
2482 qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_GSI
)
2483 context
->pri_path
.feup
= 1 << 7; /* don't fsm */
2484 /* handle smac_index */
2485 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_UD
||
2486 qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_PROXY_GSI
||
2487 qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_TUN_GSI
) {
2488 err
= handle_eth_ud_smac_index(dev
, qp
, context
);
2493 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_PROXY_GSI
)
2494 dev
->qp1_proxy
[qp
->port
- 1] = qp
;
2499 if (qp_type
== IB_QPT_RAW_PACKET
) {
2500 context
->pri_path
.ackto
= (context
->pri_path
.ackto
& 0xf8) |
2501 MLX4_IB_LINK_TYPE_ETH
;
2502 if (dev
->dev
->caps
.tunnel_offload_mode
== MLX4_TUNNEL_OFFLOAD_MODE_VXLAN
) {
2503 /* set QP to receive both tunneled & non-tunneled packets */
2505 context
->srqn
= cpu_to_be32(7 << 28);
2509 if (qp_type
== IB_QPT_UD
&& (new_state
== IB_QPS_RTR
)) {
2510 int is_eth
= rdma_port_get_link_layer(
2511 &dev
->ib_dev
, qp
->port
) ==
2512 IB_LINK_LAYER_ETHERNET
;
2514 context
->pri_path
.ackto
= MLX4_IB_LINK_TYPE_ETH
;
2515 optpar
|= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH
;
2519 if (cur_state
== IB_QPS_RTS
&& new_state
== IB_QPS_SQD
&&
2520 attr_mask
& IB_QP_EN_SQD_ASYNC_NOTIFY
&& attr
->en_sqd_async_notify
)
2526 cur_state
== IB_QPS_RESET
&&
2527 new_state
== IB_QPS_INIT
)
2528 context
->rlkey_roce_mode
|= (1 << 4);
2531 * Before passing a kernel QP to the HW, make sure that the
2532 * ownership bits of the send queue are set and the SQ
2533 * headroom is stamped so that the hardware doesn't start
2534 * processing stale work requests.
2537 cur_state
== IB_QPS_RESET
&&
2538 new_state
== IB_QPS_INIT
) {
2539 struct mlx4_wqe_ctrl_seg
*ctrl
;
2542 for (i
= 0; i
< qp
->sq
.wqe_cnt
; ++i
) {
2543 ctrl
= get_send_wqe(qp
, i
);
2544 ctrl
->owner_opcode
= cpu_to_be32(1 << 31);
2545 ctrl
->qpn_vlan
.fence_size
=
2546 1 << (qp
->sq
.wqe_shift
- 4);
2547 stamp_send_wqe(qp
, i
);
2552 cur_state
== IB_QPS_RESET
&&
2553 new_state
== IB_QPS_INIT
) {
2554 fill_qp_rss_context(context
, qp
);
2555 context
->flags
|= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET
);
2558 err
= mlx4_qp_modify(dev
->dev
, &qp
->mtt
, to_mlx4_state(cur_state
),
2559 to_mlx4_state(new_state
), context
, optpar
,
2560 sqd_event
, &qp
->mqp
);
2564 qp
->state
= new_state
;
2566 if (attr_mask
& IB_QP_ACCESS_FLAGS
)
2567 qp
->atomic_rd_en
= attr
->qp_access_flags
;
2568 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
)
2569 qp
->resp_depth
= attr
->max_dest_rd_atomic
;
2570 if (attr_mask
& IB_QP_PORT
) {
2571 qp
->port
= attr
->port_num
;
2572 update_mcg_macs(dev
, qp
);
2574 if (attr_mask
& IB_QP_ALT_PATH
)
2575 qp
->alt_port
= attr
->alt_port_num
;
2577 if (is_sqp(dev
, qp
))
2578 store_sqp_attrs(to_msqp(qp
), attr
, attr_mask
);
2581 * If we moved QP0 to RTR, bring the IB link up; if we moved
2582 * QP0 to RESET or ERROR, bring the link back down.
2584 if (is_qp0(dev
, qp
)) {
2585 if (cur_state
!= IB_QPS_RTR
&& new_state
== IB_QPS_RTR
)
2586 if (mlx4_INIT_PORT(dev
->dev
, qp
->port
))
2587 pr_warn("INIT_PORT failed for port %d\n",
2590 if (cur_state
!= IB_QPS_RESET
&& cur_state
!= IB_QPS_ERR
&&
2591 (new_state
== IB_QPS_RESET
|| new_state
== IB_QPS_ERR
))
2592 mlx4_CLOSE_PORT(dev
->dev
, qp
->port
);
2596 * If we moved a kernel QP to RESET, clean up all old CQ
2597 * entries and reinitialize the QP.
2599 if (new_state
== IB_QPS_RESET
) {
2601 mlx4_ib_cq_clean(recv_cq
, qp
->mqp
.qpn
,
2602 ibsrq
? to_msrq(ibsrq
) : NULL
);
2603 if (send_cq
!= recv_cq
)
2604 mlx4_ib_cq_clean(send_cq
, qp
->mqp
.qpn
, NULL
);
2610 qp
->sq_next_wqe
= 0;
2614 if (qp
->flags
& MLX4_IB_QP_NETIF
)
2615 mlx4_ib_steer_qp_reg(dev
, qp
, 0);
2617 if (qp
->pri
.smac
|| (!qp
->pri
.smac
&& qp
->pri
.smac_port
)) {
2618 mlx4_unregister_mac(dev
->dev
, qp
->pri
.smac_port
, qp
->pri
.smac
);
2620 qp
->pri
.smac_port
= 0;
2623 mlx4_unregister_mac(dev
->dev
, qp
->alt
.smac_port
, qp
->alt
.smac
);
2626 if (qp
->pri
.vid
< 0x1000) {
2627 mlx4_unregister_vlan(dev
->dev
, qp
->pri
.vlan_port
, qp
->pri
.vid
);
2628 qp
->pri
.vid
= 0xFFFF;
2629 qp
->pri
.candidate_vid
= 0xFFFF;
2630 qp
->pri
.update_vid
= 0;
2633 if (qp
->alt
.vid
< 0x1000) {
2634 mlx4_unregister_vlan(dev
->dev
, qp
->alt
.vlan_port
, qp
->alt
.vid
);
2635 qp
->alt
.vid
= 0xFFFF;
2636 qp
->alt
.candidate_vid
= 0xFFFF;
2637 qp
->alt
.update_vid
= 0;
2641 if (err
&& qp
->counter_index
)
2642 mlx4_ib_free_qp_counter(dev
, qp
);
2643 if (err
&& steer_qp
)
2644 mlx4_ib_steer_qp_reg(dev
, qp
, 0);
2646 if (qp
->pri
.candidate_smac
||
2647 (!qp
->pri
.candidate_smac
&& qp
->pri
.candidate_smac_port
)) {
2649 mlx4_unregister_mac(dev
->dev
, qp
->pri
.candidate_smac_port
, qp
->pri
.candidate_smac
);
2651 if (qp
->pri
.smac
|| (!qp
->pri
.smac
&& qp
->pri
.smac_port
))
2652 mlx4_unregister_mac(dev
->dev
, qp
->pri
.smac_port
, qp
->pri
.smac
);
2653 qp
->pri
.smac
= qp
->pri
.candidate_smac
;
2654 qp
->pri
.smac_index
= qp
->pri
.candidate_smac_index
;
2655 qp
->pri
.smac_port
= qp
->pri
.candidate_smac_port
;
2657 qp
->pri
.candidate_smac
= 0;
2658 qp
->pri
.candidate_smac_index
= 0;
2659 qp
->pri
.candidate_smac_port
= 0;
2661 if (qp
->alt
.candidate_smac
) {
2663 mlx4_unregister_mac(dev
->dev
, qp
->alt
.candidate_smac_port
, qp
->alt
.candidate_smac
);
2666 mlx4_unregister_mac(dev
->dev
, qp
->alt
.smac_port
, qp
->alt
.smac
);
2667 qp
->alt
.smac
= qp
->alt
.candidate_smac
;
2668 qp
->alt
.smac_index
= qp
->alt
.candidate_smac_index
;
2669 qp
->alt
.smac_port
= qp
->alt
.candidate_smac_port
;
2671 qp
->alt
.candidate_smac
= 0;
2672 qp
->alt
.candidate_smac_index
= 0;
2673 qp
->alt
.candidate_smac_port
= 0;
2676 if (qp
->pri
.update_vid
) {
2678 if (qp
->pri
.candidate_vid
< 0x1000)
2679 mlx4_unregister_vlan(dev
->dev
, qp
->pri
.candidate_vlan_port
,
2680 qp
->pri
.candidate_vid
);
2682 if (qp
->pri
.vid
< 0x1000)
2683 mlx4_unregister_vlan(dev
->dev
, qp
->pri
.vlan_port
,
2685 qp
->pri
.vid
= qp
->pri
.candidate_vid
;
2686 qp
->pri
.vlan_port
= qp
->pri
.candidate_vlan_port
;
2687 qp
->pri
.vlan_index
= qp
->pri
.candidate_vlan_index
;
2689 qp
->pri
.candidate_vid
= 0xFFFF;
2690 qp
->pri
.update_vid
= 0;
2693 if (qp
->alt
.update_vid
) {
2695 if (qp
->alt
.candidate_vid
< 0x1000)
2696 mlx4_unregister_vlan(dev
->dev
, qp
->alt
.candidate_vlan_port
,
2697 qp
->alt
.candidate_vid
);
2699 if (qp
->alt
.vid
< 0x1000)
2700 mlx4_unregister_vlan(dev
->dev
, qp
->alt
.vlan_port
,
2702 qp
->alt
.vid
= qp
->alt
.candidate_vid
;
2703 qp
->alt
.vlan_port
= qp
->alt
.candidate_vlan_port
;
2704 qp
->alt
.vlan_index
= qp
->alt
.candidate_vlan_index
;
2706 qp
->alt
.candidate_vid
= 0xFFFF;
2707 qp
->alt
.update_vid
= 0;
enum {
	MLX4_IB_MODIFY_QP_RSS_SUP_ATTR_MSK = (IB_QP_STATE |
					      IB_QP_PORT),
};

static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			      int attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
2730 if (!ib_modify_qp_is_ok(cur_state
, new_state
, ibqp
->qp_type
,
2732 pr_debug("qpn 0x%x: invalid attribute mask specified "
2733 "for transition %d to %d. qp_type %d,"
2734 " attr_mask 0x%x\n",
2735 ibqp
->qp_num
, cur_state
, new_state
,
2736 ibqp
->qp_type
, attr_mask
);
2740 if (ibqp
->rwq_ind_tbl
) {
2741 if (!(((cur_state
== IB_QPS_RESET
) &&
2742 (new_state
== IB_QPS_INIT
)) ||
2743 ((cur_state
== IB_QPS_INIT
) &&
2744 (new_state
== IB_QPS_RTR
)))) {
2745 pr_debug("qpn 0x%x: RSS QP unsupported transition %d to %d\n",
2746 ibqp
->qp_num
, cur_state
, new_state
);
2752 if (attr_mask
& ~MLX4_IB_MODIFY_QP_RSS_SUP_ATTR_MSK
) {
2753 pr_debug("qpn 0x%x: RSS QP unsupported attribute mask 0x%x for transition %d to %d\n",
2754 ibqp
->qp_num
, attr_mask
, cur_state
, new_state
);
2761 if (mlx4_is_bonded(dev
->dev
) && (attr_mask
& IB_QP_PORT
)) {
2762 if ((cur_state
== IB_QPS_RESET
) && (new_state
== IB_QPS_INIT
)) {
2763 if ((ibqp
->qp_type
== IB_QPT_RC
) ||
2764 (ibqp
->qp_type
== IB_QPT_UD
) ||
2765 (ibqp
->qp_type
== IB_QPT_UC
) ||
2766 (ibqp
->qp_type
== IB_QPT_RAW_PACKET
) ||
2767 (ibqp
->qp_type
== IB_QPT_XRC_INI
)) {
2768 attr
->port_num
= mlx4_ib_bond_next_port(dev
);
2771 /* no sense in changing port_num
2772 * when ports are bonded */
2773 attr_mask
&= ~IB_QP_PORT
;
2777 if ((attr_mask
& IB_QP_PORT
) &&
2778 (attr
->port_num
== 0 || attr
->port_num
> dev
->num_ports
)) {
2779 pr_debug("qpn 0x%x: invalid port number (%d) specified "
2780 "for transition %d to %d. qp_type %d\n",
2781 ibqp
->qp_num
, attr
->port_num
, cur_state
,
2782 new_state
, ibqp
->qp_type
);
2786 if ((attr_mask
& IB_QP_PORT
) && (ibqp
->qp_type
== IB_QPT_RAW_PACKET
) &&
2787 (rdma_port_get_link_layer(&dev
->ib_dev
, attr
->port_num
) !=
2788 IB_LINK_LAYER_ETHERNET
))
2791 if (attr_mask
& IB_QP_PKEY_INDEX
) {
2792 int p
= attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
;
2793 if (attr
->pkey_index
>= dev
->dev
->caps
.pkey_table_len
[p
]) {
2794 pr_debug("qpn 0x%x: invalid pkey index (%d) specified "
2795 "for transition %d to %d. qp_type %d\n",
2796 ibqp
->qp_num
, attr
->pkey_index
, cur_state
,
2797 new_state
, ibqp
->qp_type
);
2802 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
&&
2803 attr
->max_rd_atomic
> dev
->dev
->caps
.max_qp_init_rdma
) {
2804 pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. "
2805 "Transition %d to %d. qp_type %d\n",
2806 ibqp
->qp_num
, attr
->max_rd_atomic
, cur_state
,
2807 new_state
, ibqp
->qp_type
);
2811 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
&&
2812 attr
->max_dest_rd_atomic
> dev
->dev
->caps
.max_qp_dest_rdma
) {
2813 pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. "
2814 "Transition %d to %d. qp_type %d\n",
2815 ibqp
->qp_num
, attr
->max_dest_rd_atomic
, cur_state
,
2816 new_state
, ibqp
->qp_type
);
	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	if (ibqp->rwq_ind_tbl && (new_state == IB_QPS_INIT)) {
		err = bringup_rss_rwqs(ibqp->rwq_ind_tbl, attr->port_num,
				       udata);
		if (err)
			goto out;
	}

	err = __mlx4_ib_modify_qp(ibqp, MLX4_IB_QP_SRC, attr, attr_mask,
				  cur_state, new_state, udata);

	if (ibqp->rwq_ind_tbl && err)
		bring_down_rss_rwqs(ibqp->rwq_ind_tbl, udata);

	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
		attr->port_num = 1;

out:
	mutex_unlock(&qp->mutex);
	return err;
}
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	int ret;

	ret = _mlx4_ib_modify_qp(ibqp, attr, attr_mask, udata);

	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
		struct mlx4_ib_sqp *sqp = to_msqp(mqp);
		int err = 0;

		if (sqp->roce_v2_gsi)
			err = ib_modify_qp(sqp->roce_v2_gsi, attr, attr_mask);
		if (err)
			pr_err("Failed to modify GSI QP for RoCEv2 (%d)\n",
			       err);
	}
	return ret;
}
static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
{
	int i;

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (qpn == dev->caps.spec_qps[i].qp0_proxy ||
		    qpn == dev->caps.spec_qps[i].qp0_tunnel) {
			*qkey = dev->caps.spec_qps[i].qp0_qkey;
			return 0;
		}
	}
	return -EINVAL;
}
2880 static int build_sriov_qp0_header(struct mlx4_ib_sqp
*sqp
,
2881 const struct ib_ud_wr
*wr
,
2882 void *wqe
, unsigned *mlx_seg_len
)
2884 struct mlx4_ib_dev
*mdev
= to_mdev(sqp
->qp
.ibqp
.device
);
2885 struct ib_device
*ib_dev
= &mdev
->ib_dev
;
2886 struct mlx4_wqe_mlx_seg
*mlx
= wqe
;
2887 struct mlx4_wqe_inline_seg
*inl
= wqe
+ sizeof *mlx
;
2888 struct mlx4_ib_ah
*ah
= to_mah(wr
->ah
);
2896 if (wr
->wr
.opcode
!= IB_WR_SEND
)
2901 for (i
= 0; i
< wr
->wr
.num_sge
; ++i
)
2902 send_size
+= wr
->wr
.sg_list
[i
].length
;
2904 /* for proxy-qp0 sends, need to add in size of tunnel header */
2905 /* for tunnel-qp0 sends, tunnel header is already in s/g list */
2906 if (sqp
->qp
.mlx4_ib_qp_type
== MLX4_IB_QPT_PROXY_SMI_OWNER
)
2907 send_size
+= sizeof (struct mlx4_ib_tunnel_header
);
2909 ib_ud_header_init(send_size
, 1, 0, 0, 0, 0, 0, 0, &sqp
->ud_header
);
2911 if (sqp
->qp
.mlx4_ib_qp_type
== MLX4_IB_QPT_PROXY_SMI_OWNER
) {
2912 sqp
->ud_header
.lrh
.service_level
=
2913 be32_to_cpu(ah
->av
.ib
.sl_tclass_flowlabel
) >> 28;
2914 sqp
->ud_header
.lrh
.destination_lid
=
2915 cpu_to_be16(ah
->av
.ib
.g_slid
& 0x7f);
2916 sqp
->ud_header
.lrh
.source_lid
=
2917 cpu_to_be16(ah
->av
.ib
.g_slid
& 0x7f);
2920 mlx
->flags
&= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE
);
2922 /* force loopback */
2923 mlx
->flags
|= cpu_to_be32(MLX4_WQE_MLX_VL15
| 0x1 | MLX4_WQE_MLX_SLR
);
2924 mlx
->rlid
= sqp
->ud_header
.lrh
.destination_lid
;
2926 sqp
->ud_header
.lrh
.virtual_lane
= 0;
2927 sqp
->ud_header
.bth
.solicited_event
= !!(wr
->wr
.send_flags
& IB_SEND_SOLICITED
);
2928 ib_get_cached_pkey(ib_dev
, sqp
->qp
.port
, 0, &pkey
);
2929 sqp
->ud_header
.bth
.pkey
= cpu_to_be16(pkey
);
2930 if (sqp
->qp
.mlx4_ib_qp_type
== MLX4_IB_QPT_TUN_SMI_OWNER
)
2931 sqp
->ud_header
.bth
.destination_qpn
= cpu_to_be32(wr
->remote_qpn
);
2933 sqp
->ud_header
.bth
.destination_qpn
=
2934 cpu_to_be32(mdev
->dev
->caps
.spec_qps
[sqp
->qp
.port
- 1].qp0_tunnel
);
2936 sqp
->ud_header
.bth
.psn
= cpu_to_be32((sqp
->send_psn
++) & ((1 << 24) - 1));
2937 if (mlx4_is_master(mdev
->dev
)) {
2938 if (mlx4_get_parav_qkey(mdev
->dev
, sqp
->qp
.mqp
.qpn
, &qkey
))
2941 if (vf_get_qp0_qkey(mdev
->dev
, sqp
->qp
.mqp
.qpn
, &qkey
))
2944 sqp
->ud_header
.deth
.qkey
= cpu_to_be32(qkey
);
2945 sqp
->ud_header
.deth
.source_qpn
= cpu_to_be32(sqp
->qp
.mqp
.qpn
);
2947 sqp
->ud_header
.bth
.opcode
= IB_OPCODE_UD_SEND_ONLY
;
2948 sqp
->ud_header
.immediate_present
= 0;
2950 header_size
= ib_ud_header_pack(&sqp
->ud_header
, sqp
->header_buf
);
2953 * Inline data segments may not cross a 64 byte boundary. If
2954 * our UD header is bigger than the space available up to the
2955 * next 64 byte boundary in the WQE, use two inline data
2956 * segments to hold the UD header.
2958 spc
= MLX4_INLINE_ALIGN
-
2959 ((unsigned long) (inl
+ 1) & (MLX4_INLINE_ALIGN
- 1));
2960 if (header_size
<= spc
) {
2961 inl
->byte_count
= cpu_to_be32(1 << 31 | header_size
);
2962 memcpy(inl
+ 1, sqp
->header_buf
, header_size
);
2965 inl
->byte_count
= cpu_to_be32(1 << 31 | spc
);
2966 memcpy(inl
+ 1, sqp
->header_buf
, spc
);
2968 inl
= (void *) (inl
+ 1) + spc
;
2969 memcpy(inl
+ 1, sqp
->header_buf
+ spc
, header_size
- spc
);
2971 * Need a barrier here to make sure all the data is
2972 * visible before the byte_count field is set.
2973 * Otherwise the HCA prefetcher could grab the 64-byte
2974 * chunk with this inline segment and get a valid (!=
2975 * 0xffffffff) byte count but stale data, and end up
2976 * generating a packet with bad headers.
2978 * The first inline segment's byte_count field doesn't
2979 * need a barrier, because it comes after a
2980 * control/MLX segment and therefore is at an offset
2984 inl
->byte_count
= cpu_to_be32(1 << 31 | (header_size
- spc
));
2989 ALIGN(i
* sizeof (struct mlx4_wqe_inline_seg
) + header_size
, 16);
static u8 sl_to_vl(struct mlx4_ib_dev *dev, u8 sl, int port_num)
{
	union sl2vl_tbl_to_u64 tmp_vltab;
	u8 vl;

	if (sl > 15)
		return 0xf;
	tmp_vltab.sl64 = atomic64_read(&dev->sl2vl[port_num - 1]);
	vl = tmp_vltab.sl8[sl >> 1];
	if (sl & 1)
		vl &= 0x0f;
	else
		vl >>= 4;
	return vl;
}
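/* The cached sl2vl mapping packs two 4-bit VL entries per byte, which is
 * why the lookup above indexes sl8[] with (sl >> 1) and then selects a
 * nibble according to the SL's parity.
 */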
static int fill_gid_by_hw_index(struct mlx4_ib_dev *ibdev, u8 port_num,
				int index, union ib_gid *gid,
				enum ib_gid_type *gid_type)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	unsigned long flags;

	port_gid_table = &iboe->gids[port_num - 1];
	spin_lock_irqsave(&iboe->lock, flags);
	memcpy(gid, &port_gid_table->gids[index].gid, sizeof(*gid));
	*gid_type = port_gid_table->gids[index].gid_type;
	spin_unlock_irqrestore(&iboe->lock, flags);
	if (rdma_is_zero_gid(gid))
		return -ENOENT;

	return 0;
}
3028 #define MLX4_ROCEV2_QP1_SPORT 0xC000
3029 static int build_mlx_header(struct mlx4_ib_sqp
*sqp
, const struct ib_ud_wr
*wr
,
3030 void *wqe
, unsigned *mlx_seg_len
)
3032 struct ib_device
*ib_dev
= sqp
->qp
.ibqp
.device
;
3033 struct mlx4_ib_dev
*ibdev
= to_mdev(ib_dev
);
3034 struct mlx4_wqe_mlx_seg
*mlx
= wqe
;
3035 struct mlx4_wqe_ctrl_seg
*ctrl
= wqe
;
3036 struct mlx4_wqe_inline_seg
*inl
= wqe
+ sizeof *mlx
;
3037 struct mlx4_ib_ah
*ah
= to_mah(wr
->ah
);
3047 bool is_vlan
= false;
3049 bool is_udp
= false;
3053 for (i
= 0; i
< wr
->wr
.num_sge
; ++i
)
3054 send_size
+= wr
->wr
.sg_list
[i
].length
;
3056 is_eth
= rdma_port_get_link_layer(sqp
->qp
.ibqp
.device
, sqp
->qp
.port
) == IB_LINK_LAYER_ETHERNET
;
3057 is_grh
= mlx4_ib_ah_grh_present(ah
);
3059 enum ib_gid_type gid_type
;
3060 if (mlx4_is_mfunc(to_mdev(ib_dev
)->dev
)) {
3061 /* When multi-function is enabled, the ib_core gid
3062 * indexes don't necessarily match the hw ones, so
3063 * we must use our own cache */
3064 err
= mlx4_get_roce_gid_from_slave(to_mdev(ib_dev
)->dev
,
3065 be32_to_cpu(ah
->av
.ib
.port_pd
) >> 24,
3066 ah
->av
.ib
.gid_index
, &sgid
.raw
[0]);
3070 err
= fill_gid_by_hw_index(ibdev
, sqp
->qp
.port
,
3071 ah
->av
.ib
.gid_index
,
3074 is_udp
= gid_type
== IB_GID_TYPE_ROCE_UDP_ENCAP
;
3076 if (ipv6_addr_v4mapped((struct in6_addr
*)&sgid
))
3086 if (ah
->av
.eth
.vlan
!= cpu_to_be16(0xffff)) {
3087 vlan
= be16_to_cpu(ah
->av
.eth
.vlan
) & 0x0fff;
3091 err
= ib_ud_header_init(send_size
, !is_eth
, is_eth
, is_vlan
, is_grh
,
3092 ip_version
, is_udp
, 0, &sqp
->ud_header
);
3097 sqp
->ud_header
.lrh
.service_level
=
3098 be32_to_cpu(ah
->av
.ib
.sl_tclass_flowlabel
) >> 28;
3099 sqp
->ud_header
.lrh
.destination_lid
= ah
->av
.ib
.dlid
;
3100 sqp
->ud_header
.lrh
.source_lid
= cpu_to_be16(ah
->av
.ib
.g_slid
& 0x7f);
3103 if (is_grh
|| (ip_version
== 6)) {
3104 sqp
->ud_header
.grh
.traffic_class
=
3105 (be32_to_cpu(ah
->av
.ib
.sl_tclass_flowlabel
) >> 20) & 0xff;
3106 sqp
->ud_header
.grh
.flow_label
=
3107 ah
->av
.ib
.sl_tclass_flowlabel
& cpu_to_be32(0xfffff);
3108 sqp
->ud_header
.grh
.hop_limit
= ah
->av
.ib
.hop_limit
;
3110 memcpy(sqp
->ud_header
.grh
.source_gid
.raw
, sgid
.raw
, 16);
3112 if (mlx4_is_mfunc(to_mdev(ib_dev
)->dev
)) {
3113 /* When multi-function is enabled, the ib_core gid
3114 * indexes don't necessarily match the hw ones, so
3115 * we must use our own cache
3117 sqp
->ud_header
.grh
.source_gid
.global
.subnet_prefix
=
3118 cpu_to_be64(atomic64_read(&(to_mdev(ib_dev
)->sriov
.
3119 demux
[sqp
->qp
.port
- 1].
3121 sqp
->ud_header
.grh
.source_gid
.global
.interface_id
=
3122 to_mdev(ib_dev
)->sriov
.demux
[sqp
->qp
.port
- 1].
3123 guid_cache
[ah
->av
.ib
.gid_index
];
3125 sqp
->ud_header
.grh
.source_gid
=
3126 ah
->ibah
.sgid_attr
->gid
;
3129 memcpy(sqp
->ud_header
.grh
.destination_gid
.raw
,
3130 ah
->av
.ib
.dgid
, 16);
3133 if (ip_version
== 4) {
3134 sqp
->ud_header
.ip4
.tos
=
3135 (be32_to_cpu(ah
->av
.ib
.sl_tclass_flowlabel
) >> 20) & 0xff;
3136 sqp
->ud_header
.ip4
.id
= 0;
3137 sqp
->ud_header
.ip4
.frag_off
= htons(IP_DF
);
3138 sqp
->ud_header
.ip4
.ttl
= ah
->av
.eth
.hop_limit
;
3140 memcpy(&sqp
->ud_header
.ip4
.saddr
,
3142 memcpy(&sqp
->ud_header
.ip4
.daddr
, ah
->av
.ib
.dgid
+ 12, 4);
3143 sqp
->ud_header
.ip4
.check
= ib_ud_ip4_csum(&sqp
->ud_header
);
3147 sqp
->ud_header
.udp
.dport
= htons(ROCE_V2_UDP_DPORT
);
3148 sqp
->ud_header
.udp
.sport
= htons(MLX4_ROCEV2_QP1_SPORT
);
3149 sqp
->ud_header
.udp
.csum
= 0;
3152 mlx
->flags
&= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE
);
3155 mlx
->flags
|= cpu_to_be32((!sqp
->qp
.ibqp
.qp_num
? MLX4_WQE_MLX_VL15
: 0) |
3156 (sqp
->ud_header
.lrh
.destination_lid
==
3157 IB_LID_PERMISSIVE
? MLX4_WQE_MLX_SLR
: 0) |
3158 (sqp
->ud_header
.lrh
.service_level
<< 8));
3159 if (ah
->av
.ib
.port_pd
& cpu_to_be32(0x80000000))
3160 mlx
->flags
|= cpu_to_be32(0x1); /* force loopback */
3161 mlx
->rlid
= sqp
->ud_header
.lrh
.destination_lid
;
3164 switch (wr
->wr
.opcode
) {
3166 sqp
->ud_header
.bth
.opcode
= IB_OPCODE_UD_SEND_ONLY
;
3167 sqp
->ud_header
.immediate_present
= 0;
3169 case IB_WR_SEND_WITH_IMM
:
3170 sqp
->ud_header
.bth
.opcode
= IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE
;
3171 sqp
->ud_header
.immediate_present
= 1;
3172 sqp
->ud_header
.immediate_data
= wr
->wr
.ex
.imm_data
;
3179 struct in6_addr in6
;
3181 u16 pcp
= (be32_to_cpu(ah
->av
.ib
.sl_tclass_flowlabel
) >> 29) << 13;
3183 ether_type
= (!is_udp
) ? ETH_P_IBOE
:
3184 (ip_version
== 4 ? ETH_P_IP
: ETH_P_IPV6
);
3186 mlx
->sched_prio
= cpu_to_be16(pcp
);
3188 ether_addr_copy(sqp
->ud_header
.eth
.smac_h
, ah
->av
.eth
.s_mac
);
3189 memcpy(sqp
->ud_header
.eth
.dmac_h
, ah
->av
.eth
.mac
, 6);
3190 memcpy(&ctrl
->srcrb_flags16
[0], ah
->av
.eth
.mac
, 2);
3191 memcpy(&ctrl
->imm
, ah
->av
.eth
.mac
+ 2, 4);
3192 memcpy(&in6
, sgid
.raw
, sizeof(in6
));
3195 if (!memcmp(sqp
->ud_header
.eth
.smac_h
, sqp
->ud_header
.eth
.dmac_h
, 6))
3196 mlx
->flags
|= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK
);
3198 sqp
->ud_header
.eth
.type
= cpu_to_be16(ether_type
);
3200 sqp
->ud_header
.vlan
.type
= cpu_to_be16(ether_type
);
3201 sqp
->ud_header
.vlan
.tag
= cpu_to_be16(vlan
| pcp
);
3204 sqp
->ud_header
.lrh
.virtual_lane
= !sqp
->qp
.ibqp
.qp_num
? 15 :
3205 sl_to_vl(to_mdev(ib_dev
),
3206 sqp
->ud_header
.lrh
.service_level
,
3208 if (sqp
->qp
.ibqp
.qp_num
&& sqp
->ud_header
.lrh
.virtual_lane
== 15)
3210 if (sqp
->ud_header
.lrh
.destination_lid
== IB_LID_PERMISSIVE
)
3211 sqp
->ud_header
.lrh
.source_lid
= IB_LID_PERMISSIVE
;
3213 sqp
->ud_header
.bth
.solicited_event
= !!(wr
->wr
.send_flags
& IB_SEND_SOLICITED
);
3214 if (!sqp
->qp
.ibqp
.qp_num
)
3215 ib_get_cached_pkey(ib_dev
, sqp
->qp
.port
, sqp
->pkey_index
, &pkey
);
3217 ib_get_cached_pkey(ib_dev
, sqp
->qp
.port
, wr
->pkey_index
, &pkey
);
3218 sqp
->ud_header
.bth
.pkey
= cpu_to_be16(pkey
);
3219 sqp
->ud_header
.bth
.destination_qpn
= cpu_to_be32(wr
->remote_qpn
);
3220 sqp
->ud_header
.bth
.psn
= cpu_to_be32((sqp
->send_psn
++) & ((1 << 24) - 1));
3221 sqp
->ud_header
.deth
.qkey
= cpu_to_be32(wr
->remote_qkey
& 0x80000000 ?
3222 sqp
->qkey
: wr
->remote_qkey
);
3223 sqp
->ud_header
.deth
.source_qpn
= cpu_to_be32(sqp
->qp
.ibqp
.qp_num
);
3225 header_size
= ib_ud_header_pack(&sqp
->ud_header
, sqp
->header_buf
);
3228 pr_err("built UD header of size %d:\n", header_size
);
3229 for (i
= 0; i
< header_size
/ 4; ++i
) {
3231 pr_err(" [%02x] ", i
* 4);
3233 be32_to_cpu(((__be32
*) sqp
->header_buf
)[i
]));
3234 if ((i
+ 1) % 8 == 0)
3241 * Inline data segments may not cross a 64 byte boundary. If
3242 * our UD header is bigger than the space available up to the
3243 * next 64 byte boundary in the WQE, use two inline data
3244 * segments to hold the UD header.
3246 spc
= MLX4_INLINE_ALIGN
-
3247 ((unsigned long) (inl
+ 1) & (MLX4_INLINE_ALIGN
- 1));
3248 if (header_size
<= spc
) {
3249 inl
->byte_count
= cpu_to_be32(1 << 31 | header_size
);
3250 memcpy(inl
+ 1, sqp
->header_buf
, header_size
);
3253 inl
->byte_count
= cpu_to_be32(1 << 31 | spc
);
3254 memcpy(inl
+ 1, sqp
->header_buf
, spc
);
3256 inl
= (void *) (inl
+ 1) + spc
;
3257 memcpy(inl
+ 1, sqp
->header_buf
+ spc
, header_size
- spc
);
3259 * Need a barrier here to make sure all the data is
3260 * visible before the byte_count field is set.
3261 * Otherwise the HCA prefetcher could grab the 64-byte
3262 * chunk with this inline segment and get a valid (!=
3263 * 0xffffffff) byte count but stale data, and end up
3264 * generating a packet with bad headers.
3266 * The first inline segment's byte_count field doesn't
3267 * need a barrier, because it comes after a
3268 * control/MLX segment and therefore is at an offset
3272 inl
->byte_count
= cpu_to_be32(1 << 31 | (header_size
- spc
));
3277 ALIGN(i
* sizeof (struct mlx4_wqe_inline_seg
) + header_size
, 16);
static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mlx4_ib_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}
static __be32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC) : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ?
		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ) : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) |
	       cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
}
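/* convert_access() maps IB memory-access flags onto the mlx4 FMR/bind
 * permission bits used in the fast-register WQE segment; local read
 * permission is always granted regardless of the requested flags.
 */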
static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg,
			const struct ib_reg_wr *wr)
{
	struct mlx4_ib_mr *mr = to_mmr(wr->mr);

	fseg->flags = convert_access(wr->access);
	fseg->mem_key = cpu_to_be32(wr->key);
	fseg->buf_list = cpu_to_be64(mr->page_map);
	fseg->start_addr = cpu_to_be64(mr->ibmr.iova);
	fseg->reg_len = cpu_to_be64(mr->ibmr.length);
	fseg->offset = 0; /* XXX -- is this just for ZBVA? */
	fseg->page_size = cpu_to_be32(ilog2(mr->ibmr.page_size));
	fseg->reserved[0] = 0;
	fseg->reserved[1] = 0;
}
static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)
{
	memset(iseg, 0, sizeof(*iseg));
	iseg->mem_key = cpu_to_be32(rkey);
}
static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr = cpu_to_be64(remote_addr);
	rseg->rkey = cpu_to_be32(rkey);
	rseg->reserved = 0;
}
static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
			   const struct ib_atomic_wr *wr)
{
	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->swap);
		aseg->compare = cpu_to_be64(wr->compare_add);
	} else if (wr->wr.opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
		aseg->swap_add = cpu_to_be64(wr->compare_add);
		aseg->compare = cpu_to_be64(wr->compare_add_mask);
	} else {
		aseg->swap_add = cpu_to_be64(wr->compare_add);
		aseg->compare = 0;
	}
}
static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
				  const struct ib_atomic_wr *wr)
{
	aseg->swap_add = cpu_to_be64(wr->swap);
	aseg->swap_add_mask = cpu_to_be64(wr->swap_mask);
	aseg->compare = cpu_to_be64(wr->compare_add);
	aseg->compare_mask = cpu_to_be64(wr->compare_add_mask);
}
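/* The masked variant above carries separate swap/compare masks in the WQE;
 * the plain atomic segment in set_atomic_seg() reuses swap_add/compare for
 * both the compare-and-swap and fetch-and-add encodings.
 */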
static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
			     const struct ib_ud_wr *wr)
{
	memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av));
	dseg->dqpn = cpu_to_be32(wr->remote_qpn);
	dseg->qkey = cpu_to_be32(wr->remote_qkey);
	dseg->vlan = to_mah(wr->ah)->av.eth.vlan;
	memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6);
}
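/* For UD sends the datagram segment embeds the whole mlx4 address vector
 * plus the destination QPN/QKEY; the Ethernet MAC and VLAN copied here are
 * only meaningful on RoCE (Ethernet) ports.
 */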
static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
				    struct mlx4_wqe_datagram_seg *dseg,
				    const struct ib_ud_wr *wr,
				    enum mlx4_ib_qp_type qpt)
{
	union mlx4_ext_av *av = &to_mah(wr->ah)->av;
	struct mlx4_av sqp_av = {0};
	int port = *((u8 *) &av->ib.port_pd) & 0x3;

	/* force loopback */
	sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000);
	sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */
	sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel &
			cpu_to_be32(0xf0000000);

	memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
	if (qpt == MLX4_IB_QPT_PROXY_GSI)
		dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp1_tunnel);
	else
		dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp0_tunnel);
	/* Use QKEY from the QP context, which is set by master */
	dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
}
static void build_tunnel_header(const struct ib_ud_wr *wr, void *wqe,
				unsigned *mlx_seg_len)
{
	struct mlx4_wqe_inline_seg *inl = wqe;
	struct mlx4_ib_tunnel_header hdr;
	struct mlx4_ib_ah *ah = to_mah(wr->ah);
	int spc;
	int i;

	memcpy(&hdr.av, &ah->av, sizeof hdr.av);
	hdr.remote_qpn = cpu_to_be32(wr->remote_qpn);
	hdr.pkey_index = cpu_to_be16(wr->pkey_index);
	hdr.qkey = cpu_to_be32(wr->remote_qkey);
	memcpy(hdr.mac, ah->av.eth.mac, 6);
	hdr.vlan = ah->av.eth.vlan;

	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (sizeof (hdr) <= spc) {
		memcpy(inl + 1, &hdr, sizeof (hdr));
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | sizeof (hdr));
		i = 1;
	} else {
		memcpy(inl + 1, &hdr, spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (sizeof (hdr) - spc));
		i = 2;
	}

	*mlx_seg_len =
		ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16);
}
static void set_mlx_icrc_seg(void *dseg)
{
	u32 *t = dseg;
	struct mlx4_wqe_inline_seg *iseg = dseg;

	t[1] = 0;

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	iseg->byte_count = cpu_to_be32((1 << 31) | 4);
}
static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey = cpu_to_be32(sg->lkey);
	dseg->addr = cpu_to_be64(sg->addr);

	/*
	 * Need a barrier here before writing the byte_count field to
	 * make sure that all the data is visible before the
	 * byte_count field is set.  Otherwise, if the segment begins
	 * a new cacheline, the HCA prefetcher could grab the 64-byte
	 * chunk and get a valid (!= 0xffffffff) byte count but
	 * stale data, and end up sending the wrong data.
	 */
	wmb();

	dseg->byte_count = cpu_to_be32(sg->length);
}
static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey = cpu_to_be32(sg->lkey);
	dseg->addr = cpu_to_be64(sg->addr);
}
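/* set_data_seg() (send path) writes byte_count last, behind a barrier, so
 * the HCA never sees a valid count paired with stale data; __set_data_seg()
 * (receive path) has no such ordering requirement and fills the segment in
 * plain order.
 */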
static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe,
			 const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp,
			 unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh)
{
	unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16);

	if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
		*blh = cpu_to_be32(1 << 6);

	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
		     wr->wr.num_sge > qp->sq.max_gs - (halign >> 4)))
		return -EINVAL;

	memcpy(wqe->header, wr->header, wr->hlen);

	*lso_hdr_sz = cpu_to_be32(wr->mss << 16 | wr->hlen);
	*lso_seg_len = halign;
	return 0;
}
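/* build_lso_seg() sets the "big LSO header" bit (1 << 6) in *blh when the
 * aligned header would spill past one cache line, and fails the request
 * when LSO is not enabled on the QP and the header would eat into the SGE
 * budget of the send queue.
 */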
static __be32 send_ieth(const struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}
static void add_zero_len_inline(void *wqe)
{
	struct mlx4_wqe_inline_seg *inl = wqe;

	memset(wqe, 0, 16);
	inl->byte_count = cpu_to_be32(1 << 31);
}
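/* A zero-length inline segment is posted ahead of the tunnel header so that
 * the header itself starts on a cache-line boundary (see the
 * MLX4_IB_QPT_PROXY_SMI_OWNER case in _mlx4_ib_post_send() below).
 */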
3525 static int _mlx4_ib_post_send(struct ib_qp
*ibqp
, const struct ib_send_wr
*wr
,
3526 const struct ib_send_wr
**bad_wr
, bool drain
)
3528 struct mlx4_ib_qp
*qp
= to_mqp(ibqp
);
3530 struct mlx4_wqe_ctrl_seg
*ctrl
;
3531 struct mlx4_wqe_data_seg
*dseg
;
3532 unsigned long flags
;
3536 int uninitialized_var(size
);
3537 unsigned uninitialized_var(seglen
);
3540 __be32
uninitialized_var(lso_hdr_sz
);
3543 struct mlx4_ib_dev
*mdev
= to_mdev(ibqp
->device
);
3545 if (qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_GSI
) {
3546 struct mlx4_ib_sqp
*sqp
= to_msqp(qp
);
3548 if (sqp
->roce_v2_gsi
) {
3549 struct mlx4_ib_ah
*ah
= to_mah(ud_wr(wr
)->ah
);
3550 enum ib_gid_type gid_type
;
3553 if (!fill_gid_by_hw_index(mdev
, sqp
->qp
.port
,
3554 ah
->av
.ib
.gid_index
,
3556 qp
= (gid_type
== IB_GID_TYPE_ROCE_UDP_ENCAP
) ?
3557 to_mqp(sqp
->roce_v2_gsi
) : qp
;
3559 pr_err("Failed to get gid at index %d. RoCEv2 will not work properly\n",
3560 ah
->av
.ib
.gid_index
);
3564 spin_lock_irqsave(&qp
->sq
.lock
, flags
);
3565 if (mdev
->dev
->persist
->state
& MLX4_DEVICE_STATE_INTERNAL_ERROR
&&
3573 ind
= qp
->sq_next_wqe
;
3575 for (nreq
= 0; wr
; ++nreq
, wr
= wr
->next
) {
3579 if (mlx4_wq_overflow(&qp
->sq
, nreq
, qp
->ibqp
.send_cq
)) {
3585 if (unlikely(wr
->num_sge
> qp
->sq
.max_gs
)) {
3591 ctrl
= wqe
= get_send_wqe(qp
, ind
& (qp
->sq
.wqe_cnt
- 1));
3592 qp
->sq
.wrid
[(qp
->sq
.head
+ nreq
) & (qp
->sq
.wqe_cnt
- 1)] = wr
->wr_id
;
3595 (wr
->send_flags
& IB_SEND_SIGNALED
?
3596 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE
) : 0) |
3597 (wr
->send_flags
& IB_SEND_SOLICITED
?
3598 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED
) : 0) |
3599 ((wr
->send_flags
& IB_SEND_IP_CSUM
) ?
3600 cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM
|
3601 MLX4_WQE_CTRL_TCP_UDP_CSUM
) : 0) |
3604 ctrl
->imm
= send_ieth(wr
);
3606 wqe
+= sizeof *ctrl
;
3607 size
= sizeof *ctrl
/ 16;
3609 switch (qp
->mlx4_ib_qp_type
) {
3610 case MLX4_IB_QPT_RC
:
3611 case MLX4_IB_QPT_UC
:
3612 switch (wr
->opcode
) {
3613 case IB_WR_ATOMIC_CMP_AND_SWP
:
3614 case IB_WR_ATOMIC_FETCH_AND_ADD
:
3615 case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD
:
3616 set_raddr_seg(wqe
, atomic_wr(wr
)->remote_addr
,
3617 atomic_wr(wr
)->rkey
);
3618 wqe
+= sizeof (struct mlx4_wqe_raddr_seg
);
3620 set_atomic_seg(wqe
, atomic_wr(wr
));
3621 wqe
+= sizeof (struct mlx4_wqe_atomic_seg
);
3623 size
+= (sizeof (struct mlx4_wqe_raddr_seg
) +
3624 sizeof (struct mlx4_wqe_atomic_seg
)) / 16;
3628 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP
:
3629 set_raddr_seg(wqe
, atomic_wr(wr
)->remote_addr
,
3630 atomic_wr(wr
)->rkey
);
3631 wqe
+= sizeof (struct mlx4_wqe_raddr_seg
);
3633 set_masked_atomic_seg(wqe
, atomic_wr(wr
));
3634 wqe
+= sizeof (struct mlx4_wqe_masked_atomic_seg
);
3636 size
+= (sizeof (struct mlx4_wqe_raddr_seg
) +
3637 sizeof (struct mlx4_wqe_masked_atomic_seg
)) / 16;
3641 case IB_WR_RDMA_READ
:
3642 case IB_WR_RDMA_WRITE
:
3643 case IB_WR_RDMA_WRITE_WITH_IMM
:
3644 set_raddr_seg(wqe
, rdma_wr(wr
)->remote_addr
,
3646 wqe
+= sizeof (struct mlx4_wqe_raddr_seg
);
3647 size
+= sizeof (struct mlx4_wqe_raddr_seg
) / 16;
3650 case IB_WR_LOCAL_INV
:
3651 ctrl
->srcrb_flags
|=
3652 cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER
);
3653 set_local_inv_seg(wqe
, wr
->ex
.invalidate_rkey
);
3654 wqe
+= sizeof (struct mlx4_wqe_local_inval_seg
);
3655 size
+= sizeof (struct mlx4_wqe_local_inval_seg
) / 16;
3659 ctrl
->srcrb_flags
|=
3660 cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER
);
3661 set_reg_seg(wqe
, reg_wr(wr
));
3662 wqe
+= sizeof(struct mlx4_wqe_fmr_seg
);
3663 size
+= sizeof(struct mlx4_wqe_fmr_seg
) / 16;
3667 /* No extra segments required for sends */
3672 case MLX4_IB_QPT_TUN_SMI_OWNER
:
3673 err
= build_sriov_qp0_header(to_msqp(qp
), ud_wr(wr
),
3675 if (unlikely(err
)) {
3680 size
+= seglen
/ 16;
3682 case MLX4_IB_QPT_TUN_SMI
:
3683 case MLX4_IB_QPT_TUN_GSI
:
3684 /* this is a UD qp used in MAD responses to slaves. */
3685 set_datagram_seg(wqe
, ud_wr(wr
));
3686 /* set the forced-loopback bit in the data seg av */
3687 *(__be32
*) wqe
|= cpu_to_be32(0x80000000);
3688 wqe
+= sizeof (struct mlx4_wqe_datagram_seg
);
3689 size
+= sizeof (struct mlx4_wqe_datagram_seg
) / 16;
3691 case MLX4_IB_QPT_UD
:
3692 set_datagram_seg(wqe
, ud_wr(wr
));
3693 wqe
+= sizeof (struct mlx4_wqe_datagram_seg
);
3694 size
+= sizeof (struct mlx4_wqe_datagram_seg
) / 16;
3696 if (wr
->opcode
== IB_WR_LSO
) {
3697 err
= build_lso_seg(wqe
, ud_wr(wr
), qp
, &seglen
,
3699 if (unlikely(err
)) {
3703 lso_wqe
= (__be32
*) wqe
;
3705 size
+= seglen
/ 16;
3709 case MLX4_IB_QPT_PROXY_SMI_OWNER
:
3710 err
= build_sriov_qp0_header(to_msqp(qp
), ud_wr(wr
),
3712 if (unlikely(err
)) {
3717 size
+= seglen
/ 16;
3718 /* to start tunnel header on a cache-line boundary */
3719 add_zero_len_inline(wqe
);
3722 build_tunnel_header(ud_wr(wr
), wqe
, &seglen
);
3724 size
+= seglen
/ 16;
3726 case MLX4_IB_QPT_PROXY_SMI
:
3727 case MLX4_IB_QPT_PROXY_GSI
:
3728 /* If we are tunneling special qps, this is a UD qp.
3729 * In this case we first add a UD segment targeting
3730 * the tunnel qp, and then add a header with address
3732 set_tunnel_datagram_seg(to_mdev(ibqp
->device
), wqe
,
3734 qp
->mlx4_ib_qp_type
);
3735 wqe
+= sizeof (struct mlx4_wqe_datagram_seg
);
3736 size
+= sizeof (struct mlx4_wqe_datagram_seg
) / 16;
3737 build_tunnel_header(ud_wr(wr
), wqe
, &seglen
);
3739 size
+= seglen
/ 16;
3742 case MLX4_IB_QPT_SMI
:
3743 case MLX4_IB_QPT_GSI
:
3744 err
= build_mlx_header(to_msqp(qp
), ud_wr(wr
), ctrl
,
3746 if (unlikely(err
)) {
3751 size
+= seglen
/ 16;
3759 * Write data segments in reverse order, so as to
3760 * overwrite cacheline stamp last within each
3761 * cacheline. This avoids issues with WQE
3766 dseg
+= wr
->num_sge
- 1;
3767 size
+= wr
->num_sge
* (sizeof (struct mlx4_wqe_data_seg
) / 16);
3769 /* Add one more inline data segment for ICRC for MLX sends */
3770 if (unlikely(qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_SMI
||
3771 qp
->mlx4_ib_qp_type
== MLX4_IB_QPT_GSI
||
3772 qp
->mlx4_ib_qp_type
&
3773 (MLX4_IB_QPT_PROXY_SMI_OWNER
| MLX4_IB_QPT_TUN_SMI_OWNER
))) {
3774 set_mlx_icrc_seg(dseg
+ 1);
3775 size
+= sizeof (struct mlx4_wqe_data_seg
) / 16;
3778 for (i
= wr
->num_sge
- 1; i
>= 0; --i
, --dseg
)
3779 set_data_seg(dseg
, wr
->sg_list
+ i
);
3782 * Possibly overwrite stamping in cacheline with LSO
3783 * segment only after making sure all data segments
3787 *lso_wqe
= lso_hdr_sz
;
3789 ctrl
->qpn_vlan
.fence_size
= (wr
->send_flags
& IB_SEND_FENCE
?
3790 MLX4_WQE_CTRL_FENCE
: 0) | size
;
3793 * Make sure descriptor is fully written before
3794 * setting ownership bit (because HW can start
3795 * executing as soon as we do).
3799 if (wr
->opcode
< 0 || wr
->opcode
>= ARRAY_SIZE(mlx4_ib_opcode
)) {
3805 ctrl
->owner_opcode
= mlx4_ib_opcode
[wr
->opcode
] |
3806 (ind
& qp
->sq
.wqe_cnt
? cpu_to_be32(1 << 31) : 0) | blh
;
3809 * We can improve latency by not stamping the last
3810 * send queue WQE until after ringing the doorbell, so
3811 * only stamp here if there are still more WQEs to post.
3814 stamp_send_wqe(qp
, ind
+ qp
->sq_spare_wqes
);
3820 qp
->sq
.head
+= nreq
;
3823 * Make sure that descriptors are written before
3828 writel_relaxed(qp
->doorbell_qpn
,
3829 to_mdev(ibqp
->device
)->uar_map
+ MLX4_SEND_DOORBELL
);
3831 stamp_send_wqe(qp
, ind
+ qp
->sq_spare_wqes
- 1);
3833 qp
->sq_next_wqe
= ind
;
3836 spin_unlock_irqrestore(&qp
->sq
.lock
, flags
);
int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr)
{
	return _mlx4_ib_post_send(ibqp, wr, bad_wr, false);
}
3847 static int _mlx4_ib_post_recv(struct ib_qp
*ibqp
, const struct ib_recv_wr
*wr
,
3848 const struct ib_recv_wr
**bad_wr
, bool drain
)
3850 struct mlx4_ib_qp
*qp
= to_mqp(ibqp
);
3851 struct mlx4_wqe_data_seg
*scat
;
3852 unsigned long flags
;
3858 struct mlx4_ib_dev
*mdev
= to_mdev(ibqp
->device
);
3860 max_gs
= qp
->rq
.max_gs
;
3861 spin_lock_irqsave(&qp
->rq
.lock
, flags
);
3863 if (mdev
->dev
->persist
->state
& MLX4_DEVICE_STATE_INTERNAL_ERROR
&&
3871 ind
= qp
->rq
.head
& (qp
->rq
.wqe_cnt
- 1);
3873 for (nreq
= 0; wr
; ++nreq
, wr
= wr
->next
) {
3874 if (mlx4_wq_overflow(&qp
->rq
, nreq
, qp
->ibqp
.recv_cq
)) {
3880 if (unlikely(wr
->num_sge
> qp
->rq
.max_gs
)) {
3886 scat
= get_recv_wqe(qp
, ind
);
3888 if (qp
->mlx4_ib_qp_type
& (MLX4_IB_QPT_PROXY_SMI_OWNER
|
3889 MLX4_IB_QPT_PROXY_SMI
| MLX4_IB_QPT_PROXY_GSI
)) {
3890 ib_dma_sync_single_for_device(ibqp
->device
,
3891 qp
->sqp_proxy_rcv
[ind
].map
,
3892 sizeof (struct mlx4_ib_proxy_sqp_hdr
),
3895 cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr
));
3896 /* use dma lkey from upper layer entry */
3897 scat
->lkey
= cpu_to_be32(wr
->sg_list
->lkey
);
3898 scat
->addr
= cpu_to_be64(qp
->sqp_proxy_rcv
[ind
].map
);
3903 for (i
= 0; i
< wr
->num_sge
; ++i
)
3904 __set_data_seg(scat
+ i
, wr
->sg_list
+ i
);
3907 scat
[i
].byte_count
= 0;
3908 scat
[i
].lkey
= cpu_to_be32(MLX4_INVALID_LKEY
);
3912 qp
->rq
.wrid
[ind
] = wr
->wr_id
;
3914 ind
= (ind
+ 1) & (qp
->rq
.wqe_cnt
- 1);
3919 qp
->rq
.head
+= nreq
;
3922 * Make sure that descriptors are written before
3927 *qp
->db
.db
= cpu_to_be32(qp
->rq
.head
& 0xffff);
3930 spin_unlock_irqrestore(&qp
->rq
.lock
, flags
);
int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	return _mlx4_ib_post_recv(ibqp, wr, bad_wr, false);
}
static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
{
	switch (mlx4_state) {
	case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX4_QP_STATE_SQ_DRAINING:
	case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
	default:                     return -1;
	}
}
static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
{
	switch (mlx4_mig_state) {
	case MLX4_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX4_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX4_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}
static int to_ib_qp_access_flags(int mlx4_flags)
{
	int ib_flags = 0;

	if (mlx4_flags & MLX4_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx4_flags & MLX4_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx4_flags & MLX4_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}
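/* These helpers translate the firmware QP context encodings back into the
 * generic ib_qp_attr values reported by mlx4_ib_query_qp() further below.
 */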
3980 static void to_rdma_ah_attr(struct mlx4_ib_dev
*ibdev
,
3981 struct rdma_ah_attr
*ah_attr
,
3982 struct mlx4_qp_path
*path
)
3984 struct mlx4_dev
*dev
= ibdev
->dev
;
3985 u8 port_num
= path
->sched_queue
& 0x40 ? 2 : 1;
3987 memset(ah_attr
, 0, sizeof(*ah_attr
));
3988 if (port_num
== 0 || port_num
> dev
->caps
.num_ports
)
3990 ah_attr
->type
= rdma_ah_find_type(&ibdev
->ib_dev
, port_num
);
3992 if (ah_attr
->type
== RDMA_AH_ATTR_TYPE_ROCE
)
3993 rdma_ah_set_sl(ah_attr
, ((path
->sched_queue
>> 3) & 0x7) |
3994 ((path
->sched_queue
& 4) << 1));
3996 rdma_ah_set_sl(ah_attr
, (path
->sched_queue
>> 2) & 0xf);
3997 rdma_ah_set_port_num(ah_attr
, port_num
);
3999 rdma_ah_set_dlid(ah_attr
, be16_to_cpu(path
->rlid
));
4000 rdma_ah_set_path_bits(ah_attr
, path
->grh_mylmc
& 0x7f);
4001 rdma_ah_set_static_rate(ah_attr
,
4002 path
->static_rate
? path
->static_rate
- 5 : 0);
4003 if (path
->grh_mylmc
& (1 << 7)) {
4004 rdma_ah_set_grh(ah_attr
, NULL
,
4005 be32_to_cpu(path
->tclass_flowlabel
) & 0xfffff,
4008 (be32_to_cpu(path
->tclass_flowlabel
)
4010 rdma_ah_set_dgid_raw(ah_attr
, path
->rgid
);
4014 int mlx4_ib_query_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*qp_attr
, int qp_attr_mask
,
4015 struct ib_qp_init_attr
*qp_init_attr
)
4017 struct mlx4_ib_dev
*dev
= to_mdev(ibqp
->device
);
4018 struct mlx4_ib_qp
*qp
= to_mqp(ibqp
);
4019 struct mlx4_qp_context context
;
4023 if (ibqp
->rwq_ind_tbl
)
4026 mutex_lock(&qp
->mutex
);
4028 if (qp
->state
== IB_QPS_RESET
) {
4029 qp_attr
->qp_state
= IB_QPS_RESET
;
4033 err
= mlx4_qp_query(dev
->dev
, &qp
->mqp
, &context
);
4039 mlx4_state
= be32_to_cpu(context
.flags
) >> 28;
4041 qp
->state
= to_ib_qp_state(mlx4_state
);
4042 qp_attr
->qp_state
= qp
->state
;
4043 qp_attr
->path_mtu
= context
.mtu_msgmax
>> 5;
4044 qp_attr
->path_mig_state
=
4045 to_ib_mig_state((be32_to_cpu(context
.flags
) >> 11) & 0x3);
4046 qp_attr
->qkey
= be32_to_cpu(context
.qkey
);
4047 qp_attr
->rq_psn
= be32_to_cpu(context
.rnr_nextrecvpsn
) & 0xffffff;
4048 qp_attr
->sq_psn
= be32_to_cpu(context
.next_send_psn
) & 0xffffff;
4049 qp_attr
->dest_qp_num
= be32_to_cpu(context
.remote_qpn
) & 0xffffff;
4050 qp_attr
->qp_access_flags
=
4051 to_ib_qp_access_flags(be32_to_cpu(context
.params2
));
4053 if (qp
->ibqp
.qp_type
== IB_QPT_RC
|| qp
->ibqp
.qp_type
== IB_QPT_UC
) {
4054 to_rdma_ah_attr(dev
, &qp_attr
->ah_attr
, &context
.pri_path
);
4055 to_rdma_ah_attr(dev
, &qp_attr
->alt_ah_attr
, &context
.alt_path
);
4056 qp_attr
->alt_pkey_index
= context
.alt_path
.pkey_index
& 0x7f;
4057 qp_attr
->alt_port_num
=
4058 rdma_ah_get_port_num(&qp_attr
->alt_ah_attr
);
4061 qp_attr
->pkey_index
= context
.pri_path
.pkey_index
& 0x7f;
4062 if (qp_attr
->qp_state
== IB_QPS_INIT
)
4063 qp_attr
->port_num
= qp
->port
;
4065 qp_attr
->port_num
= context
.pri_path
.sched_queue
& 0x40 ? 2 : 1;
4067 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
4068 qp_attr
->sq_draining
= mlx4_state
== MLX4_QP_STATE_SQ_DRAINING
;
4070 qp_attr
->max_rd_atomic
= 1 << ((be32_to_cpu(context
.params1
) >> 21) & 0x7);
4072 qp_attr
->max_dest_rd_atomic
=
4073 1 << ((be32_to_cpu(context
.params2
) >> 21) & 0x7);
4074 qp_attr
->min_rnr_timer
=
4075 (be32_to_cpu(context
.rnr_nextrecvpsn
) >> 24) & 0x1f;
4076 qp_attr
->timeout
= context
.pri_path
.ackto
>> 3;
4077 qp_attr
->retry_cnt
= (be32_to_cpu(context
.params1
) >> 16) & 0x7;
4078 qp_attr
->rnr_retry
= (be32_to_cpu(context
.params1
) >> 13) & 0x7;
4079 qp_attr
->alt_timeout
= context
.alt_path
.ackto
>> 3;
4082 qp_attr
->cur_qp_state
= qp_attr
->qp_state
;
4083 qp_attr
->cap
.max_recv_wr
= qp
->rq
.wqe_cnt
;
4084 qp_attr
->cap
.max_recv_sge
= qp
->rq
.max_gs
;
4086 if (!ibqp
->uobject
) {
4087 qp_attr
->cap
.max_send_wr
= qp
->sq
.wqe_cnt
;
4088 qp_attr
->cap
.max_send_sge
= qp
->sq
.max_gs
;
4090 qp_attr
->cap
.max_send_wr
= 0;
4091 qp_attr
->cap
.max_send_sge
= 0;
4095 * We don't support inline sends for kernel QPs (yet), and we
4096 * don't know what userspace's value should be.
4098 qp_attr
->cap
.max_inline_data
= 0;
4100 qp_init_attr
->cap
= qp_attr
->cap
;
4102 qp_init_attr
->create_flags
= 0;
4103 if (qp
->flags
& MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK
)
4104 qp_init_attr
->create_flags
|= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
;
4106 if (qp
->flags
& MLX4_IB_QP_LSO
)
4107 qp_init_attr
->create_flags
|= IB_QP_CREATE_IPOIB_UD_LSO
;
4109 if (qp
->flags
& MLX4_IB_QP_NETIF
)
4110 qp_init_attr
->create_flags
|= IB_QP_CREATE_NETIF_QP
;
4112 qp_init_attr
->sq_sig_type
=
4113 qp
->sq_signal_bits
== cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE
) ?
4114 IB_SIGNAL_ALL_WR
: IB_SIGNAL_REQ_WR
;
4117 mutex_unlock(&qp
->mutex
);
struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx4_dev *dev = to_mdev(pd->device)->dev;
	struct ib_qp_init_attr ib_qp_init_attr = {};
	struct mlx4_ib_qp *qp;
	struct mlx4_ib_create_wq ucmd;
	int err, required_cmd_sz;

	if (!udata)
		return ERR_PTR(-EINVAL);

	required_cmd_sz = offsetof(typeof(ucmd), comp_mask) +
			  sizeof(ucmd.comp_mask);
	if (udata->inlen < required_cmd_sz) {
		pr_debug("invalid inlen\n");
		return ERR_PTR(-EINVAL);
	}

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd))) {
		pr_debug("inlen is not supported\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (udata->outlen)
		return ERR_PTR(-EOPNOTSUPP);

	if (init_attr->wq_type != IB_WQT_RQ) {
		pr_debug("unsupported wq type %d\n", init_attr->wq_type);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS ||
	    !(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
		pr_debug("unsupported create_flags %u\n",
			 init_attr->create_flags);
		return ERR_PTR(-EOPNOTSUPP);
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->pri.vid = 0xFFFF;
	qp->alt.vid = 0xFFFF;

	ib_qp_init_attr.qp_context = init_attr->wq_context;
	ib_qp_init_attr.qp_type = IB_QPT_RAW_PACKET;
	ib_qp_init_attr.cap.max_recv_wr = init_attr->max_wr;
	ib_qp_init_attr.cap.max_recv_sge = init_attr->max_sge;
	ib_qp_init_attr.recv_cq = init_attr->cq;
	ib_qp_init_attr.send_cq = ib_qp_init_attr.recv_cq; /* Dummy CQ */

	if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS)
		ib_qp_init_attr.create_flags |= IB_QP_CREATE_SCATTER_FCS;

	err = create_rq(pd, &ib_qp_init_attr, udata, qp);
	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	qp->ibwq.event_handler = init_attr->event_handler;
	qp->ibwq.wq_num = qp->mqp.qpn;
	qp->ibwq.state = IB_WQS_RESET;

	return &qp->ibwq;
}
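
/*
 * A WQ exposes only the RESET/RDY/ERR states; map them onto the state
 * machine of the underlying RAW_PACKET QP (RESET -> RESET, RDY -> RTR,
 * anything else -> ERR).
 */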
static int ib_wq2qp_state(enum ib_wq_state state)
{
	switch (state) {
	case IB_WQS_RESET:
		return IB_QPS_RESET;
	case IB_WQS_RDY:
		return IB_QPS_RTR;
	default:
		return IB_QPS_ERR;
	}
}
static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state,
			      struct ib_udata *udata)
{
	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
	enum ib_qp_state qp_cur_state;
	enum ib_qp_state qp_new_state;
	int attr_mask;
	int err;

	/* ib_qp.state represents the WQ HW state while ib_wq.state represents
	 * the WQ logic state.
	 */
	qp_cur_state = qp->state;
	qp_new_state = ib_wq2qp_state(new_state);

	if (ib_wq2qp_state(new_state) == qp_cur_state)
		return 0;

	if (new_state == IB_WQS_RDY) {
		struct ib_qp_attr attr = {};

		attr.port_num = qp->port;
		attr_mask = IB_QP_PORT;

		err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, &attr,
					  attr_mask, IB_QPS_RESET, IB_QPS_INIT,
					  udata);
		if (err) {
			pr_debug("WQN=0x%06x failed to apply RST->INIT on the HW QP\n",
				 ibwq->wq_num);
			return err;
		}

		qp_cur_state = IB_QPS_INIT;
	}

	attr_mask = 0;
	err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL, attr_mask,
				  qp_cur_state, qp_new_state, udata);

	if (err && (qp_cur_state == IB_QPS_INIT)) {
		qp_new_state = IB_QPS_RESET;
		if (__mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL,
					attr_mask, IB_QPS_INIT, IB_QPS_RESET,
					udata)) {
			pr_warn("WQN=0x%06x failed with reverting HW's resources failure\n",
				ibwq->wq_num);
			qp_new_state = IB_QPS_INIT;
		}
	}

	qp->state = qp_new_state;

	return err;
}
int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
	struct mlx4_ib_modify_wq ucmd = {};
	size_t required_cmd_sz;
	enum ib_wq_state cur_state, new_state;
	int err = 0;

	required_cmd_sz = offsetof(typeof(ucmd), reserved) +
			  sizeof(ucmd.reserved);
	if (udata->inlen < required_cmd_sz)
		return -EINVAL;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EOPNOTSUPP;

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
		return -EFAULT;

	if (ucmd.comp_mask || ucmd.reserved)
		return -EOPNOTSUPP;

	if (wq_attr_mask & IB_WQ_FLAGS)
		return -EOPNOTSUPP;

	cur_state = wq_attr_mask & IB_WQ_CUR_STATE ? wq_attr->curr_wq_state :
						     ibwq->state;
	new_state = wq_attr_mask & IB_WQ_STATE ? wq_attr->wq_state : cur_state;

	if (cur_state < IB_WQS_RESET || cur_state > IB_WQS_ERR ||
	    new_state < IB_WQS_RESET || new_state > IB_WQS_ERR)
		return -EINVAL;

	if ((new_state == IB_WQS_RDY) && (cur_state == IB_WQS_ERR))
		return -EINVAL;

	if ((new_state == IB_WQS_ERR) && (cur_state == IB_WQS_RESET))
		return -EINVAL;

	/* Need to protect against the parent RSS which also may modify WQ
	 * state.
	 */
	mutex_lock(&qp->mutex);

	/* Can update HW state only if a RSS QP has already associated to this
	 * WQ, so we can apply its port on the WQ.
	 */
	if (qp->rss_usecnt)
		err = _mlx4_ib_modify_wq(ibwq, new_state, udata);

	if (!err)
		ibwq->state = new_state;

	mutex_unlock(&qp->mutex);

	return err;
}
void mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);

	if (qp->counter_index)
		mlx4_ib_free_qp_counter(dev, qp);

	destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);

	kfree(qp);
}
struct ib_rwq_ind_table
*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
			      struct ib_rwq_ind_table_init_attr *init_attr,
			      struct ib_udata *udata)
{
	struct ib_rwq_ind_table *rwq_ind_table;
	struct mlx4_ib_create_rwq_ind_tbl_resp resp = {};
	unsigned int ind_tbl_size = 1 << init_attr->log_ind_tbl_size;
	unsigned int base_wqn;
	size_t min_resp_len;
	int i;
	int err;

	if (udata->inlen > 0 &&
	    !ib_is_udata_cleared(udata, 0,
				 udata->inlen))
		return ERR_PTR(-EOPNOTSUPP);

	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
	if (udata->outlen && udata->outlen < min_resp_len)
		return ERR_PTR(-EINVAL);

	if (ind_tbl_size >
	    device->attrs.rss_caps.max_rwq_indirection_table_size) {
		pr_debug("log_ind_tbl_size = %d is bigger than supported = %d\n",
			 ind_tbl_size,
			 device->attrs.rss_caps.max_rwq_indirection_table_size);
		return ERR_PTR(-EINVAL);
	}

	base_wqn = init_attr->ind_tbl[0]->wq_num;

	if (base_wqn % ind_tbl_size) {
		pr_debug("WQN=0x%x isn't aligned with indirection table size\n",
			 base_wqn);
		return ERR_PTR(-EINVAL);
	}

	for (i = 1; i < ind_tbl_size; i++) {
		if (++base_wqn != init_attr->ind_tbl[i]->wq_num) {
			pr_debug("indirection table's WQNs aren't consecutive\n");
			return ERR_PTR(-EINVAL);
		}
	}

	rwq_ind_table = kzalloc(sizeof(*rwq_ind_table), GFP_KERNEL);
	if (!rwq_ind_table)
		return ERR_PTR(-ENOMEM);

	if (udata->outlen) {
		resp.response_length = offsetof(typeof(resp), response_length) +
				       sizeof(resp.response_length);
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto err;
	}

	return rwq_ind_table;

err:
	kfree(rwq_ind_table);
	return ERR_PTR(err);
}
int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	kfree(ib_rwq_ind_tbl);
	return 0;
}
struct mlx4_ib_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};
static void mlx4_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx4_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
						     struct mlx4_ib_drain_cqe,
						     cqe);

	complete(&cqe->done);
}
/* This function returns only once the drained WR was completed */
static void handle_drain_completion(struct ib_cq *cq,
				    struct mlx4_ib_drain_cqe *sdrain,
				    struct mlx4_ib_dev *dev)
{
	struct mlx4_dev *mdev = dev->dev;

	if (cq->poll_ctx == IB_POLL_DIRECT) {
		while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
			ib_process_cq_direct(cq, -1);
		return;
	}

	if (mdev->persist->state == MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		struct mlx4_ib_cq *mcq = to_mcq(cq);
		bool triggered = false;
		unsigned long flags;

		spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
		/* Make sure that the CQ handler won't run if wasn't run yet */
		if (!mcq->mcq.reset_notify_added)
			mcq->mcq.reset_notify_added = 1;
		else
			triggered = true;
		spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

		if (triggered) {
			/* Wait for any scheduled/running task to be ended */
			switch (cq->poll_ctx) {
			case IB_POLL_SOFTIRQ:
				irq_poll_disable(&cq->iop);
				irq_poll_enable(&cq->iop);
				break;
			case IB_POLL_WORKQUEUE:
				cancel_work_sync(&cq->work);
				break;
			default:
				WARN_ON_ONCE(1);
			}
		}

		/* Run the CQ handler - this makes sure that the drain WR will
		 * be processed if wasn't processed yet.
		 */
		mcq->mcq.comp(&mcq->mcq);
	}

	wait_for_completion(&sdrain->done);
}
void mlx4_ib_drain_sq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->send_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct mlx4_ib_drain_cqe sdrain;
	const struct ib_send_wr *bad_swr;
	struct ib_rdma_wr swr = {
		.wr = {
			.next = NULL,
			{ .wr_cqe	= &sdrain.cqe, },
			.opcode	= IB_WR_RDMA_WRITE,
		},
	};
	int ret;
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_dev *mdev = dev->dev;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	sdrain.cqe.done = mlx4_ib_drain_qp_done;
	init_completion(&sdrain.done);

	ret = _mlx4_ib_post_send(qp, &swr.wr, &bad_swr, true);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	handle_drain_completion(cq, &sdrain, dev);
}
void mlx4_ib_drain_rq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->recv_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct mlx4_ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {};
	const struct ib_recv_wr *bad_rwr;
	int ret;
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_dev *mdev = dev->dev;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = mlx4_ib_drain_qp_done;
	init_completion(&rdrain.done);

	ret = _mlx4_ib_post_recv(qp, &rwr, &bad_rwr, true);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	handle_drain_completion(cq, &rdrain, dev);
}