2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/module.h>
34 #include <rdma/ib_umem.h>
35 #include <rdma/ib_cache.h>
36 #include <rdma/ib_user_verbs.h>
37 #include <linux/mlx5/fs.h>
42 /* not supported currently */
43 static int wq_signature
;
46 MLX5_IB_ACK_REQ_FREQ
= 8,
50 MLX5_IB_DEFAULT_SCHED_QUEUE
= 0x83,
51 MLX5_IB_DEFAULT_QP0_SCHED_QUEUE
= 0x3f,
52 MLX5_IB_LINK_TYPE_IB
= 0,
53 MLX5_IB_LINK_TYPE_ETH
= 1
57 MLX5_IB_SQ_STRIDE
= 6,
58 MLX5_IB_SQ_UMR_INLINE_THRESHOLD
= 64,
61 static const u32 mlx5_ib_opcode
[] = {
62 [IB_WR_SEND
] = MLX5_OPCODE_SEND
,
63 [IB_WR_LSO
] = MLX5_OPCODE_LSO
,
64 [IB_WR_SEND_WITH_IMM
] = MLX5_OPCODE_SEND_IMM
,
65 [IB_WR_RDMA_WRITE
] = MLX5_OPCODE_RDMA_WRITE
,
66 [IB_WR_RDMA_WRITE_WITH_IMM
] = MLX5_OPCODE_RDMA_WRITE_IMM
,
67 [IB_WR_RDMA_READ
] = MLX5_OPCODE_RDMA_READ
,
68 [IB_WR_ATOMIC_CMP_AND_SWP
] = MLX5_OPCODE_ATOMIC_CS
,
69 [IB_WR_ATOMIC_FETCH_AND_ADD
] = MLX5_OPCODE_ATOMIC_FA
,
70 [IB_WR_SEND_WITH_INV
] = MLX5_OPCODE_SEND_INVAL
,
71 [IB_WR_LOCAL_INV
] = MLX5_OPCODE_UMR
,
72 [IB_WR_REG_MR
] = MLX5_OPCODE_UMR
,
73 [IB_WR_MASKED_ATOMIC_CMP_AND_SWP
] = MLX5_OPCODE_ATOMIC_MASKED_CS
,
74 [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD
] = MLX5_OPCODE_ATOMIC_MASKED_FA
,
75 [MLX5_IB_WR_UMR
] = MLX5_OPCODE_UMR
,
78 struct mlx5_wqe_eth_pad
{
82 enum raw_qp_set_mask_map
{
83 MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID
= 1UL << 0,
84 MLX5_RAW_QP_RATE_LIMIT
= 1UL << 1,
87 struct mlx5_modify_raw_qp_param
{
90 u32 set_mask
; /* raw_qp_set_mask_map */
92 struct mlx5_rate_limit rl
;
97 static void get_cqs(enum ib_qp_type qp_type
,
98 struct ib_cq
*ib_send_cq
, struct ib_cq
*ib_recv_cq
,
99 struct mlx5_ib_cq
**send_cq
, struct mlx5_ib_cq
**recv_cq
);
101 static int is_qp0(enum ib_qp_type qp_type
)
103 return qp_type
== IB_QPT_SMI
;
106 static int is_sqp(enum ib_qp_type qp_type
)
108 return is_qp0(qp_type
) || is_qp1(qp_type
);
112 * mlx5_ib_read_user_wqe_common() - Copy a WQE (or part of) from user WQ
115 * @umem: User space memory where the WQ is
116 * @buffer: buffer to copy to
117 * @buflen: buffer length
118 * @wqe_index: index of WQE to copy from
119 * @wq_offset: offset to start of WQ
120 * @wq_wqe_cnt: number of WQEs in WQ
121 * @wq_wqe_shift: log2 of WQE size
122 * @bcnt: number of bytes to copy
123 * @bytes_copied: number of bytes to copy (return value)
125 * Copies from start of WQE bcnt or less bytes.
126 * Does not gurantee to copy the entire WQE.
128 * Return: zero on success, or an error code.
130 static int mlx5_ib_read_user_wqe_common(struct ib_umem
*umem
,
138 size_t *bytes_copied
)
140 size_t offset
= wq_offset
+ ((wqe_index
% wq_wqe_cnt
) << wq_wqe_shift
);
141 size_t wq_end
= wq_offset
+ (wq_wqe_cnt
<< wq_wqe_shift
);
145 /* don't copy more than requested, more than buffer length or
148 copy_length
= min_t(u32
, buflen
, wq_end
- offset
);
149 copy_length
= min_t(u32
, copy_length
, bcnt
);
151 ret
= ib_umem_copy_from(buffer
, umem
, offset
, copy_length
);
155 if (!ret
&& bytes_copied
)
156 *bytes_copied
= copy_length
;
161 int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp
*qp
,
167 struct mlx5_ib_qp_base
*base
= &qp
->trans_qp
.base
;
168 struct ib_umem
*umem
= base
->ubuffer
.umem
;
169 struct mlx5_ib_wq
*wq
= &qp
->sq
;
170 struct mlx5_wqe_ctrl_seg
*ctrl
;
172 size_t bytes_copied2
;
177 if (buflen
< sizeof(*ctrl
))
180 /* at first read as much as possible */
181 ret
= mlx5_ib_read_user_wqe_common(umem
,
193 /* we need at least control segment size to proceed */
194 if (bytes_copied
< sizeof(*ctrl
))
198 ds
= be32_to_cpu(ctrl
->qpn_ds
) & MLX5_WQE_CTRL_DS_MASK
;
199 wqe_length
= ds
* MLX5_WQE_DS_UNITS
;
201 /* if we copied enough then we are done */
202 if (bytes_copied
>= wqe_length
) {
207 /* otherwise this a wrapped around wqe
208 * so read the remaining bytes starting
211 ret
= mlx5_ib_read_user_wqe_common(umem
,
212 buffer
+ bytes_copied
,
213 buflen
- bytes_copied
,
218 wqe_length
- bytes_copied
,
223 *bc
= bytes_copied
+ bytes_copied2
;
227 int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp
*qp
,
233 struct mlx5_ib_qp_base
*base
= &qp
->trans_qp
.base
;
234 struct ib_umem
*umem
= base
->ubuffer
.umem
;
235 struct mlx5_ib_wq
*wq
= &qp
->rq
;
239 ret
= mlx5_ib_read_user_wqe_common(umem
,
255 int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq
*srq
,
261 struct ib_umem
*umem
= srq
->umem
;
265 ret
= mlx5_ib_read_user_wqe_common(umem
,
281 static void mlx5_ib_qp_event(struct mlx5_core_qp
*qp
, int type
)
283 struct ib_qp
*ibqp
= &to_mibqp(qp
)->ibqp
;
284 struct ib_event event
;
286 if (type
== MLX5_EVENT_TYPE_PATH_MIG
) {
287 /* This event is only valid for trans_qps */
288 to_mibqp(qp
)->port
= to_mibqp(qp
)->trans_qp
.alt_port
;
291 if (ibqp
->event_handler
) {
292 event
.device
= ibqp
->device
;
293 event
.element
.qp
= ibqp
;
295 case MLX5_EVENT_TYPE_PATH_MIG
:
296 event
.event
= IB_EVENT_PATH_MIG
;
298 case MLX5_EVENT_TYPE_COMM_EST
:
299 event
.event
= IB_EVENT_COMM_EST
;
301 case MLX5_EVENT_TYPE_SQ_DRAINED
:
302 event
.event
= IB_EVENT_SQ_DRAINED
;
304 case MLX5_EVENT_TYPE_SRQ_LAST_WQE
:
305 event
.event
= IB_EVENT_QP_LAST_WQE_REACHED
;
307 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR
:
308 event
.event
= IB_EVENT_QP_FATAL
;
310 case MLX5_EVENT_TYPE_PATH_MIG_FAILED
:
311 event
.event
= IB_EVENT_PATH_MIG_ERR
;
313 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR
:
314 event
.event
= IB_EVENT_QP_REQ_ERR
;
316 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR
:
317 event
.event
= IB_EVENT_QP_ACCESS_ERR
;
320 pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type
, qp
->qpn
);
324 ibqp
->event_handler(&event
, ibqp
->qp_context
);
328 static int set_rq_size(struct mlx5_ib_dev
*dev
, struct ib_qp_cap
*cap
,
329 int has_rq
, struct mlx5_ib_qp
*qp
, struct mlx5_ib_create_qp
*ucmd
)
334 /* Sanity check RQ size before proceeding */
335 if (cap
->max_recv_wr
> (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_qp_sz
)))
341 qp
->rq
.wqe_shift
= 0;
342 cap
->max_recv_wr
= 0;
343 cap
->max_recv_sge
= 0;
346 qp
->rq
.wqe_cnt
= ucmd
->rq_wqe_count
;
347 if (ucmd
->rq_wqe_shift
> BITS_PER_BYTE
* sizeof(ucmd
->rq_wqe_shift
))
349 qp
->rq
.wqe_shift
= ucmd
->rq_wqe_shift
;
350 if ((1 << qp
->rq
.wqe_shift
) / sizeof(struct mlx5_wqe_data_seg
) < qp
->wq_sig
)
352 qp
->rq
.max_gs
= (1 << qp
->rq
.wqe_shift
) / sizeof(struct mlx5_wqe_data_seg
) - qp
->wq_sig
;
353 qp
->rq
.max_post
= qp
->rq
.wqe_cnt
;
355 wqe_size
= qp
->wq_sig
? sizeof(struct mlx5_wqe_signature_seg
) : 0;
356 wqe_size
+= cap
->max_recv_sge
* sizeof(struct mlx5_wqe_data_seg
);
357 wqe_size
= roundup_pow_of_two(wqe_size
);
358 wq_size
= roundup_pow_of_two(cap
->max_recv_wr
) * wqe_size
;
359 wq_size
= max_t(int, wq_size
, MLX5_SEND_WQE_BB
);
360 qp
->rq
.wqe_cnt
= wq_size
/ wqe_size
;
361 if (wqe_size
> MLX5_CAP_GEN(dev
->mdev
, max_wqe_sz_rq
)) {
362 mlx5_ib_dbg(dev
, "wqe_size %d, max %d\n",
364 MLX5_CAP_GEN(dev
->mdev
,
368 qp
->rq
.wqe_shift
= ilog2(wqe_size
);
369 qp
->rq
.max_gs
= (1 << qp
->rq
.wqe_shift
) / sizeof(struct mlx5_wqe_data_seg
) - qp
->wq_sig
;
370 qp
->rq
.max_post
= qp
->rq
.wqe_cnt
;
377 static int sq_overhead(struct ib_qp_init_attr
*attr
)
381 switch (attr
->qp_type
) {
383 size
+= sizeof(struct mlx5_wqe_xrc_seg
);
386 size
+= sizeof(struct mlx5_wqe_ctrl_seg
) +
387 max(sizeof(struct mlx5_wqe_atomic_seg
) +
388 sizeof(struct mlx5_wqe_raddr_seg
),
389 sizeof(struct mlx5_wqe_umr_ctrl_seg
) +
390 sizeof(struct mlx5_mkey_seg
) +
391 MLX5_IB_SQ_UMR_INLINE_THRESHOLD
/
392 MLX5_IB_UMR_OCTOWORD
);
399 size
+= sizeof(struct mlx5_wqe_ctrl_seg
) +
400 max(sizeof(struct mlx5_wqe_raddr_seg
),
401 sizeof(struct mlx5_wqe_umr_ctrl_seg
) +
402 sizeof(struct mlx5_mkey_seg
));
406 if (attr
->create_flags
& IB_QP_CREATE_IPOIB_UD_LSO
)
407 size
+= sizeof(struct mlx5_wqe_eth_pad
) +
408 sizeof(struct mlx5_wqe_eth_seg
);
411 case MLX5_IB_QPT_HW_GSI
:
412 size
+= sizeof(struct mlx5_wqe_ctrl_seg
) +
413 sizeof(struct mlx5_wqe_datagram_seg
);
416 case MLX5_IB_QPT_REG_UMR
:
417 size
+= sizeof(struct mlx5_wqe_ctrl_seg
) +
418 sizeof(struct mlx5_wqe_umr_ctrl_seg
) +
419 sizeof(struct mlx5_mkey_seg
);
429 static int calc_send_wqe(struct ib_qp_init_attr
*attr
)
434 size
= sq_overhead(attr
);
438 if (attr
->cap
.max_inline_data
) {
439 inl_size
= size
+ sizeof(struct mlx5_wqe_inline_seg
) +
440 attr
->cap
.max_inline_data
;
443 size
+= attr
->cap
.max_send_sge
* sizeof(struct mlx5_wqe_data_seg
);
444 if (attr
->create_flags
& IB_QP_CREATE_SIGNATURE_EN
&&
445 ALIGN(max_t(int, inl_size
, size
), MLX5_SEND_WQE_BB
) < MLX5_SIG_WQE_SIZE
)
446 return MLX5_SIG_WQE_SIZE
;
448 return ALIGN(max_t(int, inl_size
, size
), MLX5_SEND_WQE_BB
);
451 static int get_send_sge(struct ib_qp_init_attr
*attr
, int wqe_size
)
455 if (attr
->qp_type
== IB_QPT_RC
)
456 max_sge
= (min_t(int, wqe_size
, 512) -
457 sizeof(struct mlx5_wqe_ctrl_seg
) -
458 sizeof(struct mlx5_wqe_raddr_seg
)) /
459 sizeof(struct mlx5_wqe_data_seg
);
460 else if (attr
->qp_type
== IB_QPT_XRC_INI
)
461 max_sge
= (min_t(int, wqe_size
, 512) -
462 sizeof(struct mlx5_wqe_ctrl_seg
) -
463 sizeof(struct mlx5_wqe_xrc_seg
) -
464 sizeof(struct mlx5_wqe_raddr_seg
)) /
465 sizeof(struct mlx5_wqe_data_seg
);
467 max_sge
= (wqe_size
- sq_overhead(attr
)) /
468 sizeof(struct mlx5_wqe_data_seg
);
470 return min_t(int, max_sge
, wqe_size
- sq_overhead(attr
) /
471 sizeof(struct mlx5_wqe_data_seg
));
474 static int calc_sq_size(struct mlx5_ib_dev
*dev
, struct ib_qp_init_attr
*attr
,
475 struct mlx5_ib_qp
*qp
)
480 if (!attr
->cap
.max_send_wr
)
483 wqe_size
= calc_send_wqe(attr
);
484 mlx5_ib_dbg(dev
, "wqe_size %d\n", wqe_size
);
488 if (wqe_size
> MLX5_CAP_GEN(dev
->mdev
, max_wqe_sz_sq
)) {
489 mlx5_ib_dbg(dev
, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
490 wqe_size
, MLX5_CAP_GEN(dev
->mdev
, max_wqe_sz_sq
));
494 qp
->max_inline_data
= wqe_size
- sq_overhead(attr
) -
495 sizeof(struct mlx5_wqe_inline_seg
);
496 attr
->cap
.max_inline_data
= qp
->max_inline_data
;
498 if (attr
->create_flags
& IB_QP_CREATE_SIGNATURE_EN
)
499 qp
->signature_en
= true;
501 wq_size
= roundup_pow_of_two(attr
->cap
.max_send_wr
* wqe_size
);
502 qp
->sq
.wqe_cnt
= wq_size
/ MLX5_SEND_WQE_BB
;
503 if (qp
->sq
.wqe_cnt
> (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_qp_sz
))) {
504 mlx5_ib_dbg(dev
, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
505 attr
->cap
.max_send_wr
, wqe_size
, MLX5_SEND_WQE_BB
,
507 1 << MLX5_CAP_GEN(dev
->mdev
, log_max_qp_sz
));
510 qp
->sq
.wqe_shift
= ilog2(MLX5_SEND_WQE_BB
);
511 qp
->sq
.max_gs
= get_send_sge(attr
, wqe_size
);
512 if (qp
->sq
.max_gs
< attr
->cap
.max_send_sge
)
515 attr
->cap
.max_send_sge
= qp
->sq
.max_gs
;
516 qp
->sq
.max_post
= wq_size
/ wqe_size
;
517 attr
->cap
.max_send_wr
= qp
->sq
.max_post
;
522 static int set_user_buf_size(struct mlx5_ib_dev
*dev
,
523 struct mlx5_ib_qp
*qp
,
524 struct mlx5_ib_create_qp
*ucmd
,
525 struct mlx5_ib_qp_base
*base
,
526 struct ib_qp_init_attr
*attr
)
528 int desc_sz
= 1 << qp
->sq
.wqe_shift
;
530 if (desc_sz
> MLX5_CAP_GEN(dev
->mdev
, max_wqe_sz_sq
)) {
531 mlx5_ib_warn(dev
, "desc_sz %d, max_sq_desc_sz %d\n",
532 desc_sz
, MLX5_CAP_GEN(dev
->mdev
, max_wqe_sz_sq
));
536 if (ucmd
->sq_wqe_count
&& !is_power_of_2(ucmd
->sq_wqe_count
)) {
537 mlx5_ib_warn(dev
, "sq_wqe_count %d is not a power of two\n",
542 qp
->sq
.wqe_cnt
= ucmd
->sq_wqe_count
;
544 if (qp
->sq
.wqe_cnt
> (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_qp_sz
))) {
545 mlx5_ib_warn(dev
, "wqe_cnt %d, max_wqes %d\n",
547 1 << MLX5_CAP_GEN(dev
->mdev
, log_max_qp_sz
));
551 if (attr
->qp_type
== IB_QPT_RAW_PACKET
||
552 qp
->flags
& MLX5_IB_QP_UNDERLAY
) {
553 base
->ubuffer
.buf_size
= qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
;
554 qp
->raw_packet_qp
.sq
.ubuffer
.buf_size
= qp
->sq
.wqe_cnt
<< 6;
556 base
->ubuffer
.buf_size
= (qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
) +
557 (qp
->sq
.wqe_cnt
<< 6);
563 static int qp_has_rq(struct ib_qp_init_attr
*attr
)
565 if (attr
->qp_type
== IB_QPT_XRC_INI
||
566 attr
->qp_type
== IB_QPT_XRC_TGT
|| attr
->srq
||
567 attr
->qp_type
== MLX5_IB_QPT_REG_UMR
||
568 !attr
->cap
.max_recv_wr
)
575 /* this is the first blue flame register in the array of bfregs assigned
576 * to a processes. Since we do not use it for blue flame but rather
577 * regular 64 bit doorbells, we do not need a lock for maintaiing
580 NUM_NON_BLUE_FLAME_BFREGS
= 1,
583 static int max_bfregs(struct mlx5_ib_dev
*dev
, struct mlx5_bfreg_info
*bfregi
)
585 return get_num_static_uars(dev
, bfregi
) * MLX5_NON_FP_BFREGS_PER_UAR
;
588 static int num_med_bfreg(struct mlx5_ib_dev
*dev
,
589 struct mlx5_bfreg_info
*bfregi
)
593 n
= max_bfregs(dev
, bfregi
) - bfregi
->num_low_latency_bfregs
-
594 NUM_NON_BLUE_FLAME_BFREGS
;
596 return n
>= 0 ? n
: 0;
599 static int first_med_bfreg(struct mlx5_ib_dev
*dev
,
600 struct mlx5_bfreg_info
*bfregi
)
602 return num_med_bfreg(dev
, bfregi
) ? 1 : -ENOMEM
;
605 static int first_hi_bfreg(struct mlx5_ib_dev
*dev
,
606 struct mlx5_bfreg_info
*bfregi
)
610 med
= num_med_bfreg(dev
, bfregi
);
614 static int alloc_high_class_bfreg(struct mlx5_ib_dev
*dev
,
615 struct mlx5_bfreg_info
*bfregi
)
619 for (i
= first_hi_bfreg(dev
, bfregi
); i
< max_bfregs(dev
, bfregi
); i
++) {
620 if (!bfregi
->count
[i
]) {
629 static int alloc_med_class_bfreg(struct mlx5_ib_dev
*dev
,
630 struct mlx5_bfreg_info
*bfregi
)
632 int minidx
= first_med_bfreg(dev
, bfregi
);
638 for (i
= minidx
; i
< first_hi_bfreg(dev
, bfregi
); i
++) {
639 if (bfregi
->count
[i
] < bfregi
->count
[minidx
])
641 if (!bfregi
->count
[minidx
])
645 bfregi
->count
[minidx
]++;
649 static int alloc_bfreg(struct mlx5_ib_dev
*dev
,
650 struct mlx5_bfreg_info
*bfregi
)
652 int bfregn
= -ENOMEM
;
654 mutex_lock(&bfregi
->lock
);
655 if (bfregi
->ver
>= 2) {
656 bfregn
= alloc_high_class_bfreg(dev
, bfregi
);
658 bfregn
= alloc_med_class_bfreg(dev
, bfregi
);
662 BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS
!= 1);
664 bfregi
->count
[bfregn
]++;
666 mutex_unlock(&bfregi
->lock
);
671 void mlx5_ib_free_bfreg(struct mlx5_ib_dev
*dev
, struct mlx5_bfreg_info
*bfregi
, int bfregn
)
673 mutex_lock(&bfregi
->lock
);
674 bfregi
->count
[bfregn
]--;
675 mutex_unlock(&bfregi
->lock
);
678 static enum mlx5_qp_state
to_mlx5_state(enum ib_qp_state state
)
681 case IB_QPS_RESET
: return MLX5_QP_STATE_RST
;
682 case IB_QPS_INIT
: return MLX5_QP_STATE_INIT
;
683 case IB_QPS_RTR
: return MLX5_QP_STATE_RTR
;
684 case IB_QPS_RTS
: return MLX5_QP_STATE_RTS
;
685 case IB_QPS_SQD
: return MLX5_QP_STATE_SQD
;
686 case IB_QPS_SQE
: return MLX5_QP_STATE_SQER
;
687 case IB_QPS_ERR
: return MLX5_QP_STATE_ERR
;
692 static int to_mlx5_st(enum ib_qp_type type
)
695 case IB_QPT_RC
: return MLX5_QP_ST_RC
;
696 case IB_QPT_UC
: return MLX5_QP_ST_UC
;
697 case IB_QPT_UD
: return MLX5_QP_ST_UD
;
698 case MLX5_IB_QPT_REG_UMR
: return MLX5_QP_ST_REG_UMR
;
700 case IB_QPT_XRC_TGT
: return MLX5_QP_ST_XRC
;
701 case IB_QPT_SMI
: return MLX5_QP_ST_QP0
;
702 case MLX5_IB_QPT_HW_GSI
: return MLX5_QP_ST_QP1
;
703 case MLX5_IB_QPT_DCI
: return MLX5_QP_ST_DCI
;
704 case IB_QPT_RAW_IPV6
: return MLX5_QP_ST_RAW_IPV6
;
705 case IB_QPT_RAW_PACKET
:
706 case IB_QPT_RAW_ETHERTYPE
: return MLX5_QP_ST_RAW_ETHERTYPE
;
708 default: return -EINVAL
;
712 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq
*send_cq
,
713 struct mlx5_ib_cq
*recv_cq
);
714 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq
*send_cq
,
715 struct mlx5_ib_cq
*recv_cq
);
717 int bfregn_to_uar_index(struct mlx5_ib_dev
*dev
,
718 struct mlx5_bfreg_info
*bfregi
, u32 bfregn
,
721 unsigned int bfregs_per_sys_page
;
722 u32 index_of_sys_page
;
725 bfregs_per_sys_page
= get_uars_per_sys_page(dev
, bfregi
->lib_uar_4k
) *
726 MLX5_NON_FP_BFREGS_PER_UAR
;
727 index_of_sys_page
= bfregn
/ bfregs_per_sys_page
;
730 index_of_sys_page
+= bfregi
->num_static_sys_pages
;
732 if (index_of_sys_page
>= bfregi
->num_sys_pages
)
735 if (bfregn
> bfregi
->num_dyn_bfregs
||
736 bfregi
->sys_pages
[index_of_sys_page
] == MLX5_IB_INVALID_UAR_INDEX
) {
737 mlx5_ib_dbg(dev
, "Invalid dynamic uar index\n");
742 offset
= bfregn
% bfregs_per_sys_page
/ MLX5_NON_FP_BFREGS_PER_UAR
;
743 return bfregi
->sys_pages
[index_of_sys_page
] + offset
;
746 static int mlx5_ib_umem_get(struct mlx5_ib_dev
*dev
, struct ib_udata
*udata
,
747 unsigned long addr
, size_t size
,
748 struct ib_umem
**umem
, int *npages
, int *page_shift
,
749 int *ncont
, u32
*offset
)
753 *umem
= ib_umem_get(udata
, addr
, size
, 0, 0);
755 mlx5_ib_dbg(dev
, "umem_get failed\n");
756 return PTR_ERR(*umem
);
759 mlx5_ib_cont_pages(*umem
, addr
, 0, npages
, page_shift
, ncont
, NULL
);
761 err
= mlx5_ib_get_buf_offset(addr
, *page_shift
, offset
);
763 mlx5_ib_warn(dev
, "bad offset\n");
767 mlx5_ib_dbg(dev
, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
768 addr
, size
, *npages
, *page_shift
, *ncont
, *offset
);
773 ib_umem_release(*umem
);
779 static void destroy_user_rq(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
780 struct mlx5_ib_rwq
*rwq
)
782 struct mlx5_ib_ucontext
*context
;
784 if (rwq
->create_flags
& MLX5_IB_WQ_FLAGS_DELAY_DROP
)
785 atomic_dec(&dev
->delay_drop
.rqs_cnt
);
787 context
= to_mucontext(pd
->uobject
->context
);
788 mlx5_ib_db_unmap_user(context
, &rwq
->db
);
790 ib_umem_release(rwq
->umem
);
793 static int create_user_rq(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
794 struct ib_udata
*udata
, struct mlx5_ib_rwq
*rwq
,
795 struct mlx5_ib_create_wq
*ucmd
)
797 struct mlx5_ib_ucontext
*ucontext
= rdma_udata_to_drv_context(
798 udata
, struct mlx5_ib_ucontext
, ibucontext
);
808 rwq
->umem
= ib_umem_get(udata
, ucmd
->buf_addr
, rwq
->buf_size
, 0, 0);
809 if (IS_ERR(rwq
->umem
)) {
810 mlx5_ib_dbg(dev
, "umem_get failed\n");
811 err
= PTR_ERR(rwq
->umem
);
815 mlx5_ib_cont_pages(rwq
->umem
, ucmd
->buf_addr
, 0, &npages
, &page_shift
,
817 err
= mlx5_ib_get_buf_offset(ucmd
->buf_addr
, page_shift
,
818 &rwq
->rq_page_offset
);
820 mlx5_ib_warn(dev
, "bad offset\n");
824 rwq
->rq_num_pas
= ncont
;
825 rwq
->page_shift
= page_shift
;
826 rwq
->log_page_size
= page_shift
- MLX5_ADAPTER_PAGE_SHIFT
;
827 rwq
->wq_sig
= !!(ucmd
->flags
& MLX5_WQ_FLAG_SIGNATURE
);
829 mlx5_ib_dbg(dev
, "addr 0x%llx, size %zd, npages %d, page_shift %d, ncont %d, offset %d\n",
830 (unsigned long long)ucmd
->buf_addr
, rwq
->buf_size
,
831 npages
, page_shift
, ncont
, offset
);
833 err
= mlx5_ib_db_map_user(ucontext
, udata
, ucmd
->db_addr
, &rwq
->db
);
835 mlx5_ib_dbg(dev
, "map failed\n");
839 rwq
->create_type
= MLX5_WQ_USER
;
843 ib_umem_release(rwq
->umem
);
847 static int adjust_bfregn(struct mlx5_ib_dev
*dev
,
848 struct mlx5_bfreg_info
*bfregi
, int bfregn
)
850 return bfregn
/ MLX5_NON_FP_BFREGS_PER_UAR
* MLX5_BFREGS_PER_UAR
+
851 bfregn
% MLX5_NON_FP_BFREGS_PER_UAR
;
854 static int create_user_qp(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
855 struct mlx5_ib_qp
*qp
, struct ib_udata
*udata
,
856 struct ib_qp_init_attr
*attr
,
858 struct mlx5_ib_create_qp_resp
*resp
, int *inlen
,
859 struct mlx5_ib_qp_base
*base
)
861 struct mlx5_ib_ucontext
*context
;
862 struct mlx5_ib_create_qp ucmd
;
863 struct mlx5_ib_ubuffer
*ubuffer
= &base
->ubuffer
;
875 err
= ib_copy_from_udata(&ucmd
, udata
, sizeof(ucmd
));
877 mlx5_ib_dbg(dev
, "copy failed\n");
881 context
= rdma_udata_to_drv_context(udata
, struct mlx5_ib_ucontext
,
883 if (ucmd
.flags
& MLX5_QP_FLAG_BFREG_INDEX
) {
884 uar_index
= bfregn_to_uar_index(dev
, &context
->bfregi
,
885 ucmd
.bfreg_index
, true);
889 bfregn
= MLX5_IB_INVALID_BFREG
;
890 } else if (qp
->flags
& MLX5_IB_QP_CROSS_CHANNEL
) {
892 * TBD: should come from the verbs when we have the API
894 /* In CROSS_CHANNEL CQ and QP must use the same UAR */
895 bfregn
= MLX5_CROSS_CHANNEL_BFREG
;
898 bfregn
= alloc_bfreg(dev
, &context
->bfregi
);
903 mlx5_ib_dbg(dev
, "bfregn 0x%x, uar_index 0x%x\n", bfregn
, uar_index
);
904 if (bfregn
!= MLX5_IB_INVALID_BFREG
)
905 uar_index
= bfregn_to_uar_index(dev
, &context
->bfregi
, bfregn
,
909 qp
->sq
.wqe_shift
= ilog2(MLX5_SEND_WQE_BB
);
910 qp
->sq
.offset
= qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
;
912 err
= set_user_buf_size(dev
, qp
, &ucmd
, base
, attr
);
916 if (ucmd
.buf_addr
&& ubuffer
->buf_size
) {
917 ubuffer
->buf_addr
= ucmd
.buf_addr
;
918 err
= mlx5_ib_umem_get(dev
, udata
, ubuffer
->buf_addr
,
919 ubuffer
->buf_size
, &ubuffer
->umem
,
920 &npages
, &page_shift
, &ncont
, &offset
);
924 ubuffer
->umem
= NULL
;
927 *inlen
= MLX5_ST_SZ_BYTES(create_qp_in
) +
928 MLX5_FLD_SZ_BYTES(create_qp_in
, pas
[0]) * ncont
;
929 *in
= kvzalloc(*inlen
, GFP_KERNEL
);
935 uid
= (attr
->qp_type
!= IB_QPT_XRC_TGT
&&
936 attr
->qp_type
!= IB_QPT_XRC_INI
) ? to_mpd(pd
)->uid
: 0;
937 MLX5_SET(create_qp_in
, *in
, uid
, uid
);
938 pas
= (__be64
*)MLX5_ADDR_OF(create_qp_in
, *in
, pas
);
940 mlx5_ib_populate_pas(dev
, ubuffer
->umem
, page_shift
, pas
, 0);
942 qpc
= MLX5_ADDR_OF(create_qp_in
, *in
, qpc
);
944 MLX5_SET(qpc
, qpc
, log_page_size
, page_shift
- MLX5_ADAPTER_PAGE_SHIFT
);
945 MLX5_SET(qpc
, qpc
, page_offset
, offset
);
947 MLX5_SET(qpc
, qpc
, uar_page
, uar_index
);
948 if (bfregn
!= MLX5_IB_INVALID_BFREG
)
949 resp
->bfreg_index
= adjust_bfregn(dev
, &context
->bfregi
, bfregn
);
951 resp
->bfreg_index
= MLX5_IB_INVALID_BFREG
;
954 err
= mlx5_ib_db_map_user(context
, udata
, ucmd
.db_addr
, &qp
->db
);
956 mlx5_ib_dbg(dev
, "map failed\n");
960 err
= ib_copy_to_udata(udata
, resp
, min(udata
->outlen
, sizeof(*resp
)));
962 mlx5_ib_dbg(dev
, "copy failed\n");
965 qp
->create_type
= MLX5_QP_USER
;
970 mlx5_ib_db_unmap_user(context
, &qp
->db
);
977 ib_umem_release(ubuffer
->umem
);
980 if (bfregn
!= MLX5_IB_INVALID_BFREG
)
981 mlx5_ib_free_bfreg(dev
, &context
->bfregi
, bfregn
);
985 static void destroy_qp_user(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
986 struct mlx5_ib_qp
*qp
, struct mlx5_ib_qp_base
*base
)
988 struct mlx5_ib_ucontext
*context
;
990 context
= to_mucontext(pd
->uobject
->context
);
991 mlx5_ib_db_unmap_user(context
, &qp
->db
);
992 if (base
->ubuffer
.umem
)
993 ib_umem_release(base
->ubuffer
.umem
);
996 * Free only the BFREGs which are handled by the kernel.
997 * BFREGs of UARs allocated dynamically are handled by user.
999 if (qp
->bfregn
!= MLX5_IB_INVALID_BFREG
)
1000 mlx5_ib_free_bfreg(dev
, &context
->bfregi
, qp
->bfregn
);
1003 /* get_sq_edge - Get the next nearby edge.
1005 * An 'edge' is defined as the first following address after the end
1006 * of the fragment or the SQ. Accordingly, during the WQE construction
1007 * which repetitively increases the pointer to write the next data, it
1008 * simply should check if it gets to an edge.
1011 * @idx - Stride index in the SQ buffer.
1016 static void *get_sq_edge(struct mlx5_ib_wq
*sq
, u32 idx
)
1020 fragment_end
= mlx5_frag_buf_get_wqe
1022 mlx5_frag_buf_get_idx_last_contig_stride(&sq
->fbc
, idx
));
1024 return fragment_end
+ MLX5_SEND_WQE_BB
;
1027 static int create_kernel_qp(struct mlx5_ib_dev
*dev
,
1028 struct ib_qp_init_attr
*init_attr
,
1029 struct mlx5_ib_qp
*qp
,
1030 u32
**in
, int *inlen
,
1031 struct mlx5_ib_qp_base
*base
)
1037 if (init_attr
->create_flags
& ~(IB_QP_CREATE_SIGNATURE_EN
|
1038 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
|
1039 IB_QP_CREATE_IPOIB_UD_LSO
|
1040 IB_QP_CREATE_NETIF_QP
|
1041 mlx5_ib_create_qp_sqpn_qp1()))
1044 if (init_attr
->qp_type
== MLX5_IB_QPT_REG_UMR
)
1045 qp
->bf
.bfreg
= &dev
->fp_bfreg
;
1047 qp
->bf
.bfreg
= &dev
->bfreg
;
1049 /* We need to divide by two since each register is comprised of
1050 * two buffers of identical size, namely odd and even
1052 qp
->bf
.buf_size
= (1 << MLX5_CAP_GEN(dev
->mdev
, log_bf_reg_size
)) / 2;
1053 uar_index
= qp
->bf
.bfreg
->index
;
1055 err
= calc_sq_size(dev
, init_attr
, qp
);
1057 mlx5_ib_dbg(dev
, "err %d\n", err
);
1062 qp
->sq
.offset
= qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
;
1063 base
->ubuffer
.buf_size
= err
+ (qp
->rq
.wqe_cnt
<< qp
->rq
.wqe_shift
);
1065 err
= mlx5_frag_buf_alloc_node(dev
->mdev
, base
->ubuffer
.buf_size
,
1066 &qp
->buf
, dev
->mdev
->priv
.numa_node
);
1068 mlx5_ib_dbg(dev
, "err %d\n", err
);
1073 mlx5_init_fbc(qp
->buf
.frags
, qp
->rq
.wqe_shift
,
1074 ilog2(qp
->rq
.wqe_cnt
), &qp
->rq
.fbc
);
1076 if (qp
->sq
.wqe_cnt
) {
1077 int sq_strides_offset
= (qp
->sq
.offset
& (PAGE_SIZE
- 1)) /
1079 mlx5_init_fbc_offset(qp
->buf
.frags
+
1080 (qp
->sq
.offset
/ PAGE_SIZE
),
1081 ilog2(MLX5_SEND_WQE_BB
),
1082 ilog2(qp
->sq
.wqe_cnt
),
1083 sq_strides_offset
, &qp
->sq
.fbc
);
1085 qp
->sq
.cur_edge
= get_sq_edge(&qp
->sq
, 0);
1088 *inlen
= MLX5_ST_SZ_BYTES(create_qp_in
) +
1089 MLX5_FLD_SZ_BYTES(create_qp_in
, pas
[0]) * qp
->buf
.npages
;
1090 *in
= kvzalloc(*inlen
, GFP_KERNEL
);
1096 qpc
= MLX5_ADDR_OF(create_qp_in
, *in
, qpc
);
1097 MLX5_SET(qpc
, qpc
, uar_page
, uar_index
);
1098 MLX5_SET(qpc
, qpc
, log_page_size
, qp
->buf
.page_shift
- MLX5_ADAPTER_PAGE_SHIFT
);
1100 /* Set "fast registration enabled" for all kernel QPs */
1101 MLX5_SET(qpc
, qpc
, fre
, 1);
1102 MLX5_SET(qpc
, qpc
, rlky
, 1);
1104 if (init_attr
->create_flags
& mlx5_ib_create_qp_sqpn_qp1()) {
1105 MLX5_SET(qpc
, qpc
, deth_sqpn
, 1);
1106 qp
->flags
|= MLX5_IB_QP_SQPN_QP1
;
1109 mlx5_fill_page_frag_array(&qp
->buf
,
1110 (__be64
*)MLX5_ADDR_OF(create_qp_in
,
1113 err
= mlx5_db_alloc(dev
->mdev
, &qp
->db
);
1115 mlx5_ib_dbg(dev
, "err %d\n", err
);
1119 qp
->sq
.wrid
= kvmalloc_array(qp
->sq
.wqe_cnt
,
1120 sizeof(*qp
->sq
.wrid
), GFP_KERNEL
);
1121 qp
->sq
.wr_data
= kvmalloc_array(qp
->sq
.wqe_cnt
,
1122 sizeof(*qp
->sq
.wr_data
), GFP_KERNEL
);
1123 qp
->rq
.wrid
= kvmalloc_array(qp
->rq
.wqe_cnt
,
1124 sizeof(*qp
->rq
.wrid
), GFP_KERNEL
);
1125 qp
->sq
.w_list
= kvmalloc_array(qp
->sq
.wqe_cnt
,
1126 sizeof(*qp
->sq
.w_list
), GFP_KERNEL
);
1127 qp
->sq
.wqe_head
= kvmalloc_array(qp
->sq
.wqe_cnt
,
1128 sizeof(*qp
->sq
.wqe_head
), GFP_KERNEL
);
1130 if (!qp
->sq
.wrid
|| !qp
->sq
.wr_data
|| !qp
->rq
.wrid
||
1131 !qp
->sq
.w_list
|| !qp
->sq
.wqe_head
) {
1135 qp
->create_type
= MLX5_QP_KERNEL
;
1140 kvfree(qp
->sq
.wqe_head
);
1141 kvfree(qp
->sq
.w_list
);
1142 kvfree(qp
->sq
.wrid
);
1143 kvfree(qp
->sq
.wr_data
);
1144 kvfree(qp
->rq
.wrid
);
1145 mlx5_db_free(dev
->mdev
, &qp
->db
);
1151 mlx5_frag_buf_free(dev
->mdev
, &qp
->buf
);
1155 static void destroy_qp_kernel(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
)
1157 kvfree(qp
->sq
.wqe_head
);
1158 kvfree(qp
->sq
.w_list
);
1159 kvfree(qp
->sq
.wrid
);
1160 kvfree(qp
->sq
.wr_data
);
1161 kvfree(qp
->rq
.wrid
);
1162 mlx5_db_free(dev
->mdev
, &qp
->db
);
1163 mlx5_frag_buf_free(dev
->mdev
, &qp
->buf
);
1166 static u32
get_rx_type(struct mlx5_ib_qp
*qp
, struct ib_qp_init_attr
*attr
)
1168 if (attr
->srq
|| (attr
->qp_type
== IB_QPT_XRC_TGT
) ||
1169 (attr
->qp_type
== MLX5_IB_QPT_DCI
) ||
1170 (attr
->qp_type
== IB_QPT_XRC_INI
))
1172 else if (!qp
->has_rq
)
1173 return MLX5_ZERO_LEN_RQ
;
1175 return MLX5_NON_ZERO_RQ
;
1178 static int is_connected(enum ib_qp_type qp_type
)
1180 if (qp_type
== IB_QPT_RC
|| qp_type
== IB_QPT_UC
||
1181 qp_type
== MLX5_IB_QPT_DCI
)
1187 static int create_raw_packet_qp_tis(struct mlx5_ib_dev
*dev
,
1188 struct mlx5_ib_qp
*qp
,
1189 struct mlx5_ib_sq
*sq
, u32 tdn
,
1192 u32 in
[MLX5_ST_SZ_DW(create_tis_in
)] = {0};
1193 void *tisc
= MLX5_ADDR_OF(create_tis_in
, in
, ctx
);
1195 MLX5_SET(create_tis_in
, in
, uid
, to_mpd(pd
)->uid
);
1196 MLX5_SET(tisc
, tisc
, transport_domain
, tdn
);
1197 if (qp
->flags
& MLX5_IB_QP_UNDERLAY
)
1198 MLX5_SET(tisc
, tisc
, underlay_qpn
, qp
->underlay_qpn
);
1200 return mlx5_core_create_tis(dev
->mdev
, in
, sizeof(in
), &sq
->tisn
);
1203 static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev
*dev
,
1204 struct mlx5_ib_sq
*sq
, struct ib_pd
*pd
)
1206 mlx5_cmd_destroy_tis(dev
->mdev
, sq
->tisn
, to_mpd(pd
)->uid
);
1209 static void destroy_flow_rule_vport_sq(struct mlx5_ib_dev
*dev
,
1210 struct mlx5_ib_sq
*sq
)
1213 mlx5_del_flow_rules(sq
->flow_rule
);
1216 static int create_raw_packet_qp_sq(struct mlx5_ib_dev
*dev
,
1217 struct ib_udata
*udata
,
1218 struct mlx5_ib_sq
*sq
, void *qpin
,
1221 struct mlx5_ib_ubuffer
*ubuffer
= &sq
->ubuffer
;
1225 void *qpc
= MLX5_ADDR_OF(create_qp_in
, qpin
, qpc
);
1234 err
= mlx5_ib_umem_get(dev
, udata
, ubuffer
->buf_addr
, ubuffer
->buf_size
,
1235 &sq
->ubuffer
.umem
, &npages
, &page_shift
, &ncont
,
1240 inlen
= MLX5_ST_SZ_BYTES(create_sq_in
) + sizeof(u64
) * ncont
;
1241 in
= kvzalloc(inlen
, GFP_KERNEL
);
1247 MLX5_SET(create_sq_in
, in
, uid
, to_mpd(pd
)->uid
);
1248 sqc
= MLX5_ADDR_OF(create_sq_in
, in
, ctx
);
1249 MLX5_SET(sqc
, sqc
, flush_in_error_en
, 1);
1250 if (MLX5_CAP_ETH(dev
->mdev
, multi_pkt_send_wqe
))
1251 MLX5_SET(sqc
, sqc
, allow_multi_pkt_send_wqe
, 1);
1252 MLX5_SET(sqc
, sqc
, state
, MLX5_SQC_STATE_RST
);
1253 MLX5_SET(sqc
, sqc
, user_index
, MLX5_GET(qpc
, qpc
, user_index
));
1254 MLX5_SET(sqc
, sqc
, cqn
, MLX5_GET(qpc
, qpc
, cqn_snd
));
1255 MLX5_SET(sqc
, sqc
, tis_lst_sz
, 1);
1256 MLX5_SET(sqc
, sqc
, tis_num_0
, sq
->tisn
);
1257 if (MLX5_CAP_GEN(dev
->mdev
, eth_net_offloads
) &&
1258 MLX5_CAP_ETH(dev
->mdev
, swp
))
1259 MLX5_SET(sqc
, sqc
, allow_swp
, 1);
1261 wq
= MLX5_ADDR_OF(sqc
, sqc
, wq
);
1262 MLX5_SET(wq
, wq
, wq_type
, MLX5_WQ_TYPE_CYCLIC
);
1263 MLX5_SET(wq
, wq
, pd
, MLX5_GET(qpc
, qpc
, pd
));
1264 MLX5_SET(wq
, wq
, uar_page
, MLX5_GET(qpc
, qpc
, uar_page
));
1265 MLX5_SET64(wq
, wq
, dbr_addr
, MLX5_GET64(qpc
, qpc
, dbr_addr
));
1266 MLX5_SET(wq
, wq
, log_wq_stride
, ilog2(MLX5_SEND_WQE_BB
));
1267 MLX5_SET(wq
, wq
, log_wq_sz
, MLX5_GET(qpc
, qpc
, log_sq_size
));
1268 MLX5_SET(wq
, wq
, log_wq_pg_sz
, page_shift
- MLX5_ADAPTER_PAGE_SHIFT
);
1269 MLX5_SET(wq
, wq
, page_offset
, offset
);
1271 pas
= (__be64
*)MLX5_ADDR_OF(wq
, wq
, pas
);
1272 mlx5_ib_populate_pas(dev
, sq
->ubuffer
.umem
, page_shift
, pas
, 0);
1274 err
= mlx5_core_create_sq_tracked(dev
->mdev
, in
, inlen
, &sq
->base
.mqp
);
1281 err
= create_flow_rule_vport_sq(dev
, sq
);
1288 mlx5_core_destroy_sq_tracked(dev
->mdev
, &sq
->base
.mqp
);
1291 ib_umem_release(sq
->ubuffer
.umem
);
1292 sq
->ubuffer
.umem
= NULL
;
1297 static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev
*dev
,
1298 struct mlx5_ib_sq
*sq
)
1300 destroy_flow_rule_vport_sq(dev
, sq
);
1301 mlx5_core_destroy_sq_tracked(dev
->mdev
, &sq
->base
.mqp
);
1302 ib_umem_release(sq
->ubuffer
.umem
);
1305 static size_t get_rq_pas_size(void *qpc
)
1307 u32 log_page_size
= MLX5_GET(qpc
, qpc
, log_page_size
) + 12;
1308 u32 log_rq_stride
= MLX5_GET(qpc
, qpc
, log_rq_stride
);
1309 u32 log_rq_size
= MLX5_GET(qpc
, qpc
, log_rq_size
);
1310 u32 page_offset
= MLX5_GET(qpc
, qpc
, page_offset
);
1311 u32 po_quanta
= 1 << (log_page_size
- 6);
1312 u32 rq_sz
= 1 << (log_rq_size
+ 4 + log_rq_stride
);
1313 u32 page_size
= 1 << log_page_size
;
1314 u32 rq_sz_po
= rq_sz
+ (page_offset
* po_quanta
);
1315 u32 rq_num_pas
= (rq_sz_po
+ page_size
- 1) / page_size
;
1317 return rq_num_pas
* sizeof(u64
);
1320 static int create_raw_packet_qp_rq(struct mlx5_ib_dev
*dev
,
1321 struct mlx5_ib_rq
*rq
, void *qpin
,
1322 size_t qpinlen
, struct ib_pd
*pd
)
1324 struct mlx5_ib_qp
*mqp
= rq
->base
.container_mibqp
;
1330 void *qpc
= MLX5_ADDR_OF(create_qp_in
, qpin
, qpc
);
1331 size_t rq_pas_size
= get_rq_pas_size(qpc
);
1335 if (qpinlen
< rq_pas_size
+ MLX5_BYTE_OFF(create_qp_in
, pas
))
1338 inlen
= MLX5_ST_SZ_BYTES(create_rq_in
) + rq_pas_size
;
1339 in
= kvzalloc(inlen
, GFP_KERNEL
);
1343 MLX5_SET(create_rq_in
, in
, uid
, to_mpd(pd
)->uid
);
1344 rqc
= MLX5_ADDR_OF(create_rq_in
, in
, ctx
);
1345 if (!(rq
->flags
& MLX5_IB_RQ_CVLAN_STRIPPING
))
1346 MLX5_SET(rqc
, rqc
, vsd
, 1);
1347 MLX5_SET(rqc
, rqc
, mem_rq_type
, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE
);
1348 MLX5_SET(rqc
, rqc
, state
, MLX5_RQC_STATE_RST
);
1349 MLX5_SET(rqc
, rqc
, flush_in_error_en
, 1);
1350 MLX5_SET(rqc
, rqc
, user_index
, MLX5_GET(qpc
, qpc
, user_index
));
1351 MLX5_SET(rqc
, rqc
, cqn
, MLX5_GET(qpc
, qpc
, cqn_rcv
));
1353 if (mqp
->flags
& MLX5_IB_QP_CAP_SCATTER_FCS
)
1354 MLX5_SET(rqc
, rqc
, scatter_fcs
, 1);
1356 wq
= MLX5_ADDR_OF(rqc
, rqc
, wq
);
1357 MLX5_SET(wq
, wq
, wq_type
, MLX5_WQ_TYPE_CYCLIC
);
1358 if (rq
->flags
& MLX5_IB_RQ_PCI_WRITE_END_PADDING
)
1359 MLX5_SET(wq
, wq
, end_padding_mode
, MLX5_WQ_END_PAD_MODE_ALIGN
);
1360 MLX5_SET(wq
, wq
, page_offset
, MLX5_GET(qpc
, qpc
, page_offset
));
1361 MLX5_SET(wq
, wq
, pd
, MLX5_GET(qpc
, qpc
, pd
));
1362 MLX5_SET64(wq
, wq
, dbr_addr
, MLX5_GET64(qpc
, qpc
, dbr_addr
));
1363 MLX5_SET(wq
, wq
, log_wq_stride
, MLX5_GET(qpc
, qpc
, log_rq_stride
) + 4);
1364 MLX5_SET(wq
, wq
, log_wq_pg_sz
, MLX5_GET(qpc
, qpc
, log_page_size
));
1365 MLX5_SET(wq
, wq
, log_wq_sz
, MLX5_GET(qpc
, qpc
, log_rq_size
));
1367 pas
= (__be64
*)MLX5_ADDR_OF(wq
, wq
, pas
);
1368 qp_pas
= (__be64
*)MLX5_ADDR_OF(create_qp_in
, qpin
, pas
);
1369 memcpy(pas
, qp_pas
, rq_pas_size
);
1371 err
= mlx5_core_create_rq_tracked(dev
->mdev
, in
, inlen
, &rq
->base
.mqp
);
1378 static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev
*dev
,
1379 struct mlx5_ib_rq
*rq
)
1381 mlx5_core_destroy_rq_tracked(dev
->mdev
, &rq
->base
.mqp
);
1384 static bool tunnel_offload_supported(struct mlx5_core_dev
*dev
)
1386 return (MLX5_CAP_ETH(dev
, tunnel_stateless_vxlan
) ||
1387 MLX5_CAP_ETH(dev
, tunnel_stateless_gre
) ||
1388 MLX5_CAP_ETH(dev
, tunnel_stateless_geneve_rx
));
1391 static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev
*dev
,
1392 struct mlx5_ib_rq
*rq
,
1396 if (qp_flags_en
& (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC
|
1397 MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC
))
1398 mlx5_ib_disable_lb(dev
, false, true);
1399 mlx5_cmd_destroy_tir(dev
->mdev
, rq
->tirn
, to_mpd(pd
)->uid
);
1402 static int create_raw_packet_qp_tir(struct mlx5_ib_dev
*dev
,
1403 struct mlx5_ib_rq
*rq
, u32 tdn
,
1413 inlen
= MLX5_ST_SZ_BYTES(create_tir_in
);
1414 in
= kvzalloc(inlen
, GFP_KERNEL
);
1418 MLX5_SET(create_tir_in
, in
, uid
, to_mpd(pd
)->uid
);
1419 tirc
= MLX5_ADDR_OF(create_tir_in
, in
, ctx
);
1420 MLX5_SET(tirc
, tirc
, disp_type
, MLX5_TIRC_DISP_TYPE_DIRECT
);
1421 MLX5_SET(tirc
, tirc
, inline_rqn
, rq
->base
.mqp
.qpn
);
1422 MLX5_SET(tirc
, tirc
, transport_domain
, tdn
);
1423 if (*qp_flags_en
& MLX5_QP_FLAG_TUNNEL_OFFLOADS
)
1424 MLX5_SET(tirc
, tirc
, tunneled_offload_en
, 1);
1426 if (*qp_flags_en
& MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC
)
1427 lb_flag
|= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST
;
1429 if (*qp_flags_en
& MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC
)
1430 lb_flag
|= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST
;
1433 lb_flag
|= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST
;
1434 *qp_flags_en
|= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC
;
1437 MLX5_SET(tirc
, tirc
, self_lb_block
, lb_flag
);
1439 err
= mlx5_core_create_tir(dev
->mdev
, in
, inlen
, &rq
->tirn
);
1441 if (!err
&& MLX5_GET(tirc
, tirc
, self_lb_block
)) {
1442 err
= mlx5_ib_enable_lb(dev
, false, true);
1445 destroy_raw_packet_qp_tir(dev
, rq
, 0, pd
);
1452 static int create_raw_packet_qp(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
1453 u32
*in
, size_t inlen
,
1455 struct ib_udata
*udata
,
1456 struct mlx5_ib_create_qp_resp
*resp
)
1458 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
= &qp
->raw_packet_qp
;
1459 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
1460 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
1461 struct mlx5_ib_ucontext
*mucontext
= rdma_udata_to_drv_context(
1462 udata
, struct mlx5_ib_ucontext
, ibucontext
);
1464 u32 tdn
= mucontext
->tdn
;
1465 u16 uid
= to_mpd(pd
)->uid
;
1467 if (qp
->sq
.wqe_cnt
) {
1468 err
= create_raw_packet_qp_tis(dev
, qp
, sq
, tdn
, pd
);
1472 err
= create_raw_packet_qp_sq(dev
, udata
, sq
, in
, pd
);
1474 goto err_destroy_tis
;
1477 resp
->tisn
= sq
->tisn
;
1478 resp
->comp_mask
|= MLX5_IB_CREATE_QP_RESP_MASK_TISN
;
1479 resp
->sqn
= sq
->base
.mqp
.qpn
;
1480 resp
->comp_mask
|= MLX5_IB_CREATE_QP_RESP_MASK_SQN
;
1483 sq
->base
.container_mibqp
= qp
;
1484 sq
->base
.mqp
.event
= mlx5_ib_qp_event
;
1487 if (qp
->rq
.wqe_cnt
) {
1488 rq
->base
.container_mibqp
= qp
;
1490 if (qp
->flags
& MLX5_IB_QP_CVLAN_STRIPPING
)
1491 rq
->flags
|= MLX5_IB_RQ_CVLAN_STRIPPING
;
1492 if (qp
->flags
& MLX5_IB_QP_PCI_WRITE_END_PADDING
)
1493 rq
->flags
|= MLX5_IB_RQ_PCI_WRITE_END_PADDING
;
1494 err
= create_raw_packet_qp_rq(dev
, rq
, in
, inlen
, pd
);
1496 goto err_destroy_sq
;
1498 err
= create_raw_packet_qp_tir(dev
, rq
, tdn
, &qp
->flags_en
, pd
);
1500 goto err_destroy_rq
;
1503 resp
->rqn
= rq
->base
.mqp
.qpn
;
1504 resp
->comp_mask
|= MLX5_IB_CREATE_QP_RESP_MASK_RQN
;
1505 resp
->tirn
= rq
->tirn
;
1506 resp
->comp_mask
|= MLX5_IB_CREATE_QP_RESP_MASK_TIRN
;
1510 qp
->trans_qp
.base
.mqp
.qpn
= qp
->sq
.wqe_cnt
? sq
->base
.mqp
.qpn
:
1512 err
= ib_copy_to_udata(udata
, resp
, min(udata
->outlen
, sizeof(*resp
)));
1514 goto err_destroy_tir
;
1519 destroy_raw_packet_qp_tir(dev
, rq
, qp
->flags_en
, pd
);
1521 destroy_raw_packet_qp_rq(dev
, rq
);
1523 if (!qp
->sq
.wqe_cnt
)
1525 destroy_raw_packet_qp_sq(dev
, sq
);
1527 destroy_raw_packet_qp_tis(dev
, sq
, pd
);
1532 static void destroy_raw_packet_qp(struct mlx5_ib_dev
*dev
,
1533 struct mlx5_ib_qp
*qp
)
1535 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
= &qp
->raw_packet_qp
;
1536 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
1537 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
1539 if (qp
->rq
.wqe_cnt
) {
1540 destroy_raw_packet_qp_tir(dev
, rq
, qp
->flags_en
, qp
->ibqp
.pd
);
1541 destroy_raw_packet_qp_rq(dev
, rq
);
1544 if (qp
->sq
.wqe_cnt
) {
1545 destroy_raw_packet_qp_sq(dev
, sq
);
1546 destroy_raw_packet_qp_tis(dev
, sq
, qp
->ibqp
.pd
);
1550 static void raw_packet_qp_copy_info(struct mlx5_ib_qp
*qp
,
1551 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
)
1553 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
1554 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
1558 sq
->doorbell
= &qp
->db
;
1559 rq
->doorbell
= &qp
->db
;
1562 static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
)
1564 if (qp
->flags_en
& (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC
|
1565 MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC
))
1566 mlx5_ib_disable_lb(dev
, false, true);
1567 mlx5_cmd_destroy_tir(dev
->mdev
, qp
->rss_qp
.tirn
,
1568 to_mpd(qp
->ibqp
.pd
)->uid
);
1571 static int create_rss_raw_qp_tir(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
1573 struct ib_qp_init_attr
*init_attr
,
1574 struct ib_udata
*udata
)
1576 struct mlx5_ib_ucontext
*mucontext
= rdma_udata_to_drv_context(
1577 udata
, struct mlx5_ib_ucontext
, ibucontext
);
1578 struct mlx5_ib_create_qp_resp resp
= {};
1584 u32 selected_fields
= 0;
1586 size_t min_resp_len
;
1587 u32 tdn
= mucontext
->tdn
;
1588 struct mlx5_ib_create_qp_rss ucmd
= {};
1589 size_t required_cmd_sz
;
1592 if (init_attr
->qp_type
!= IB_QPT_RAW_PACKET
)
1595 if (init_attr
->create_flags
|| init_attr
->send_cq
)
1598 min_resp_len
= offsetof(typeof(resp
), bfreg_index
) + sizeof(resp
.bfreg_index
);
1599 if (udata
->outlen
< min_resp_len
)
1602 required_cmd_sz
= offsetof(typeof(ucmd
), flags
) + sizeof(ucmd
.flags
);
1603 if (udata
->inlen
< required_cmd_sz
) {
1604 mlx5_ib_dbg(dev
, "invalid inlen\n");
1608 if (udata
->inlen
> sizeof(ucmd
) &&
1609 !ib_is_udata_cleared(udata
, sizeof(ucmd
),
1610 udata
->inlen
- sizeof(ucmd
))) {
1611 mlx5_ib_dbg(dev
, "inlen is not supported\n");
1615 if (ib_copy_from_udata(&ucmd
, udata
, min(sizeof(ucmd
), udata
->inlen
))) {
1616 mlx5_ib_dbg(dev
, "copy failed\n");
1620 if (ucmd
.comp_mask
) {
1621 mlx5_ib_dbg(dev
, "invalid comp mask\n");
1625 if (ucmd
.flags
& ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS
|
1626 MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC
|
1627 MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC
)) {
1628 mlx5_ib_dbg(dev
, "invalid flags\n");
1632 if (ucmd
.flags
& MLX5_QP_FLAG_TUNNEL_OFFLOADS
&&
1633 !tunnel_offload_supported(dev
->mdev
)) {
1634 mlx5_ib_dbg(dev
, "tunnel offloads isn't supported\n");
1638 if (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_INNER
&&
1639 !(ucmd
.flags
& MLX5_QP_FLAG_TUNNEL_OFFLOADS
)) {
1640 mlx5_ib_dbg(dev
, "Tunnel offloads must be set for inner RSS\n");
1644 if (ucmd
.flags
& MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC
|| dev
->rep
) {
1645 lb_flag
|= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST
;
1646 qp
->flags_en
|= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC
;
1649 if (ucmd
.flags
& MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC
) {
1650 lb_flag
|= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST
;
1651 qp
->flags_en
|= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC
;
1654 err
= ib_copy_to_udata(udata
, &resp
, min(udata
->outlen
, sizeof(resp
)));
1656 mlx5_ib_dbg(dev
, "copy failed\n");
1660 inlen
= MLX5_ST_SZ_BYTES(create_tir_in
);
1661 in
= kvzalloc(inlen
, GFP_KERNEL
);
1665 MLX5_SET(create_tir_in
, in
, uid
, to_mpd(pd
)->uid
);
1666 tirc
= MLX5_ADDR_OF(create_tir_in
, in
, ctx
);
1667 MLX5_SET(tirc
, tirc
, disp_type
,
1668 MLX5_TIRC_DISP_TYPE_INDIRECT
);
1669 MLX5_SET(tirc
, tirc
, indirect_table
,
1670 init_attr
->rwq_ind_tbl
->ind_tbl_num
);
1671 MLX5_SET(tirc
, tirc
, transport_domain
, tdn
);
1673 hfso
= MLX5_ADDR_OF(tirc
, tirc
, rx_hash_field_selector_outer
);
1675 if (ucmd
.flags
& MLX5_QP_FLAG_TUNNEL_OFFLOADS
)
1676 MLX5_SET(tirc
, tirc
, tunneled_offload_en
, 1);
1678 MLX5_SET(tirc
, tirc
, self_lb_block
, lb_flag
);
1680 if (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_INNER
)
1681 hfso
= MLX5_ADDR_OF(tirc
, tirc
, rx_hash_field_selector_inner
);
1683 hfso
= MLX5_ADDR_OF(tirc
, tirc
, rx_hash_field_selector_outer
);
1685 switch (ucmd
.rx_hash_function
) {
1686 case MLX5_RX_HASH_FUNC_TOEPLITZ
:
1688 void *rss_key
= MLX5_ADDR_OF(tirc
, tirc
, rx_hash_toeplitz_key
);
1689 size_t len
= MLX5_FLD_SZ_BYTES(tirc
, rx_hash_toeplitz_key
);
1691 if (len
!= ucmd
.rx_key_len
) {
1696 MLX5_SET(tirc
, tirc
, rx_hash_fn
, MLX5_RX_HASH_FN_TOEPLITZ
);
1697 MLX5_SET(tirc
, tirc
, rx_hash_symmetric
, 1);
1698 memcpy(rss_key
, ucmd
.rx_hash_key
, len
);
1706 if (!ucmd
.rx_hash_fields_mask
) {
1707 /* special case when this TIR serves as steering entry without hashing */
1708 if (!init_attr
->rwq_ind_tbl
->log_ind_tbl_size
)
1714 if (((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_IPV4
) ||
1715 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_IPV4
)) &&
1716 ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_IPV6
) ||
1717 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_IPV6
))) {
1722 /* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */
1723 if ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_IPV4
) ||
1724 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_IPV4
))
1725 MLX5_SET(rx_hash_field_select
, hfso
, l3_prot_type
,
1726 MLX5_L3_PROT_TYPE_IPV4
);
1727 else if ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_IPV6
) ||
1728 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_IPV6
))
1729 MLX5_SET(rx_hash_field_select
, hfso
, l3_prot_type
,
1730 MLX5_L3_PROT_TYPE_IPV6
);
1732 outer_l4
= ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_PORT_TCP
) ||
1733 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_PORT_TCP
)) << 0 |
1734 ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_PORT_UDP
) ||
1735 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_PORT_UDP
)) << 1 |
1736 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_IPSEC_SPI
) << 2;
1738 /* Check that only one l4 protocol is set */
1739 if (outer_l4
& (outer_l4
- 1)) {
1744 /* If none of TCP & UDP SRC/DST was set - this bit field is ignored */
1745 if ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_PORT_TCP
) ||
1746 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_PORT_TCP
))
1747 MLX5_SET(rx_hash_field_select
, hfso
, l4_prot_type
,
1748 MLX5_L4_PROT_TYPE_TCP
);
1749 else if ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_PORT_UDP
) ||
1750 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_PORT_UDP
))
1751 MLX5_SET(rx_hash_field_select
, hfso
, l4_prot_type
,
1752 MLX5_L4_PROT_TYPE_UDP
);
1754 if ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_IPV4
) ||
1755 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_IPV6
))
1756 selected_fields
|= MLX5_HASH_FIELD_SEL_SRC_IP
;
1758 if ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_IPV4
) ||
1759 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_IPV6
))
1760 selected_fields
|= MLX5_HASH_FIELD_SEL_DST_IP
;
1762 if ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_PORT_TCP
) ||
1763 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_SRC_PORT_UDP
))
1764 selected_fields
|= MLX5_HASH_FIELD_SEL_L4_SPORT
;
1766 if ((ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_PORT_TCP
) ||
1767 (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_DST_PORT_UDP
))
1768 selected_fields
|= MLX5_HASH_FIELD_SEL_L4_DPORT
;
1770 if (ucmd
.rx_hash_fields_mask
& MLX5_RX_HASH_IPSEC_SPI
)
1771 selected_fields
|= MLX5_HASH_FIELD_SEL_IPSEC_SPI
;
1773 MLX5_SET(rx_hash_field_select
, hfso
, selected_fields
, selected_fields
);
1776 err
= mlx5_core_create_tir(dev
->mdev
, in
, inlen
, &qp
->rss_qp
.tirn
);
1778 if (!err
&& MLX5_GET(tirc
, tirc
, self_lb_block
)) {
1779 err
= mlx5_ib_enable_lb(dev
, false, true);
1782 mlx5_cmd_destroy_tir(dev
->mdev
, qp
->rss_qp
.tirn
,
1789 if (mucontext
->devx_uid
) {
1790 resp
.comp_mask
|= MLX5_IB_CREATE_QP_RESP_MASK_TIRN
;
1791 resp
.tirn
= qp
->rss_qp
.tirn
;
1794 err
= ib_copy_to_udata(udata
, &resp
, min(udata
->outlen
, sizeof(resp
)));
1799 /* qpn is reserved for that QP */
1800 qp
->trans_qp
.base
.mqp
.qpn
= 0;
1801 qp
->flags
|= MLX5_IB_QP_RSS
;
1805 mlx5_cmd_destroy_tir(dev
->mdev
, qp
->rss_qp
.tirn
, mucontext
->devx_uid
);
1811 static void configure_responder_scat_cqe(struct ib_qp_init_attr
*init_attr
,
1816 if (init_attr
->qp_type
== MLX5_IB_QPT_DCI
)
1819 rcqe_sz
= mlx5_ib_get_cqe_size(init_attr
->recv_cq
);
1821 if (rcqe_sz
== 128) {
1822 MLX5_SET(qpc
, qpc
, cs_res
, MLX5_RES_SCAT_DATA64_CQE
);
1826 if (init_attr
->qp_type
!= MLX5_IB_QPT_DCT
)
1827 MLX5_SET(qpc
, qpc
, cs_res
, MLX5_RES_SCAT_DATA32_CQE
);
1830 static void configure_requester_scat_cqe(struct mlx5_ib_dev
*dev
,
1831 struct ib_qp_init_attr
*init_attr
,
1832 struct mlx5_ib_create_qp
*ucmd
,
1835 enum ib_qp_type qpt
= init_attr
->qp_type
;
1837 bool allow_scat_cqe
= 0;
1839 if (qpt
== IB_QPT_UC
|| qpt
== IB_QPT_UD
)
1843 allow_scat_cqe
= ucmd
->flags
& MLX5_QP_FLAG_ALLOW_SCATTER_CQE
;
1845 if (!allow_scat_cqe
&& init_attr
->sq_sig_type
!= IB_SIGNAL_ALL_WR
)
1848 scqe_sz
= mlx5_ib_get_cqe_size(init_attr
->send_cq
);
1849 if (scqe_sz
== 128) {
1850 MLX5_SET(qpc
, qpc
, cs_req
, MLX5_REQ_SCAT_DATA64_CQE
);
1854 if (init_attr
->qp_type
!= MLX5_IB_QPT_DCI
||
1855 MLX5_CAP_GEN(dev
->mdev
, dc_req_scat_data_cqe
))
1856 MLX5_SET(qpc
, qpc
, cs_req
, MLX5_REQ_SCAT_DATA32_CQE
);
1859 static int atomic_size_to_mode(int size_mask
)
1861 /* driver does not support atomic_size > 256B
1862 * and does not know how to translate bigger sizes
1864 int supported_size_mask
= size_mask
& 0x1ff;
1867 if (!supported_size_mask
)
1870 log_max_size
= __fls(supported_size_mask
);
1872 if (log_max_size
> 3)
1873 return log_max_size
;
1875 return MLX5_ATOMIC_MODE_8B
;
1878 static int get_atomic_mode(struct mlx5_ib_dev
*dev
,
1879 enum ib_qp_type qp_type
)
1881 u8 atomic_operations
= MLX5_CAP_ATOMIC(dev
->mdev
, atomic_operations
);
1882 u8 atomic
= MLX5_CAP_GEN(dev
->mdev
, atomic
);
1883 int atomic_mode
= -EOPNOTSUPP
;
1884 int atomic_size_mask
;
1889 if (qp_type
== MLX5_IB_QPT_DCT
)
1890 atomic_size_mask
= MLX5_CAP_ATOMIC(dev
->mdev
, atomic_size_dc
);
1892 atomic_size_mask
= MLX5_CAP_ATOMIC(dev
->mdev
, atomic_size_qp
);
1894 if ((atomic_operations
& MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP
) ||
1895 (atomic_operations
& MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD
))
1896 atomic_mode
= atomic_size_to_mode(atomic_size_mask
);
1898 if (atomic_mode
<= 0 &&
1899 (atomic_operations
& MLX5_ATOMIC_OPS_CMP_SWAP
&&
1900 atomic_operations
& MLX5_ATOMIC_OPS_FETCH_ADD
))
1901 atomic_mode
= MLX5_ATOMIC_MODE_IB_COMP
;
1906 static inline bool check_flags_mask(uint64_t input
, uint64_t supported
)
1908 return (input
& ~supported
) == 0;
1911 static int create_qp_common(struct mlx5_ib_dev
*dev
, struct ib_pd
*pd
,
1912 struct ib_qp_init_attr
*init_attr
,
1913 struct ib_udata
*udata
, struct mlx5_ib_qp
*qp
)
1915 struct mlx5_ib_resources
*devr
= &dev
->devr
;
1916 int inlen
= MLX5_ST_SZ_BYTES(create_qp_in
);
1917 struct mlx5_core_dev
*mdev
= dev
->mdev
;
1918 struct mlx5_ib_create_qp_resp resp
= {};
1919 struct mlx5_ib_ucontext
*ucontext
= rdma_udata_to_drv_context(
1920 udata
, struct mlx5_ib_ucontext
, ibucontext
);
1921 struct mlx5_ib_cq
*send_cq
;
1922 struct mlx5_ib_cq
*recv_cq
;
1923 unsigned long flags
;
1924 u32 uidx
= MLX5_IB_DEFAULT_UIDX
;
1925 struct mlx5_ib_create_qp ucmd
;
1926 struct mlx5_ib_qp_base
*base
;
1932 mutex_init(&qp
->mutex
);
1933 spin_lock_init(&qp
->sq
.lock
);
1934 spin_lock_init(&qp
->rq
.lock
);
1936 mlx5_st
= to_mlx5_st(init_attr
->qp_type
);
1940 if (init_attr
->rwq_ind_tbl
) {
1944 err
= create_rss_raw_qp_tir(dev
, qp
, pd
, init_attr
, udata
);
1948 if (init_attr
->create_flags
& IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
) {
1949 if (!MLX5_CAP_GEN(mdev
, block_lb_mc
)) {
1950 mlx5_ib_dbg(dev
, "block multicast loopback isn't supported\n");
1953 qp
->flags
|= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK
;
1957 if (init_attr
->create_flags
&
1958 (IB_QP_CREATE_CROSS_CHANNEL
|
1959 IB_QP_CREATE_MANAGED_SEND
|
1960 IB_QP_CREATE_MANAGED_RECV
)) {
1961 if (!MLX5_CAP_GEN(mdev
, cd
)) {
1962 mlx5_ib_dbg(dev
, "cross-channel isn't supported\n");
1965 if (init_attr
->create_flags
& IB_QP_CREATE_CROSS_CHANNEL
)
1966 qp
->flags
|= MLX5_IB_QP_CROSS_CHANNEL
;
1967 if (init_attr
->create_flags
& IB_QP_CREATE_MANAGED_SEND
)
1968 qp
->flags
|= MLX5_IB_QP_MANAGED_SEND
;
1969 if (init_attr
->create_flags
& IB_QP_CREATE_MANAGED_RECV
)
1970 qp
->flags
|= MLX5_IB_QP_MANAGED_RECV
;
1973 if (init_attr
->qp_type
== IB_QPT_UD
&&
1974 (init_attr
->create_flags
& IB_QP_CREATE_IPOIB_UD_LSO
))
1975 if (!MLX5_CAP_GEN(mdev
, ipoib_basic_offloads
)) {
1976 mlx5_ib_dbg(dev
, "ipoib UD lso qp isn't supported\n");
1980 if (init_attr
->create_flags
& IB_QP_CREATE_SCATTER_FCS
) {
1981 if (init_attr
->qp_type
!= IB_QPT_RAW_PACKET
) {
1982 mlx5_ib_dbg(dev
, "Scatter FCS is supported only for Raw Packet QPs");
1985 if (!MLX5_CAP_GEN(dev
->mdev
, eth_net_offloads
) ||
1986 !MLX5_CAP_ETH(dev
->mdev
, scatter_fcs
)) {
1987 mlx5_ib_dbg(dev
, "Scatter FCS isn't supported\n");
1990 qp
->flags
|= MLX5_IB_QP_CAP_SCATTER_FCS
;
1993 if (init_attr
->sq_sig_type
== IB_SIGNAL_ALL_WR
)
1994 qp
->sq_signal_bits
= MLX5_WQE_CTRL_CQ_UPDATE
;
1996 if (init_attr
->create_flags
& IB_QP_CREATE_CVLAN_STRIPPING
) {
1997 if (!(MLX5_CAP_GEN(dev
->mdev
, eth_net_offloads
) &&
1998 MLX5_CAP_ETH(dev
->mdev
, vlan_cap
)) ||
1999 (init_attr
->qp_type
!= IB_QPT_RAW_PACKET
))
2001 qp
->flags
|= MLX5_IB_QP_CVLAN_STRIPPING
;
2005 if (ib_copy_from_udata(&ucmd
, udata
, sizeof(ucmd
))) {
2006 mlx5_ib_dbg(dev
, "copy failed\n");
2010 if (!check_flags_mask(ucmd
.flags
,
2011 MLX5_QP_FLAG_ALLOW_SCATTER_CQE
|
2012 MLX5_QP_FLAG_BFREG_INDEX
|
2013 MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE
|
2014 MLX5_QP_FLAG_SCATTER_CQE
|
2015 MLX5_QP_FLAG_SIGNATURE
|
2016 MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC
|
2017 MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC
|
2018 MLX5_QP_FLAG_TUNNEL_OFFLOADS
|
2019 MLX5_QP_FLAG_TYPE_DCI
|
2020 MLX5_QP_FLAG_TYPE_DCT
))
2023 err
= get_qp_user_index(ucontext
, &ucmd
, udata
->inlen
, &uidx
);
2027 qp
->wq_sig
= !!(ucmd
.flags
& MLX5_QP_FLAG_SIGNATURE
);
2028 if (MLX5_CAP_GEN(dev
->mdev
, sctr_data_cqe
))
2029 qp
->scat_cqe
= !!(ucmd
.flags
& MLX5_QP_FLAG_SCATTER_CQE
);
2030 if (ucmd
.flags
& MLX5_QP_FLAG_TUNNEL_OFFLOADS
) {
2031 if (init_attr
->qp_type
!= IB_QPT_RAW_PACKET
||
2032 !tunnel_offload_supported(mdev
)) {
2033 mlx5_ib_dbg(dev
, "Tunnel offload isn't supported\n");
2036 qp
->flags_en
|= MLX5_QP_FLAG_TUNNEL_OFFLOADS
;
2039 if (ucmd
.flags
& MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC
) {
2040 if (init_attr
->qp_type
!= IB_QPT_RAW_PACKET
) {
2041 mlx5_ib_dbg(dev
, "Self-LB UC isn't supported\n");
2044 qp
->flags_en
|= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC
;
2047 if (ucmd
.flags
& MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC
) {
2048 if (init_attr
->qp_type
!= IB_QPT_RAW_PACKET
) {
2049 mlx5_ib_dbg(dev
, "Self-LB UM isn't supported\n");
2052 qp
->flags_en
|= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC
;
2055 if (ucmd
.flags
& MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE
) {
2056 if (init_attr
->qp_type
!= IB_QPT_RC
||
2057 !MLX5_CAP_GEN(dev
->mdev
, qp_packet_based
)) {
2058 mlx5_ib_dbg(dev
, "packet based credit mode isn't supported\n");
2061 qp
->flags
|= MLX5_IB_QP_PACKET_BASED_CREDIT
;
2064 if (init_attr
->create_flags
& IB_QP_CREATE_SOURCE_QPN
) {
2065 if (init_attr
->qp_type
!= IB_QPT_UD
||
2066 (MLX5_CAP_GEN(dev
->mdev
, port_type
) !=
2067 MLX5_CAP_PORT_TYPE_IB
) ||
2068 !mlx5_get_flow_namespace(dev
->mdev
, MLX5_FLOW_NAMESPACE_BYPASS
)) {
2069 mlx5_ib_dbg(dev
, "Source QP option isn't supported\n");
2073 qp
->flags
|= MLX5_IB_QP_UNDERLAY
;
2074 qp
->underlay_qpn
= init_attr
->source_qpn
;
2077 qp
->wq_sig
= !!wq_signature
;
2080 base
= (init_attr
->qp_type
== IB_QPT_RAW_PACKET
||
2081 qp
->flags
& MLX5_IB_QP_UNDERLAY
) ?
2082 &qp
->raw_packet_qp
.rq
.base
:
2085 qp
->has_rq
= qp_has_rq(init_attr
);
2086 err
= set_rq_size(dev
, &init_attr
->cap
, qp
->has_rq
,
2087 qp
, udata
? &ucmd
: NULL
);
2089 mlx5_ib_dbg(dev
, "err %d\n", err
);
2096 1 << MLX5_CAP_GEN(mdev
, log_max_qp_sz
);
2097 mlx5_ib_dbg(dev
, "requested sq_wqe_count (%d)\n", ucmd
.sq_wqe_count
);
2098 if (ucmd
.rq_wqe_shift
!= qp
->rq
.wqe_shift
||
2099 ucmd
.rq_wqe_count
!= qp
->rq
.wqe_cnt
) {
2100 mlx5_ib_dbg(dev
, "invalid rq params\n");
2103 if (ucmd
.sq_wqe_count
> max_wqes
) {
2104 mlx5_ib_dbg(dev
, "requested sq_wqe_count (%d) > max allowed (%d)\n",
2105 ucmd
.sq_wqe_count
, max_wqes
);
2108 if (init_attr
->create_flags
&
2109 mlx5_ib_create_qp_sqpn_qp1()) {
2110 mlx5_ib_dbg(dev
, "user-space is not allowed to create UD QPs spoofing as QP1\n");
2113 err
= create_user_qp(dev
, pd
, qp
, udata
, init_attr
, &in
,
2114 &resp
, &inlen
, base
);
2116 mlx5_ib_dbg(dev
, "err %d\n", err
);
2118 err
= create_kernel_qp(dev
, init_attr
, qp
, &in
, &inlen
,
2121 mlx5_ib_dbg(dev
, "err %d\n", err
);
2127 in
= kvzalloc(inlen
, GFP_KERNEL
);
2131 qp
->create_type
= MLX5_QP_EMPTY
;
	if (is_sqp(init_attr->qp_type))
		qp->port = init_attr->port_num;

	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);

	MLX5_SET(qpc, qpc, st, mlx5_st);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);

	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
		MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
	else
		MLX5_SET(qpc, qpc, latency_sensitive, 1);

	if (qp->wq_sig)
		MLX5_SET(qpc, qpc, wq_signature, 1);

	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		MLX5_SET(qpc, qpc, block_lb_mc, 1);

	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
		MLX5_SET(qpc, qpc, cd_master, 1);
	if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
		MLX5_SET(qpc, qpc, cd_slave_send, 1);
	if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
		MLX5_SET(qpc, qpc, cd_slave_receive, 1);
	if (qp->flags & MLX5_IB_QP_PACKET_BASED_CREDIT)
		MLX5_SET(qpc, qpc, req_e2e_credit_mode, 1);
	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
		configure_responder_scat_cqe(init_attr, qpc);
		configure_requester_scat_cqe(dev, init_attr,
					     udata ? &ucmd : NULL,
					     qpc);
	}

	if (qp->rq.wqe_cnt) {
		MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
		MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
	}

	MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));

	if (qp->sq.wqe_cnt) {
		MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
	} else {
		MLX5_SET(qpc, qpc, no_sq, 1);
		if (init_attr->srq &&
		    init_attr->srq->srq_type == IB_SRQT_TM)
			MLX5_SET(qpc, qpc, offload_type,
				 MLX5_QPC_OFFLOAD_TYPE_RNDV);
	}

	/* Set default resources */
	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn);
		break;
	case IB_QPT_XRC_INI:
		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
		MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
		break;
	default:
		if (init_attr->srq) {
			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn);
		} else {
			MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn);
		}
	}

	if (init_attr->send_cq)
		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);

	if (init_attr->recv_cq)
		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn);

	MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);

	/* 0xffffff means we ask to work with cqe version 0 */
	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
		MLX5_SET(qpc, qpc, user_index, uidx);

	/* we use IB_QP_CREATE_IPOIB_UD_LSO to indicate an ipoib qp */
	if (init_attr->qp_type == IB_QPT_UD &&
	    (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
		MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
		qp->flags |= MLX5_IB_QP_LSO;
	}
2228 if (init_attr
->create_flags
& IB_QP_CREATE_PCI_WRITE_END_PADDING
) {
2229 if (!MLX5_CAP_GEN(dev
->mdev
, end_pad
)) {
2230 mlx5_ib_dbg(dev
, "scatter end padding is not supported\n");
2233 } else if (init_attr
->qp_type
!= IB_QPT_RAW_PACKET
) {
2234 MLX5_SET(qpc
, qpc
, end_padding_mode
,
2235 MLX5_WQ_END_PAD_MODE_ALIGN
);
2237 qp
->flags
|= MLX5_IB_QP_PCI_WRITE_END_PADDING
;
2246 if (init_attr
->qp_type
== IB_QPT_RAW_PACKET
||
2247 qp
->flags
& MLX5_IB_QP_UNDERLAY
) {
2248 qp
->raw_packet_qp
.sq
.ubuffer
.buf_addr
= ucmd
.sq_buf_addr
;
2249 raw_packet_qp_copy_info(qp
, &qp
->raw_packet_qp
);
2250 err
= create_raw_packet_qp(dev
, qp
, in
, inlen
, pd
, udata
,
2253 err
= mlx5_core_create_qp(dev
->mdev
, &base
->mqp
, in
, inlen
);
2257 mlx5_ib_dbg(dev
, "create qp failed\n");
2263 base
->container_mibqp
= qp
;
2264 base
->mqp
.event
= mlx5_ib_qp_event
;
2266 get_cqs(init_attr
->qp_type
, init_attr
->send_cq
, init_attr
->recv_cq
,
2267 &send_cq
, &recv_cq
);
2268 spin_lock_irqsave(&dev
->reset_flow_resource_lock
, flags
);
2269 mlx5_ib_lock_cqs(send_cq
, recv_cq
);
2270 /* Maintain device to QPs access, needed for further handling via reset
2273 list_add_tail(&qp
->qps_list
, &dev
->qp_list
);
2274 /* Maintain CQ to QPs access, needed for further handling via reset flow
2277 list_add_tail(&qp
->cq_send_list
, &send_cq
->list_send_qp
);
2279 list_add_tail(&qp
->cq_recv_list
, &recv_cq
->list_recv_qp
);
2280 mlx5_ib_unlock_cqs(send_cq
, recv_cq
);
2281 spin_unlock_irqrestore(&dev
->reset_flow_resource_lock
, flags
);
2286 if (qp
->create_type
== MLX5_QP_USER
)
2287 destroy_qp_user(dev
, pd
, qp
, base
);
2288 else if (qp
->create_type
== MLX5_QP_KERNEL
)
2289 destroy_qp_kernel(dev
, qp
);
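/* Lock the send and receive CQs in a fixed order (by CQN) so that the reset
 * flow and the destroy path cannot deadlock when two QPs use the same pair of
 * CQs in opposite roles. The sparse __acquire()/__release() annotations below
 * keep the lock-balance checker consistent on the branches that only take one
 * of the two locks.
 */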
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_lock(&send_cq->lock);
				spin_lock_nested(&recv_cq->lock,
						 SINGLE_DEPTH_NESTING);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				spin_lock(&send_cq->lock);
				__acquire(&recv_cq->lock);
			} else {
				spin_lock(&recv_cq->lock);
				spin_lock_nested(&send_cq->lock,
						 SINGLE_DEPTH_NESTING);
			}
		} else {
			spin_lock(&send_cq->lock);
			__acquire(&recv_cq->lock);
		}
	} else if (recv_cq) {
		spin_lock(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	}
}
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_unlock(&recv_cq->lock);
				spin_unlock(&send_cq->lock);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				__release(&recv_cq->lock);
				spin_unlock(&send_cq->lock);
			} else {
				spin_unlock(&send_cq->lock);
				spin_unlock(&recv_cq->lock);
			}
		} else {
			__release(&recv_cq->lock);
			spin_unlock(&send_cq->lock);
		}
	} else if (recv_cq) {
		__release(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	} else {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	}
}
static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
	return to_mpd(qp->ibqp.pd);
}
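/* Resolve which CQs a QP of the given transport type actually uses: XRC
 * targets use neither CQ, send-only types (XRC initiator, UMR) use only the
 * send CQ, and the remaining types use both send and receive CQs.
 */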
static void get_cqs(enum ib_qp_type qp_type,
		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
	switch (qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	case MLX5_IB_QPT_REG_UMR:
	case IB_QPT_XRC_INI:
		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
		*recv_cq = NULL;
		break;

	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	case IB_QPT_RAW_PACKET:
		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
		*recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
		break;

	default:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	}
}
2394 static int modify_raw_packet_qp(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
2395 const struct mlx5_modify_raw_qp_param
*raw_qp_param
,
2396 u8 lag_tx_affinity
);
2398 static void destroy_qp_common(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
)
2400 struct mlx5_ib_cq
*send_cq
, *recv_cq
;
2401 struct mlx5_ib_qp_base
*base
;
2402 unsigned long flags
;
2405 if (qp
->ibqp
.rwq_ind_tbl
) {
2406 destroy_rss_raw_qp_tir(dev
, qp
);
2410 base
= (qp
->ibqp
.qp_type
== IB_QPT_RAW_PACKET
||
2411 qp
->flags
& MLX5_IB_QP_UNDERLAY
) ?
2412 &qp
->raw_packet_qp
.rq
.base
:
2415 if (qp
->state
!= IB_QPS_RESET
) {
2416 if (qp
->ibqp
.qp_type
!= IB_QPT_RAW_PACKET
&&
2417 !(qp
->flags
& MLX5_IB_QP_UNDERLAY
)) {
2418 err
= mlx5_core_qp_modify(dev
->mdev
,
2419 MLX5_CMD_OP_2RST_QP
, 0,
2422 struct mlx5_modify_raw_qp_param raw_qp_param
= {
2423 .operation
= MLX5_CMD_OP_2RST_QP
2426 err
= modify_raw_packet_qp(dev
, qp
, &raw_qp_param
, 0);
2429 mlx5_ib_warn(dev
, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
2433 get_cqs(qp
->ibqp
.qp_type
, qp
->ibqp
.send_cq
, qp
->ibqp
.recv_cq
,
2434 &send_cq
, &recv_cq
);
2436 spin_lock_irqsave(&dev
->reset_flow_resource_lock
, flags
);
2437 mlx5_ib_lock_cqs(send_cq
, recv_cq
);
2438 /* del from lists under both locks above to protect reset flow paths */
2439 list_del(&qp
->qps_list
);
2441 list_del(&qp
->cq_send_list
);
2444 list_del(&qp
->cq_recv_list
);
2446 if (qp
->create_type
== MLX5_QP_KERNEL
) {
2447 __mlx5_ib_cq_clean(recv_cq
, base
->mqp
.qpn
,
2448 qp
->ibqp
.srq
? to_msrq(qp
->ibqp
.srq
) : NULL
);
2449 if (send_cq
!= recv_cq
)
2450 __mlx5_ib_cq_clean(send_cq
, base
->mqp
.qpn
,
2453 mlx5_ib_unlock_cqs(send_cq
, recv_cq
);
2454 spin_unlock_irqrestore(&dev
->reset_flow_resource_lock
, flags
);
2456 if (qp
->ibqp
.qp_type
== IB_QPT_RAW_PACKET
||
2457 qp
->flags
& MLX5_IB_QP_UNDERLAY
) {
2458 destroy_raw_packet_qp(dev
, qp
);
2460 err
= mlx5_core_destroy_qp(dev
->mdev
, &base
->mqp
);
2462 mlx5_ib_warn(dev
, "failed to destroy QP 0x%x\n",
2466 if (qp
->create_type
== MLX5_QP_KERNEL
)
2467 destroy_qp_kernel(dev
, qp
);
2468 else if (qp
->create_type
== MLX5_QP_USER
)
2469 destroy_qp_user(dev
, &get_pd(qp
)->ibpd
, qp
, base
);
2472 static const char *ib_qp_type_str(enum ib_qp_type type
)
2476 return "IB_QPT_SMI";
2478 return "IB_QPT_GSI";
2485 case IB_QPT_RAW_IPV6
:
2486 return "IB_QPT_RAW_IPV6";
2487 case IB_QPT_RAW_ETHERTYPE
:
2488 return "IB_QPT_RAW_ETHERTYPE";
2489 case IB_QPT_XRC_INI
:
2490 return "IB_QPT_XRC_INI";
2491 case IB_QPT_XRC_TGT
:
2492 return "IB_QPT_XRC_TGT";
2493 case IB_QPT_RAW_PACKET
:
2494 return "IB_QPT_RAW_PACKET";
2495 case MLX5_IB_QPT_REG_UMR
:
2496 return "MLX5_IB_QPT_REG_UMR";
2498 return "IB_QPT_DRIVER";
2501 return "Invalid QP type";
2505 static struct ib_qp
*mlx5_ib_create_dct(struct ib_pd
*pd
,
2506 struct ib_qp_init_attr
*attr
,
2507 struct mlx5_ib_create_qp
*ucmd
,
2508 struct ib_udata
*udata
)
2510 struct mlx5_ib_ucontext
*ucontext
= rdma_udata_to_drv_context(
2511 udata
, struct mlx5_ib_ucontext
, ibucontext
);
2512 struct mlx5_ib_qp
*qp
;
2514 u32 uidx
= MLX5_IB_DEFAULT_UIDX
;
2517 if (!attr
->srq
|| !attr
->recv_cq
)
2518 return ERR_PTR(-EINVAL
);
2520 err
= get_qp_user_index(ucontext
, ucmd
, sizeof(*ucmd
), &uidx
);
2522 return ERR_PTR(err
);
2524 qp
= kzalloc(sizeof(*qp
), GFP_KERNEL
);
2526 return ERR_PTR(-ENOMEM
);
2528 qp
->dct
.in
= kzalloc(MLX5_ST_SZ_BYTES(create_dct_in
), GFP_KERNEL
);
2534 MLX5_SET(create_dct_in
, qp
->dct
.in
, uid
, to_mpd(pd
)->uid
);
2535 dctc
= MLX5_ADDR_OF(create_dct_in
, qp
->dct
.in
, dct_context_entry
);
2536 qp
->qp_sub_type
= MLX5_IB_QPT_DCT
;
2537 MLX5_SET(dctc
, dctc
, pd
, to_mpd(pd
)->pdn
);
2538 MLX5_SET(dctc
, dctc
, srqn_xrqn
, to_msrq(attr
->srq
)->msrq
.srqn
);
2539 MLX5_SET(dctc
, dctc
, cqn
, to_mcq(attr
->recv_cq
)->mcq
.cqn
);
2540 MLX5_SET64(dctc
, dctc
, dc_access_key
, ucmd
->access_key
);
2541 MLX5_SET(dctc
, dctc
, user_index
, uidx
);
2543 if (ucmd
->flags
& MLX5_QP_FLAG_SCATTER_CQE
)
2544 configure_responder_scat_cqe(attr
, dctc
);
2546 qp
->state
= IB_QPS_RESET
;
2551 return ERR_PTR(err
);
2554 static int set_mlx_qp_type(struct mlx5_ib_dev
*dev
,
2555 struct ib_qp_init_attr
*init_attr
,
2556 struct mlx5_ib_create_qp
*ucmd
,
2557 struct ib_udata
*udata
)
2559 enum { MLX_QP_FLAGS
= MLX5_QP_FLAG_TYPE_DCT
| MLX5_QP_FLAG_TYPE_DCI
};
2565 if (udata
->inlen
< sizeof(*ucmd
)) {
2566 mlx5_ib_dbg(dev
, "create_qp user command is smaller than expected\n");
2569 err
= ib_copy_from_udata(ucmd
, udata
, sizeof(*ucmd
));
2573 if ((ucmd
->flags
& MLX_QP_FLAGS
) == MLX5_QP_FLAG_TYPE_DCI
) {
2574 init_attr
->qp_type
= MLX5_IB_QPT_DCI
;
2576 if ((ucmd
->flags
& MLX_QP_FLAGS
) == MLX5_QP_FLAG_TYPE_DCT
) {
2577 init_attr
->qp_type
= MLX5_IB_QPT_DCT
;
2579 mlx5_ib_dbg(dev
, "Invalid QP flags\n");
2584 if (!MLX5_CAP_GEN(dev
->mdev
, dct
)) {
2585 mlx5_ib_dbg(dev
, "DC transport is not supported\n");
2592 struct ib_qp
*mlx5_ib_create_qp(struct ib_pd
*pd
,
2593 struct ib_qp_init_attr
*verbs_init_attr
,
2594 struct ib_udata
*udata
)
2596 struct mlx5_ib_dev
*dev
;
2597 struct mlx5_ib_qp
*qp
;
2600 struct ib_qp_init_attr mlx_init_attr
;
2601 struct ib_qp_init_attr
*init_attr
= verbs_init_attr
;
2602 struct mlx5_ib_ucontext
*ucontext
= rdma_udata_to_drv_context(
2603 udata
, struct mlx5_ib_ucontext
, ibucontext
);
2606 dev
= to_mdev(pd
->device
);
2608 if (init_attr
->qp_type
== IB_QPT_RAW_PACKET
) {
2610 mlx5_ib_dbg(dev
, "Raw Packet QP is not supported for kernel consumers\n");
2611 return ERR_PTR(-EINVAL
);
2612 } else if (!ucontext
->cqe_version
) {
2613 mlx5_ib_dbg(dev
, "Raw Packet QP is only supported for CQE version > 0\n");
2614 return ERR_PTR(-EINVAL
);
2618 /* being cautious here */
2619 if (init_attr
->qp_type
!= IB_QPT_XRC_TGT
&&
2620 init_attr
->qp_type
!= MLX5_IB_QPT_REG_UMR
) {
2621 pr_warn("%s: no PD for transport %s\n", __func__
,
2622 ib_qp_type_str(init_attr
->qp_type
));
2623 return ERR_PTR(-EINVAL
);
2625 dev
= to_mdev(to_mxrcd(init_attr
->xrcd
)->ibxrcd
.device
);
2628 if (init_attr
->qp_type
== IB_QPT_DRIVER
) {
2629 struct mlx5_ib_create_qp ucmd
;
2631 init_attr
= &mlx_init_attr
;
2632 memcpy(init_attr
, verbs_init_attr
, sizeof(*verbs_init_attr
));
2633 err
= set_mlx_qp_type(dev
, init_attr
, &ucmd
, udata
);
2635 return ERR_PTR(err
);
2637 if (init_attr
->qp_type
== MLX5_IB_QPT_DCI
) {
2638 if (init_attr
->cap
.max_recv_wr
||
2639 init_attr
->cap
.max_recv_sge
) {
2640 mlx5_ib_dbg(dev
, "DCI QP requires zero size receive queue\n");
2641 return ERR_PTR(-EINVAL
);
2644 return mlx5_ib_create_dct(pd
, init_attr
, &ucmd
, udata
);
2648 switch (init_attr
->qp_type
) {
2649 case IB_QPT_XRC_TGT
:
2650 case IB_QPT_XRC_INI
:
2651 if (!MLX5_CAP_GEN(dev
->mdev
, xrc
)) {
2652 mlx5_ib_dbg(dev
, "XRC not supported\n");
2653 return ERR_PTR(-ENOSYS
);
2655 init_attr
->recv_cq
= NULL
;
2656 if (init_attr
->qp_type
== IB_QPT_XRC_TGT
) {
2657 xrcdn
= to_mxrcd(init_attr
->xrcd
)->xrcdn
;
2658 init_attr
->send_cq
= NULL
;
2662 case IB_QPT_RAW_PACKET
:
2667 case MLX5_IB_QPT_HW_GSI
:
2668 case MLX5_IB_QPT_REG_UMR
:
2669 case MLX5_IB_QPT_DCI
:
2670 qp
= kzalloc(sizeof(*qp
), GFP_KERNEL
);
2672 return ERR_PTR(-ENOMEM
);
2674 err
= create_qp_common(dev
, pd
, init_attr
, udata
, qp
);
2676 mlx5_ib_dbg(dev
, "create_qp_common failed\n");
2678 return ERR_PTR(err
);
2681 if (is_qp0(init_attr
->qp_type
))
2682 qp
->ibqp
.qp_num
= 0;
2683 else if (is_qp1(init_attr
->qp_type
))
2684 qp
->ibqp
.qp_num
= 1;
2686 qp
->ibqp
.qp_num
= qp
->trans_qp
.base
.mqp
.qpn
;
2688 mlx5_ib_dbg(dev
, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
2689 qp
->ibqp
.qp_num
, qp
->trans_qp
.base
.mqp
.qpn
,
2690 init_attr
->recv_cq
? to_mcq(init_attr
->recv_cq
)->mcq
.cqn
: -1,
2691 init_attr
->send_cq
? to_mcq(init_attr
->send_cq
)->mcq
.cqn
: -1);
2693 qp
->trans_qp
.xrcdn
= xrcdn
;
2698 return mlx5_ib_gsi_create_qp(pd
, init_attr
);
2700 case IB_QPT_RAW_IPV6
:
2701 case IB_QPT_RAW_ETHERTYPE
:
2704 mlx5_ib_dbg(dev
, "unsupported qp type %d\n",
2705 init_attr
->qp_type
);
2706 /* Don't support raw QPs */
2707 return ERR_PTR(-EINVAL
);
2710 if (verbs_init_attr
->qp_type
== IB_QPT_DRIVER
)
2711 qp
->qp_sub_type
= init_attr
->qp_type
;
static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
{
	struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);
	int err;

	if (mqp->state == IB_QPS_RTR) {
		err = mlx5_core_destroy_dct(dev->mdev, &mqp->dct.mdct);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
			return err;
		}
	}

	kfree(mqp->dct.in);
	kfree(mqp);
	return 0;
}

int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	if (unlikely(qp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_destroy_qp(qp);

	if (mqp->qp_sub_type == MLX5_IB_QPT_DCT)
		return mlx5_ib_destroy_dct(mqp);

	destroy_qp_common(dev, mqp);

	kfree(mqp);

	return 0;
}
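/* Translate the IB remote access flags into the RRE/RWE/RAE bits of the QP
 * context. When the responder depth is zero, remote read and atomic access
 * are masked off since no responder resources were requested.
 */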
static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
				const struct ib_qp_attr *attr,
				int attr_mask, __be32 *hw_access_flags_be)
{
	u8 dest_rd_atomic;
	u32 access_flags, hw_access_flags = 0;

	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->trans_qp.resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->trans_qp.atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX5_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
		int atomic_mode;

		atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type);
		if (atomic_mode < 0)
			return -EOPNOTSUPP;

		hw_access_flags |= MLX5_QP_BIT_RAE;
		hw_access_flags |= atomic_mode << MLX5_ATOMIC_MODE_OFFSET;
	}

	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX5_QP_BIT_RWE;

	*hw_access_flags_be = cpu_to_be32(hw_access_flags);

	return 0;
}
enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};
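/* Convert an IB static rate enum into the device's stat_rate encoding,
 * stepping down until a rate advertised in stat_rate_support is found.
 * IB_RATE_PORT_CURRENT maps to 0, i.e. use the port's current rate.
 */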
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
	if (rate == IB_RATE_PORT_CURRENT)
		return 0;

	if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_600_GBPS)
		return -EINVAL;

	while (rate != IB_RATE_PORT_CURRENT &&
	       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
		 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
		--rate;

	return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
}
2818 static int modify_raw_packet_eth_prio(struct mlx5_core_dev
*dev
,
2819 struct mlx5_ib_sq
*sq
, u8 sl
,
2827 inlen
= MLX5_ST_SZ_BYTES(modify_tis_in
);
2828 in
= kvzalloc(inlen
, GFP_KERNEL
);
2832 MLX5_SET(modify_tis_in
, in
, bitmask
.prio
, 1);
2833 MLX5_SET(modify_tis_in
, in
, uid
, to_mpd(pd
)->uid
);
2835 tisc
= MLX5_ADDR_OF(modify_tis_in
, in
, ctx
);
2836 MLX5_SET(tisc
, tisc
, prio
, ((sl
& 0x7) << 1));
2838 err
= mlx5_core_modify_tis(dev
, sq
->tisn
, in
, inlen
);
2845 static int modify_raw_packet_tx_affinity(struct mlx5_core_dev
*dev
,
2846 struct mlx5_ib_sq
*sq
, u8 tx_affinity
,
2854 inlen
= MLX5_ST_SZ_BYTES(modify_tis_in
);
2855 in
= kvzalloc(inlen
, GFP_KERNEL
);
2859 MLX5_SET(modify_tis_in
, in
, bitmask
.lag_tx_port_affinity
, 1);
2860 MLX5_SET(modify_tis_in
, in
, uid
, to_mpd(pd
)->uid
);
2862 tisc
= MLX5_ADDR_OF(modify_tis_in
, in
, ctx
);
2863 MLX5_SET(tisc
, tisc
, lag_tx_port_affinity
, tx_affinity
);
2865 err
= mlx5_core_modify_tis(dev
, sq
->tisn
, in
, inlen
);
2872 static int mlx5_set_path(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
2873 const struct rdma_ah_attr
*ah
,
2874 struct mlx5_qp_path
*path
, u8 port
, int attr_mask
,
2875 u32 path_flags
, const struct ib_qp_attr
*attr
,
2878 const struct ib_global_route
*grh
= rdma_ah_read_grh(ah
);
2880 enum ib_gid_type gid_type
;
2881 u8 ah_flags
= rdma_ah_get_ah_flags(ah
);
2882 u8 sl
= rdma_ah_get_sl(ah
);
2884 if (attr_mask
& IB_QP_PKEY_INDEX
)
2885 path
->pkey_index
= cpu_to_be16(alt
? attr
->alt_pkey_index
:
2888 if (ah_flags
& IB_AH_GRH
) {
2889 if (grh
->sgid_index
>=
2890 dev
->mdev
->port_caps
[port
- 1].gid_table_len
) {
2891 pr_err("sgid_index (%u) too large. max is %d\n",
2893 dev
->mdev
->port_caps
[port
- 1].gid_table_len
);
2898 if (ah
->type
== RDMA_AH_ATTR_TYPE_ROCE
) {
2899 if (!(ah_flags
& IB_AH_GRH
))
2902 memcpy(path
->rmac
, ah
->roce
.dmac
, sizeof(ah
->roce
.dmac
));
2903 if (qp
->ibqp
.qp_type
== IB_QPT_RC
||
2904 qp
->ibqp
.qp_type
== IB_QPT_UC
||
2905 qp
->ibqp
.qp_type
== IB_QPT_XRC_INI
||
2906 qp
->ibqp
.qp_type
== IB_QPT_XRC_TGT
)
2908 mlx5_get_roce_udp_sport(dev
, ah
->grh
.sgid_attr
);
2909 path
->dci_cfi_prio_sl
= (sl
& 0x7) << 4;
2910 gid_type
= ah
->grh
.sgid_attr
->gid_type
;
2911 if (gid_type
== IB_GID_TYPE_ROCE_UDP_ENCAP
)
2912 path
->ecn_dscp
= (grh
->traffic_class
>> 2) & 0x3f;
2914 path
->fl_free_ar
= (path_flags
& MLX5_PATH_FLAG_FL
) ? 0x80 : 0;
2916 (path_flags
& MLX5_PATH_FLAG_FREE_AR
) ? 0x40 : 0;
2917 path
->rlid
= cpu_to_be16(rdma_ah_get_dlid(ah
));
2918 path
->grh_mlid
= rdma_ah_get_path_bits(ah
) & 0x7f;
2919 if (ah_flags
& IB_AH_GRH
)
2920 path
->grh_mlid
|= 1 << 7;
2921 path
->dci_cfi_prio_sl
= sl
& 0xf;
2924 if (ah_flags
& IB_AH_GRH
) {
2925 path
->mgid_index
= grh
->sgid_index
;
2926 path
->hop_limit
= grh
->hop_limit
;
2927 path
->tclass_flowlabel
=
2928 cpu_to_be32((grh
->traffic_class
<< 20) |
2930 memcpy(path
->rgid
, grh
->dgid
.raw
, 16);
2933 err
= ib_rate_to_mlx5(dev
, rdma_ah_get_static_rate(ah
));
2936 path
->static_rate
= err
;
2939 if (attr_mask
& IB_QP_TIMEOUT
)
2940 path
->ackto_lt
= (alt
? attr
->alt_timeout
: attr
->timeout
) << 3;
2942 if ((qp
->ibqp
.qp_type
== IB_QPT_RAW_PACKET
) && qp
->sq
.wqe_cnt
)
2943 return modify_raw_packet_eth_prio(dev
->mdev
,
2944 &qp
->raw_packet_qp
.sq
,
2945 sl
& 0xf, qp
->ibqp
.pd
);
2950 static enum mlx5_qp_optpar opt_mask
[MLX5_QP_NUM_STATE
][MLX5_QP_NUM_STATE
][MLX5_QP_ST_MAX
] = {
2951 [MLX5_QP_STATE_INIT
] = {
2952 [MLX5_QP_STATE_INIT
] = {
2953 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_RRE
|
2954 MLX5_QP_OPTPAR_RAE
|
2955 MLX5_QP_OPTPAR_RWE
|
2956 MLX5_QP_OPTPAR_PKEY_INDEX
|
2957 MLX5_QP_OPTPAR_PRI_PORT
,
2958 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_RWE
|
2959 MLX5_QP_OPTPAR_PKEY_INDEX
|
2960 MLX5_QP_OPTPAR_PRI_PORT
,
2961 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_PKEY_INDEX
|
2962 MLX5_QP_OPTPAR_Q_KEY
|
2963 MLX5_QP_OPTPAR_PRI_PORT
,
2965 [MLX5_QP_STATE_RTR
] = {
2966 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
2967 MLX5_QP_OPTPAR_RRE
|
2968 MLX5_QP_OPTPAR_RAE
|
2969 MLX5_QP_OPTPAR_RWE
|
2970 MLX5_QP_OPTPAR_PKEY_INDEX
,
2971 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
2972 MLX5_QP_OPTPAR_RWE
|
2973 MLX5_QP_OPTPAR_PKEY_INDEX
,
2974 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_PKEY_INDEX
|
2975 MLX5_QP_OPTPAR_Q_KEY
,
2976 [MLX5_QP_ST_MLX
] = MLX5_QP_OPTPAR_PKEY_INDEX
|
2977 MLX5_QP_OPTPAR_Q_KEY
,
2978 [MLX5_QP_ST_XRC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
2979 MLX5_QP_OPTPAR_RRE
|
2980 MLX5_QP_OPTPAR_RAE
|
2981 MLX5_QP_OPTPAR_RWE
|
2982 MLX5_QP_OPTPAR_PKEY_INDEX
,
2985 [MLX5_QP_STATE_RTR
] = {
2986 [MLX5_QP_STATE_RTS
] = {
2987 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
2988 MLX5_QP_OPTPAR_RRE
|
2989 MLX5_QP_OPTPAR_RAE
|
2990 MLX5_QP_OPTPAR_RWE
|
2991 MLX5_QP_OPTPAR_PM_STATE
|
2992 MLX5_QP_OPTPAR_RNR_TIMEOUT
,
2993 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_ALT_ADDR_PATH
|
2994 MLX5_QP_OPTPAR_RWE
|
2995 MLX5_QP_OPTPAR_PM_STATE
,
2996 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_Q_KEY
,
2999 [MLX5_QP_STATE_RTS
] = {
3000 [MLX5_QP_STATE_RTS
] = {
3001 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_RRE
|
3002 MLX5_QP_OPTPAR_RAE
|
3003 MLX5_QP_OPTPAR_RWE
|
3004 MLX5_QP_OPTPAR_RNR_TIMEOUT
|
3005 MLX5_QP_OPTPAR_PM_STATE
|
3006 MLX5_QP_OPTPAR_ALT_ADDR_PATH
,
3007 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_RWE
|
3008 MLX5_QP_OPTPAR_PM_STATE
|
3009 MLX5_QP_OPTPAR_ALT_ADDR_PATH
,
3010 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_Q_KEY
|
3011 MLX5_QP_OPTPAR_SRQN
|
3012 MLX5_QP_OPTPAR_CQN_RCV
,
3015 [MLX5_QP_STATE_SQER
] = {
3016 [MLX5_QP_STATE_RTS
] = {
3017 [MLX5_QP_ST_UD
] = MLX5_QP_OPTPAR_Q_KEY
,
3018 [MLX5_QP_ST_MLX
] = MLX5_QP_OPTPAR_Q_KEY
,
3019 [MLX5_QP_ST_UC
] = MLX5_QP_OPTPAR_RWE
,
3020 [MLX5_QP_ST_RC
] = MLX5_QP_OPTPAR_RNR_TIMEOUT
|
3021 MLX5_QP_OPTPAR_RWE
|
3022 MLX5_QP_OPTPAR_RAE
|
3028 static int ib_nr_to_mlx5_nr(int ib_mask
)
3033 case IB_QP_CUR_STATE
:
3035 case IB_QP_EN_SQD_ASYNC_NOTIFY
:
3037 case IB_QP_ACCESS_FLAGS
:
3038 return MLX5_QP_OPTPAR_RWE
| MLX5_QP_OPTPAR_RRE
|
3040 case IB_QP_PKEY_INDEX
:
3041 return MLX5_QP_OPTPAR_PKEY_INDEX
;
3043 return MLX5_QP_OPTPAR_PRI_PORT
;
3045 return MLX5_QP_OPTPAR_Q_KEY
;
3047 return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH
|
3048 MLX5_QP_OPTPAR_PRI_PORT
;
3049 case IB_QP_PATH_MTU
:
3052 return MLX5_QP_OPTPAR_ACK_TIMEOUT
;
3053 case IB_QP_RETRY_CNT
:
3054 return MLX5_QP_OPTPAR_RETRY_COUNT
;
3055 case IB_QP_RNR_RETRY
:
3056 return MLX5_QP_OPTPAR_RNR_RETRY
;
3059 case IB_QP_MAX_QP_RD_ATOMIC
:
3060 return MLX5_QP_OPTPAR_SRA_MAX
;
3061 case IB_QP_ALT_PATH
:
3062 return MLX5_QP_OPTPAR_ALT_ADDR_PATH
;
3063 case IB_QP_MIN_RNR_TIMER
:
3064 return MLX5_QP_OPTPAR_RNR_TIMEOUT
;
3067 case IB_QP_MAX_DEST_RD_ATOMIC
:
3068 return MLX5_QP_OPTPAR_RRA_MAX
| MLX5_QP_OPTPAR_RWE
|
3069 MLX5_QP_OPTPAR_RRE
| MLX5_QP_OPTPAR_RAE
;
3070 case IB_QP_PATH_MIG_STATE
:
3071 return MLX5_QP_OPTPAR_PM_STATE
;
3074 case IB_QP_DEST_QPN
:
3080 static int ib_mask_to_mlx5_opt(int ib_mask
)
3085 for (i
= 0; i
< 8 * sizeof(int); i
++) {
3086 if ((1 << i
) & ib_mask
)
3087 result
|= ib_nr_to_mlx5_nr(1 << i
);
3093 static int modify_raw_packet_qp_rq(
3094 struct mlx5_ib_dev
*dev
, struct mlx5_ib_rq
*rq
, int new_state
,
3095 const struct mlx5_modify_raw_qp_param
*raw_qp_param
, struct ib_pd
*pd
)
3102 inlen
= MLX5_ST_SZ_BYTES(modify_rq_in
);
3103 in
= kvzalloc(inlen
, GFP_KERNEL
);
3107 MLX5_SET(modify_rq_in
, in
, rq_state
, rq
->state
);
3108 MLX5_SET(modify_rq_in
, in
, uid
, to_mpd(pd
)->uid
);
3110 rqc
= MLX5_ADDR_OF(modify_rq_in
, in
, ctx
);
3111 MLX5_SET(rqc
, rqc
, state
, new_state
);
3113 if (raw_qp_param
->set_mask
& MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID
) {
3114 if (MLX5_CAP_GEN(dev
->mdev
, modify_rq_counter_set_id
)) {
3115 MLX5_SET64(modify_rq_in
, in
, modify_bitmask
,
3116 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID
);
3117 MLX5_SET(rqc
, rqc
, counter_set_id
, raw_qp_param
->rq_q_ctr_id
);
3121 "RAW PACKET QP counters are not supported on current FW\n");
3124 err
= mlx5_core_modify_rq(dev
->mdev
, rq
->base
.mqp
.qpn
, in
, inlen
);
3128 rq
->state
= new_state
;
3135 static int modify_raw_packet_qp_sq(
3136 struct mlx5_core_dev
*dev
, struct mlx5_ib_sq
*sq
, int new_state
,
3137 const struct mlx5_modify_raw_qp_param
*raw_qp_param
, struct ib_pd
*pd
)
3139 struct mlx5_ib_qp
*ibqp
= sq
->base
.container_mibqp
;
3140 struct mlx5_rate_limit old_rl
= ibqp
->rl
;
3141 struct mlx5_rate_limit new_rl
= old_rl
;
3142 bool new_rate_added
= false;
3149 inlen
= MLX5_ST_SZ_BYTES(modify_sq_in
);
3150 in
= kvzalloc(inlen
, GFP_KERNEL
);
3154 MLX5_SET(modify_sq_in
, in
, uid
, to_mpd(pd
)->uid
);
3155 MLX5_SET(modify_sq_in
, in
, sq_state
, sq
->state
);
3157 sqc
= MLX5_ADDR_OF(modify_sq_in
, in
, ctx
);
3158 MLX5_SET(sqc
, sqc
, state
, new_state
);
3160 if (raw_qp_param
->set_mask
& MLX5_RAW_QP_RATE_LIMIT
) {
3161 if (new_state
!= MLX5_SQC_STATE_RDY
)
3162 pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
3165 new_rl
= raw_qp_param
->rl
;
3168 if (!mlx5_rl_are_equal(&old_rl
, &new_rl
)) {
3170 err
= mlx5_rl_add_rate(dev
, &rl_index
, &new_rl
);
3172 pr_err("Failed configuring rate limit(err %d): \
3173 rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
3174 err
, new_rl
.rate
, new_rl
.max_burst_sz
,
3175 new_rl
.typical_pkt_sz
);
3179 new_rate_added
= true;
3182 MLX5_SET64(modify_sq_in
, in
, modify_bitmask
, 1);
3183 /* index 0 means no limit */
3184 MLX5_SET(sqc
, sqc
, packet_pacing_rate_limit_index
, rl_index
);
3187 err
= mlx5_core_modify_sq(dev
, sq
->base
.mqp
.qpn
, in
, inlen
);
3189 /* Remove new rate from table if failed */
3191 mlx5_rl_remove_rate(dev
, &new_rl
);
3195 /* Only remove the old rate after new rate was set */
3197 !mlx5_rl_are_equal(&old_rl
, &new_rl
)) ||
3198 (new_state
!= MLX5_SQC_STATE_RDY
))
3199 mlx5_rl_remove_rate(dev
, &old_rl
);
3202 sq
->state
= new_state
;
3209 static int modify_raw_packet_qp(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
3210 const struct mlx5_modify_raw_qp_param
*raw_qp_param
,
3213 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
= &qp
->raw_packet_qp
;
3214 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
3215 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
3216 int modify_rq
= !!qp
->rq
.wqe_cnt
;
3217 int modify_sq
= !!qp
->sq
.wqe_cnt
;
3222 switch (raw_qp_param
->operation
) {
3223 case MLX5_CMD_OP_RST2INIT_QP
:
3224 rq_state
= MLX5_RQC_STATE_RDY
;
3225 sq_state
= MLX5_SQC_STATE_RDY
;
3227 case MLX5_CMD_OP_2ERR_QP
:
3228 rq_state
= MLX5_RQC_STATE_ERR
;
3229 sq_state
= MLX5_SQC_STATE_ERR
;
3231 case MLX5_CMD_OP_2RST_QP
:
3232 rq_state
= MLX5_RQC_STATE_RST
;
3233 sq_state
= MLX5_SQC_STATE_RST
;
3235 case MLX5_CMD_OP_RTR2RTS_QP
:
3236 case MLX5_CMD_OP_RTS2RTS_QP
:
3237 if (raw_qp_param
->set_mask
==
3238 MLX5_RAW_QP_RATE_LIMIT
) {
3240 sq_state
= sq
->state
;
3242 return raw_qp_param
->set_mask
? -EINVAL
: 0;
3245 case MLX5_CMD_OP_INIT2INIT_QP
:
3246 case MLX5_CMD_OP_INIT2RTR_QP
:
3247 if (raw_qp_param
->set_mask
)
3257 err
= modify_raw_packet_qp_rq(dev
, rq
, rq_state
, raw_qp_param
,
3265 err
= modify_raw_packet_tx_affinity(dev
->mdev
, sq
,
3272 return modify_raw_packet_qp_sq(dev
->mdev
, sq
, sq_state
,
3273 raw_qp_param
, qp
->ibqp
.pd
);
3279 static unsigned int get_tx_affinity(struct mlx5_ib_dev
*dev
,
3280 struct mlx5_ib_pd
*pd
,
3281 struct mlx5_ib_qp_base
*qp_base
,
3282 u8 port_num
, struct ib_udata
*udata
)
3284 struct mlx5_ib_ucontext
*ucontext
= rdma_udata_to_drv_context(
3285 udata
, struct mlx5_ib_ucontext
, ibucontext
);
3286 unsigned int tx_port_affinity
;
3289 tx_port_affinity
= (unsigned int)atomic_add_return(
3290 1, &ucontext
->tx_port_affinity
) %
3293 mlx5_ib_dbg(dev
, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
3294 tx_port_affinity
, qp_base
->mqp
.qpn
, ucontext
);
3297 (unsigned int)atomic_add_return(
3298 1, &dev
->roce
[port_num
].tx_port_affinity
) %
3301 mlx5_ib_dbg(dev
, "Set tx affinity 0x%x to qpn 0x%x\n",
3302 tx_port_affinity
, qp_base
->mqp
.qpn
);
3305 return tx_port_affinity
;
3308 static int __mlx5_ib_modify_qp(struct ib_qp
*ibqp
,
3309 const struct ib_qp_attr
*attr
, int attr_mask
,
3310 enum ib_qp_state cur_state
,
3311 enum ib_qp_state new_state
,
3312 const struct mlx5_ib_modify_qp
*ucmd
,
3313 struct ib_udata
*udata
)
3315 static const u16 optab
[MLX5_QP_NUM_STATE
][MLX5_QP_NUM_STATE
] = {
3316 [MLX5_QP_STATE_RST
] = {
3317 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
3318 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
3319 [MLX5_QP_STATE_INIT
] = MLX5_CMD_OP_RST2INIT_QP
,
3321 [MLX5_QP_STATE_INIT
] = {
3322 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
3323 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
3324 [MLX5_QP_STATE_INIT
] = MLX5_CMD_OP_INIT2INIT_QP
,
3325 [MLX5_QP_STATE_RTR
] = MLX5_CMD_OP_INIT2RTR_QP
,
3327 [MLX5_QP_STATE_RTR
] = {
3328 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
3329 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
3330 [MLX5_QP_STATE_RTS
] = MLX5_CMD_OP_RTR2RTS_QP
,
3332 [MLX5_QP_STATE_RTS
] = {
3333 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
3334 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
3335 [MLX5_QP_STATE_RTS
] = MLX5_CMD_OP_RTS2RTS_QP
,
3337 [MLX5_QP_STATE_SQD
] = {
3338 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
3339 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
3341 [MLX5_QP_STATE_SQER
] = {
3342 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
3343 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
3344 [MLX5_QP_STATE_RTS
] = MLX5_CMD_OP_SQERR2RTS_QP
,
3346 [MLX5_QP_STATE_ERR
] = {
3347 [MLX5_QP_STATE_RST
] = MLX5_CMD_OP_2RST_QP
,
3348 [MLX5_QP_STATE_ERR
] = MLX5_CMD_OP_2ERR_QP
,
3352 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
3353 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
3354 struct mlx5_ib_qp_base
*base
= &qp
->trans_qp
.base
;
3355 struct mlx5_ib_cq
*send_cq
, *recv_cq
;
3356 struct mlx5_qp_context
*context
;
3357 struct mlx5_ib_pd
*pd
;
3358 struct mlx5_ib_port
*mibport
= NULL
;
3359 enum mlx5_qp_state mlx5_cur
, mlx5_new
;
3360 enum mlx5_qp_optpar optpar
;
3366 mlx5_st
= to_mlx5_st(ibqp
->qp_type
== IB_QPT_DRIVER
?
3367 qp
->qp_sub_type
: ibqp
->qp_type
);
3371 context
= kzalloc(sizeof(*context
), GFP_KERNEL
);
3376 context
->flags
= cpu_to_be32(mlx5_st
<< 16);
3378 if (!(attr_mask
& IB_QP_PATH_MIG_STATE
)) {
3379 context
->flags
|= cpu_to_be32(MLX5_QP_PM_MIGRATED
<< 11);
3381 switch (attr
->path_mig_state
) {
3382 case IB_MIG_MIGRATED
:
3383 context
->flags
|= cpu_to_be32(MLX5_QP_PM_MIGRATED
<< 11);
3386 context
->flags
|= cpu_to_be32(MLX5_QP_PM_REARM
<< 11);
3389 context
->flags
|= cpu_to_be32(MLX5_QP_PM_ARMED
<< 11);
3394 if ((cur_state
== IB_QPS_RESET
) && (new_state
== IB_QPS_INIT
)) {
3395 if ((ibqp
->qp_type
== IB_QPT_RC
) ||
3396 (ibqp
->qp_type
== IB_QPT_UD
&&
3397 !(qp
->flags
& MLX5_IB_QP_SQPN_QP1
)) ||
3398 (ibqp
->qp_type
== IB_QPT_UC
) ||
3399 (ibqp
->qp_type
== IB_QPT_RAW_PACKET
) ||
3400 (ibqp
->qp_type
== IB_QPT_XRC_INI
) ||
3401 (ibqp
->qp_type
== IB_QPT_XRC_TGT
)) {
3402 if (dev
->lag_active
) {
3403 u8 p
= mlx5_core_native_port_num(dev
->mdev
);
3404 tx_affinity
= get_tx_affinity(dev
, pd
, base
, p
,
3406 context
->flags
|= cpu_to_be32(tx_affinity
<< 24);
3411 if (is_sqp(ibqp
->qp_type
)) {
3412 context
->mtu_msgmax
= (IB_MTU_256
<< 5) | 8;
3413 } else if ((ibqp
->qp_type
== IB_QPT_UD
&&
3414 !(qp
->flags
& MLX5_IB_QP_UNDERLAY
)) ||
3415 ibqp
->qp_type
== MLX5_IB_QPT_REG_UMR
) {
3416 context
->mtu_msgmax
= (IB_MTU_4096
<< 5) | 12;
3417 } else if (attr_mask
& IB_QP_PATH_MTU
) {
3418 if (attr
->path_mtu
< IB_MTU_256
||
3419 attr
->path_mtu
> IB_MTU_4096
) {
3420 mlx5_ib_warn(dev
, "invalid mtu %d\n", attr
->path_mtu
);
3424 context
->mtu_msgmax
= (attr
->path_mtu
<< 5) |
3425 (u8
)MLX5_CAP_GEN(dev
->mdev
, log_max_msg
);
3428 if (attr_mask
& IB_QP_DEST_QPN
)
3429 context
->log_pg_sz_remote_qpn
= cpu_to_be32(attr
->dest_qp_num
);
3431 if (attr_mask
& IB_QP_PKEY_INDEX
)
3432 context
->pri_path
.pkey_index
= cpu_to_be16(attr
->pkey_index
);
3434 /* todo implement counter_index functionality */
3436 if (is_sqp(ibqp
->qp_type
))
3437 context
->pri_path
.port
= qp
->port
;
3439 if (attr_mask
& IB_QP_PORT
)
3440 context
->pri_path
.port
= attr
->port_num
;
3442 if (attr_mask
& IB_QP_AV
) {
3443 err
= mlx5_set_path(dev
, qp
, &attr
->ah_attr
, &context
->pri_path
,
3444 attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
,
3445 attr_mask
, 0, attr
, false);
3450 if (attr_mask
& IB_QP_TIMEOUT
)
3451 context
->pri_path
.ackto_lt
|= attr
->timeout
<< 3;
3453 if (attr_mask
& IB_QP_ALT_PATH
) {
3454 err
= mlx5_set_path(dev
, qp
, &attr
->alt_ah_attr
,
3457 attr_mask
| IB_QP_PKEY_INDEX
| IB_QP_TIMEOUT
,
3463 get_cqs(qp
->ibqp
.qp_type
, qp
->ibqp
.send_cq
, qp
->ibqp
.recv_cq
,
3464 &send_cq
, &recv_cq
);
3466 context
->flags_pd
= cpu_to_be32(pd
? pd
->pdn
: to_mpd(dev
->devr
.p0
)->pdn
);
3467 context
->cqn_send
= send_cq
? cpu_to_be32(send_cq
->mcq
.cqn
) : 0;
3468 context
->cqn_recv
= recv_cq
? cpu_to_be32(recv_cq
->mcq
.cqn
) : 0;
3469 context
->params1
= cpu_to_be32(MLX5_IB_ACK_REQ_FREQ
<< 28);
3471 if (attr_mask
& IB_QP_RNR_RETRY
)
3472 context
->params1
|= cpu_to_be32(attr
->rnr_retry
<< 13);
3474 if (attr_mask
& IB_QP_RETRY_CNT
)
3475 context
->params1
|= cpu_to_be32(attr
->retry_cnt
<< 16);
3477 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
) {
3478 if (attr
->max_rd_atomic
)
3480 cpu_to_be32(fls(attr
->max_rd_atomic
- 1) << 21);
3483 if (attr_mask
& IB_QP_SQ_PSN
)
3484 context
->next_send_psn
= cpu_to_be32(attr
->sq_psn
);
3486 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
) {
3487 if (attr
->max_dest_rd_atomic
)
3489 cpu_to_be32(fls(attr
->max_dest_rd_atomic
- 1) << 21);
3492 if (attr_mask
& (IB_QP_ACCESS_FLAGS
| IB_QP_MAX_DEST_RD_ATOMIC
)) {
3493 __be32 access_flags
;
3495 err
= to_mlx5_access_flags(qp
, attr
, attr_mask
, &access_flags
);
3499 context
->params2
|= access_flags
;
3502 if (attr_mask
& IB_QP_MIN_RNR_TIMER
)
3503 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->min_rnr_timer
<< 24);
3505 if (attr_mask
& IB_QP_RQ_PSN
)
3506 context
->rnr_nextrecvpsn
|= cpu_to_be32(attr
->rq_psn
);
3508 if (attr_mask
& IB_QP_QKEY
)
3509 context
->qkey
= cpu_to_be32(attr
->qkey
);
3511 if (qp
->rq
.wqe_cnt
&& cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
3512 context
->db_rec_addr
= cpu_to_be64(qp
->db
.dma
);
3514 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
) {
3515 u8 port_num
= (attr_mask
& IB_QP_PORT
? attr
->port_num
:
3518 /* Underlay port should be used - index 0 function per port */
3519 if (qp
->flags
& MLX5_IB_QP_UNDERLAY
)
3522 mibport
= &dev
->port
[port_num
];
3523 context
->qp_counter_set_usr_page
|=
3524 cpu_to_be32((u32
)(mibport
->cnts
.set_id
) << 24);
3527 if (!ibqp
->uobject
&& cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
3528 context
->sq_crq_size
|= cpu_to_be16(1 << 4);
3530 if (qp
->flags
& MLX5_IB_QP_SQPN_QP1
)
3531 context
->deth_sqpn
= cpu_to_be32(1);
3533 mlx5_cur
= to_mlx5_state(cur_state
);
3534 mlx5_new
= to_mlx5_state(new_state
);
3536 if (mlx5_cur
>= MLX5_QP_NUM_STATE
|| mlx5_new
>= MLX5_QP_NUM_STATE
||
3537 !optab
[mlx5_cur
][mlx5_new
]) {
3542 op
= optab
[mlx5_cur
][mlx5_new
];
3543 optpar
= ib_mask_to_mlx5_opt(attr_mask
);
3544 optpar
&= opt_mask
[mlx5_cur
][mlx5_new
][mlx5_st
];
3546 if (qp
->ibqp
.qp_type
== IB_QPT_RAW_PACKET
||
3547 qp
->flags
& MLX5_IB_QP_UNDERLAY
) {
3548 struct mlx5_modify_raw_qp_param raw_qp_param
= {};
3550 raw_qp_param
.operation
= op
;
3551 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
) {
3552 raw_qp_param
.rq_q_ctr_id
= mibport
->cnts
.set_id
;
3553 raw_qp_param
.set_mask
|= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID
;
3556 if (attr_mask
& IB_QP_RATE_LIMIT
) {
3557 raw_qp_param
.rl
.rate
= attr
->rate_limit
;
3559 if (ucmd
->burst_info
.max_burst_sz
) {
3560 if (attr
->rate_limit
&&
3561 MLX5_CAP_QOS(dev
->mdev
, packet_pacing_burst_bound
)) {
3562 raw_qp_param
.rl
.max_burst_sz
=
3563 ucmd
->burst_info
.max_burst_sz
;
3570 if (ucmd
->burst_info
.typical_pkt_sz
) {
3571 if (attr
->rate_limit
&&
3572 MLX5_CAP_QOS(dev
->mdev
, packet_pacing_typical_size
)) {
3573 raw_qp_param
.rl
.typical_pkt_sz
=
3574 ucmd
->burst_info
.typical_pkt_sz
;
3581 raw_qp_param
.set_mask
|= MLX5_RAW_QP_RATE_LIMIT
;
3584 err
= modify_raw_packet_qp(dev
, qp
, &raw_qp_param
, tx_affinity
);
3586 err
= mlx5_core_qp_modify(dev
->mdev
, op
, optpar
, context
,
3593 qp
->state
= new_state
;
3595 if (attr_mask
& IB_QP_ACCESS_FLAGS
)
3596 qp
->trans_qp
.atomic_rd_en
= attr
->qp_access_flags
;
3597 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
)
3598 qp
->trans_qp
.resp_depth
= attr
->max_dest_rd_atomic
;
3599 if (attr_mask
& IB_QP_PORT
)
3600 qp
->port
= attr
->port_num
;
3601 if (attr_mask
& IB_QP_ALT_PATH
)
3602 qp
->trans_qp
.alt_port
= attr
->alt_port_num
;
3605 * If we moved a kernel QP to RESET, clean up all old CQ
3606 * entries and reinitialize the QP.
3608 if (new_state
== IB_QPS_RESET
&&
3609 !ibqp
->uobject
&& ibqp
->qp_type
!= IB_QPT_XRC_TGT
) {
3610 mlx5_ib_cq_clean(recv_cq
, base
->mqp
.qpn
,
3611 ibqp
->srq
? to_msrq(ibqp
->srq
) : NULL
);
3612 if (send_cq
!= recv_cq
)
3613 mlx5_ib_cq_clean(send_cq
, base
->mqp
.qpn
, NULL
);
3619 qp
->sq
.cur_post
= 0;
3621 qp
->sq
.cur_edge
= get_sq_edge(&qp
->sq
, 0);
3622 qp
->db
.db
[MLX5_RCV_DBR
] = 0;
3623 qp
->db
.db
[MLX5_SND_DBR
] = 0;
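/* A modify mask is valid when every required attribute bit is present and no
 * bit outside the required and optional sets is set.
 */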
static inline bool is_valid_mask(int mask, int req, int opt)
{
	if ((mask & req) != req)
		return false;

	if (mask & ~(req | opt))
		return false;

	return true;
}
/* check valid transition for driver QP types
 * for now the only QP type that this function supports is DCI
 */
static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new_state,
				enum ib_qp_attr_mask attr_mask)
{
	int req = IB_QP_STATE;
	int opt = 0;

	if (new_state == IB_QPS_RESET) {
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		req |= IB_QP_PKEY_INDEX | IB_QP_PORT;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
		opt = IB_QP_PKEY_INDEX | IB_QP_PORT;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		req |= IB_QP_PATH_MTU;
		opt = IB_QP_PKEY_INDEX | IB_QP_AV;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
		req |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
		       IB_QP_MAX_QP_RD_ATOMIC | IB_QP_SQ_PSN;
		opt = IB_QP_MIN_RNR_TIMER;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) {
		opt = IB_QP_MIN_RNR_TIMER;
		return is_valid_mask(attr_mask, req, opt);
	} else if (cur_state != IB_QPS_RESET && new_state == IB_QPS_ERR) {
		return is_valid_mask(attr_mask, req, opt);
	}

	return false;
}
3677 /* mlx5_ib_modify_dct: modify a DCT QP
3678 * valid transitions are:
3679 * RESET to INIT: must set access_flags, pkey_index and port
3680 * INIT to RTR : must set min_rnr_timer, tclass, flow_label,
3681 * mtu, gid_index and hop_limit
3682 * Other transitions and attributes are illegal
3684 static int mlx5_ib_modify_dct(struct ib_qp
*ibqp
, struct ib_qp_attr
*attr
,
3685 int attr_mask
, struct ib_udata
*udata
)
3687 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
3688 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
3689 enum ib_qp_state cur_state
, new_state
;
3691 int required
= IB_QP_STATE
;
3694 if (!(attr_mask
& IB_QP_STATE
))
3697 cur_state
= qp
->state
;
3698 new_state
= attr
->qp_state
;
3700 dctc
= MLX5_ADDR_OF(create_dct_in
, qp
->dct
.in
, dct_context_entry
);
3701 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
) {
3702 required
|= IB_QP_ACCESS_FLAGS
| IB_QP_PKEY_INDEX
| IB_QP_PORT
;
3703 if (!is_valid_mask(attr_mask
, required
, 0))
3706 if (attr
->port_num
== 0 ||
3707 attr
->port_num
> MLX5_CAP_GEN(dev
->mdev
, num_ports
)) {
3708 mlx5_ib_dbg(dev
, "invalid port number %d. number of ports is %d\n",
3709 attr
->port_num
, dev
->num_ports
);
3712 if (attr
->qp_access_flags
& IB_ACCESS_REMOTE_READ
)
3713 MLX5_SET(dctc
, dctc
, rre
, 1);
3714 if (attr
->qp_access_flags
& IB_ACCESS_REMOTE_WRITE
)
3715 MLX5_SET(dctc
, dctc
, rwe
, 1);
3716 if (attr
->qp_access_flags
& IB_ACCESS_REMOTE_ATOMIC
) {
3719 atomic_mode
= get_atomic_mode(dev
, MLX5_IB_QPT_DCT
);
3720 if (atomic_mode
< 0)
3723 MLX5_SET(dctc
, dctc
, atomic_mode
, atomic_mode
);
3724 MLX5_SET(dctc
, dctc
, rae
, 1);
3726 MLX5_SET(dctc
, dctc
, pkey_index
, attr
->pkey_index
);
3727 MLX5_SET(dctc
, dctc
, port
, attr
->port_num
);
3728 MLX5_SET(dctc
, dctc
, counter_set_id
, dev
->port
[attr
->port_num
- 1].cnts
.set_id
);
3730 } else if (cur_state
== IB_QPS_INIT
&& new_state
== IB_QPS_RTR
) {
3731 struct mlx5_ib_modify_qp_resp resp
= {};
3732 u32 min_resp_len
= offsetof(typeof(resp
), dctn
) +
3735 if (udata
->outlen
< min_resp_len
)
3737 resp
.response_length
= min_resp_len
;
3739 required
|= IB_QP_MIN_RNR_TIMER
| IB_QP_AV
| IB_QP_PATH_MTU
;
3740 if (!is_valid_mask(attr_mask
, required
, 0))
3742 MLX5_SET(dctc
, dctc
, min_rnr_nak
, attr
->min_rnr_timer
);
3743 MLX5_SET(dctc
, dctc
, tclass
, attr
->ah_attr
.grh
.traffic_class
);
3744 MLX5_SET(dctc
, dctc
, flow_label
, attr
->ah_attr
.grh
.flow_label
);
3745 MLX5_SET(dctc
, dctc
, mtu
, attr
->path_mtu
);
3746 MLX5_SET(dctc
, dctc
, my_addr_index
, attr
->ah_attr
.grh
.sgid_index
);
3747 MLX5_SET(dctc
, dctc
, hop_limit
, attr
->ah_attr
.grh
.hop_limit
);
3749 err
= mlx5_core_create_dct(dev
->mdev
, &qp
->dct
.mdct
, qp
->dct
.in
,
3750 MLX5_ST_SZ_BYTES(create_dct_in
));
3753 resp
.dctn
= qp
->dct
.mdct
.mqp
.qpn
;
3754 err
= ib_copy_to_udata(udata
, &resp
, resp
.response_length
);
3756 mlx5_core_destroy_dct(dev
->mdev
, &qp
->dct
.mdct
);
3760 mlx5_ib_warn(dev
, "Modify DCT: Invalid transition from %d to %d\n", cur_state
, new_state
);
3764 qp
->state
= IB_QPS_ERR
;
3766 qp
->state
= new_state
;
3770 int mlx5_ib_modify_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*attr
,
3771 int attr_mask
, struct ib_udata
*udata
)
3773 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
3774 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
3775 struct mlx5_ib_modify_qp ucmd
= {};
3776 enum ib_qp_type qp_type
;
3777 enum ib_qp_state cur_state
, new_state
;
3778 size_t required_cmd_sz
;
3782 if (ibqp
->rwq_ind_tbl
)
3785 if (udata
&& udata
->inlen
) {
3786 required_cmd_sz
= offsetof(typeof(ucmd
), reserved
) +
3787 sizeof(ucmd
.reserved
);
3788 if (udata
->inlen
< required_cmd_sz
)
3791 if (udata
->inlen
> sizeof(ucmd
) &&
3792 !ib_is_udata_cleared(udata
, sizeof(ucmd
),
3793 udata
->inlen
- sizeof(ucmd
)))
3796 if (ib_copy_from_udata(&ucmd
, udata
,
3797 min(udata
->inlen
, sizeof(ucmd
))))
3800 if (ucmd
.comp_mask
||
3801 memchr_inv(&ucmd
.reserved
, 0, sizeof(ucmd
.reserved
)) ||
3802 memchr_inv(&ucmd
.burst_info
.reserved
, 0,
3803 sizeof(ucmd
.burst_info
.reserved
)))
3807 if (unlikely(ibqp
->qp_type
== IB_QPT_GSI
))
3808 return mlx5_ib_gsi_modify_qp(ibqp
, attr
, attr_mask
);
3810 if (ibqp
->qp_type
== IB_QPT_DRIVER
)
3811 qp_type
= qp
->qp_sub_type
;
3813 qp_type
= (unlikely(ibqp
->qp_type
== MLX5_IB_QPT_HW_GSI
)) ?
3814 IB_QPT_GSI
: ibqp
->qp_type
;
3816 if (qp_type
== MLX5_IB_QPT_DCT
)
3817 return mlx5_ib_modify_dct(ibqp
, attr
, attr_mask
, udata
);
3819 mutex_lock(&qp
->mutex
);
3821 cur_state
= attr_mask
& IB_QP_CUR_STATE
? attr
->cur_qp_state
: qp
->state
;
3822 new_state
= attr_mask
& IB_QP_STATE
? attr
->qp_state
: cur_state
;
3824 if (!(cur_state
== new_state
&& cur_state
== IB_QPS_RESET
)) {
3825 port
= attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
;
3828 if (qp
->flags
& MLX5_IB_QP_UNDERLAY
) {
3829 if (attr_mask
& ~(IB_QP_STATE
| IB_QP_CUR_STATE
)) {
3830 mlx5_ib_dbg(dev
, "invalid attr_mask 0x%x when underlay QP is used\n",
3834 } else if (qp_type
!= MLX5_IB_QPT_REG_UMR
&&
3835 qp_type
!= MLX5_IB_QPT_DCI
&&
3836 !ib_modify_qp_is_ok(cur_state
, new_state
, qp_type
,
3838 mlx5_ib_dbg(dev
, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
3839 cur_state
, new_state
, ibqp
->qp_type
, attr_mask
);
3841 } else if (qp_type
== MLX5_IB_QPT_DCI
&&
3842 !modify_dci_qp_is_ok(cur_state
, new_state
, attr_mask
)) {
3843 mlx5_ib_dbg(dev
, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
3844 cur_state
, new_state
, qp_type
, attr_mask
);
3848 if ((attr_mask
& IB_QP_PORT
) &&
3849 (attr
->port_num
== 0 ||
3850 attr
->port_num
> dev
->num_ports
)) {
3851 mlx5_ib_dbg(dev
, "invalid port number %d. number of ports is %d\n",
3852 attr
->port_num
, dev
->num_ports
);
3856 if (attr_mask
& IB_QP_PKEY_INDEX
) {
3857 port
= attr_mask
& IB_QP_PORT
? attr
->port_num
: qp
->port
;
3858 if (attr
->pkey_index
>=
3859 dev
->mdev
->port_caps
[port
- 1].pkey_table_len
) {
3860 mlx5_ib_dbg(dev
, "invalid pkey index %d\n",
3866 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
&&
3867 attr
->max_rd_atomic
>
3868 (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_ra_res_qp
))) {
3869 mlx5_ib_dbg(dev
, "invalid max_rd_atomic value %d\n",
3870 attr
->max_rd_atomic
);
3874 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
&&
3875 attr
->max_dest_rd_atomic
>
3876 (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_ra_req_qp
))) {
3877 mlx5_ib_dbg(dev
, "invalid max_dest_rd_atomic value %d\n",
3878 attr
->max_dest_rd_atomic
);
3882 if (cur_state
== new_state
&& cur_state
== IB_QPS_RESET
) {
3887 err
= __mlx5_ib_modify_qp(ibqp
, attr
, attr_mask
, cur_state
,
3888 new_state
, &ucmd
, udata
);
3891 mutex_unlock(&qp
->mutex
);
3895 static void _handle_post_send_edge(struct mlx5_ib_wq
*sq
, void **seg
,
3896 u32 wqe_sz
, void **cur_edge
)
3900 idx
= (sq
->cur_post
+ (wqe_sz
>> 2)) & (sq
->wqe_cnt
- 1);
3901 *cur_edge
= get_sq_edge(sq
, idx
);
3903 *seg
= mlx5_frag_buf_get_wqe(&sq
->fbc
, idx
);
3906 /* handle_post_send_edge - Check if we get to SQ edge. If yes, update to the
3907 * next nearby edge and get new address translation for current WQE position.
3909 * @seg: Current WQE position (16B aligned).
3910 * @wqe_sz: Total current WQE size [16B].
3911 * @cur_edge: Updated current edge.
3913 static inline void handle_post_send_edge(struct mlx5_ib_wq
*sq
, void **seg
,
3914 u32 wqe_sz
, void **cur_edge
)
3916 if (likely(*seg
!= *cur_edge
))
3919 _handle_post_send_edge(sq
, seg
, wqe_sz
, cur_edge
);
/* memcpy_send_wqe - copy data from src to WQE and update the relevant WQ's
 * pointers. At the end @seg is aligned to 16B regardless the copied size.
 * @sq - SQ buffer.
 * @cur_edge: Updated current edge.
 * @seg: Current WQE position (16B aligned).
 * @wqe_sz: Total current WQE size [16B].
 * @src: Pointer to copy from.
 * @n: Number of bytes to copy.
 */
static inline void memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
				   void **seg, u32 *wqe_sz, const void *src,
				   size_t n)
{
	while (likely(n)) {
		size_t leftlen = *cur_edge - *seg;
		size_t copysz = min_t(size_t, leftlen, n);
		size_t stride;

		memcpy(*seg, src, copysz);

		n -= copysz;
		src += copysz;
		stride = !n ? ALIGN(copysz, 16) : copysz;
		*seg += stride;
		*wqe_sz += stride >> 4;
		handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
	}
}
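/* Check whether posting nreq more WQEs would overflow the work queue. The
 * fast path uses the cached head/tail; on the slow path the CQ lock is taken
 * so the tail is re-read consistently against completion processing.
 */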
static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;
	unsigned int cur;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}
static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr = cpu_to_be64(remote_addr);
	rseg->rkey = cpu_to_be32(rkey);
}
3976 static void set_eth_seg(const struct ib_send_wr
*wr
, struct mlx5_ib_qp
*qp
,
3977 void **seg
, int *size
, void **cur_edge
)
3979 struct mlx5_wqe_eth_seg
*eseg
= *seg
;
3981 memset(eseg
, 0, sizeof(struct mlx5_wqe_eth_seg
));
3983 if (wr
->send_flags
& IB_SEND_IP_CSUM
)
3984 eseg
->cs_flags
= MLX5_ETH_WQE_L3_CSUM
|
3985 MLX5_ETH_WQE_L4_CSUM
;
3987 if (wr
->opcode
== IB_WR_LSO
) {
3988 struct ib_ud_wr
*ud_wr
= container_of(wr
, struct ib_ud_wr
, wr
);
3989 size_t left
, copysz
;
3990 void *pdata
= ud_wr
->header
;
3994 eseg
->mss
= cpu_to_be16(ud_wr
->mss
);
3995 eseg
->inline_hdr
.sz
= cpu_to_be16(left
);
3997 /* memcpy_send_wqe should get a 16B align address. Hence, we
3998 * first copy up to the current edge and then, if needed,
3999 * fall-through to memcpy_send_wqe.
4001 copysz
= min_t(u64
, *cur_edge
- (void *)eseg
->inline_hdr
.start
,
4003 memcpy(eseg
->inline_hdr
.start
, pdata
, copysz
);
4004 stride
= ALIGN(sizeof(struct mlx5_wqe_eth_seg
) -
4005 sizeof(eseg
->inline_hdr
.start
) + copysz
, 16);
4006 *size
+= stride
/ 16;
4009 if (copysz
< left
) {
4010 handle_post_send_edge(&qp
->sq
, seg
, *size
, cur_edge
);
4013 memcpy_send_wqe(&qp
->sq
, cur_edge
, seg
, size
, pdata
,
4020 *seg
+= sizeof(struct mlx5_wqe_eth_seg
);
4021 *size
+= sizeof(struct mlx5_wqe_eth_seg
) / 16;
4024 static void set_datagram_seg(struct mlx5_wqe_datagram_seg
*dseg
,
4025 const struct ib_send_wr
*wr
)
4027 memcpy(&dseg
->av
, &to_mah(ud_wr(wr
)->ah
)->av
, sizeof(struct mlx5_av
));
4028 dseg
->av
.dqp_dct
= cpu_to_be32(ud_wr(wr
)->remote_qpn
| MLX5_EXTENDED_UD_AV
);
4029 dseg
->av
.key
.qkey
.qkey
= cpu_to_be32(ud_wr(wr
)->remote_qkey
);
static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}

static u64 get_xlt_octo(u64 bytes)
{
	return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
	       MLX5_IB_UMR_OCTOWORD;
}
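/* UMR translation entries are counted in 16-byte octowords; the byte length
 * is first rounded up to the XLT alignment required by the device.
 */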
4045 static __be64
frwr_mkey_mask(void)
4049 result
= MLX5_MKEY_MASK_LEN
|
4050 MLX5_MKEY_MASK_PAGE_SIZE
|
4051 MLX5_MKEY_MASK_START_ADDR
|
4052 MLX5_MKEY_MASK_EN_RINVAL
|
4053 MLX5_MKEY_MASK_KEY
|
4059 MLX5_MKEY_MASK_SMALL_FENCE
|
4060 MLX5_MKEY_MASK_FREE
;
4062 return cpu_to_be64(result
);
4065 static __be64
sig_mkey_mask(void)
4069 result
= MLX5_MKEY_MASK_LEN
|
4070 MLX5_MKEY_MASK_PAGE_SIZE
|
4071 MLX5_MKEY_MASK_START_ADDR
|
4072 MLX5_MKEY_MASK_EN_SIGERR
|
4073 MLX5_MKEY_MASK_EN_RINVAL
|
4074 MLX5_MKEY_MASK_KEY
|
4079 MLX5_MKEY_MASK_SMALL_FENCE
|
4080 MLX5_MKEY_MASK_FREE
|
4081 MLX5_MKEY_MASK_BSF_EN
;
4083 return cpu_to_be64(result
);
4086 static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg
*umr
,
4087 struct mlx5_ib_mr
*mr
, bool umr_inline
)
4089 int size
= mr
->ndescs
* mr
->desc_size
;
4091 memset(umr
, 0, sizeof(*umr
));
4093 umr
->flags
= MLX5_UMR_CHECK_NOT_FREE
;
4095 umr
->flags
|= MLX5_UMR_INLINE
;
4096 umr
->xlt_octowords
= cpu_to_be16(get_xlt_octo(size
));
4097 umr
->mkey_mask
= frwr_mkey_mask();
4100 static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg
*umr
)
4102 memset(umr
, 0, sizeof(*umr
));
4103 umr
->mkey_mask
= cpu_to_be64(MLX5_MKEY_MASK_FREE
);
4104 umr
->flags
= MLX5_UMR_INLINE
;
4107 static __be64
get_umr_enable_mr_mask(void)
4111 result
= MLX5_MKEY_MASK_KEY
|
4112 MLX5_MKEY_MASK_FREE
;
4114 return cpu_to_be64(result
);
4117 static __be64
get_umr_disable_mr_mask(void)
4121 result
= MLX5_MKEY_MASK_FREE
;
4123 return cpu_to_be64(result
);
4126 static __be64
get_umr_update_translation_mask(void)
4130 result
= MLX5_MKEY_MASK_LEN
|
4131 MLX5_MKEY_MASK_PAGE_SIZE
|
4132 MLX5_MKEY_MASK_START_ADDR
;
4134 return cpu_to_be64(result
);
4137 static __be64
get_umr_update_access_mask(int atomic
)
4141 result
= MLX5_MKEY_MASK_LR
|
4147 result
|= MLX5_MKEY_MASK_A
;
4149 return cpu_to_be64(result
);
4152 static __be64
get_umr_update_pd_mask(void)
4156 result
= MLX5_MKEY_MASK_PD
;
4158 return cpu_to_be64(result
);
4161 static int umr_check_mkey_mask(struct mlx5_ib_dev
*dev
, u64 mask
)
4163 if ((mask
& MLX5_MKEY_MASK_PAGE_SIZE
&&
4164 MLX5_CAP_GEN(dev
->mdev
, umr_modify_entity_size_disabled
)) ||
4165 (mask
& MLX5_MKEY_MASK_A
&&
4166 MLX5_CAP_GEN(dev
->mdev
, umr_modify_atomic_disabled
)))
4171 static int set_reg_umr_segment(struct mlx5_ib_dev
*dev
,
4172 struct mlx5_wqe_umr_ctrl_seg
*umr
,
4173 const struct ib_send_wr
*wr
, int atomic
)
4175 const struct mlx5_umr_wr
*umrwr
= umr_wr(wr
);
4177 memset(umr
, 0, sizeof(*umr
));
4179 if (wr
->send_flags
& MLX5_IB_SEND_UMR_FAIL_IF_FREE
)
4180 umr
->flags
= MLX5_UMR_CHECK_FREE
; /* fail if free */
4182 umr
->flags
= MLX5_UMR_CHECK_NOT_FREE
; /* fail if not free */
4184 umr
->xlt_octowords
= cpu_to_be16(get_xlt_octo(umrwr
->xlt_size
));
4185 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_XLT
) {
4186 u64 offset
= get_xlt_octo(umrwr
->offset
);
4188 umr
->xlt_offset
= cpu_to_be16(offset
& 0xffff);
4189 umr
->xlt_offset_47_16
= cpu_to_be32(offset
>> 16);
4190 umr
->flags
|= MLX5_UMR_TRANSLATION_OFFSET_EN
;
4192 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_TRANSLATION
)
4193 umr
->mkey_mask
|= get_umr_update_translation_mask();
4194 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS
) {
4195 umr
->mkey_mask
|= get_umr_update_access_mask(atomic
);
4196 umr
->mkey_mask
|= get_umr_update_pd_mask();
4198 if (wr
->send_flags
& MLX5_IB_SEND_UMR_ENABLE_MR
)
4199 umr
->mkey_mask
|= get_umr_enable_mr_mask();
4200 if (wr
->send_flags
& MLX5_IB_SEND_UMR_DISABLE_MR
)
4201 umr
->mkey_mask
|= get_umr_disable_mr_mask();
4204 umr
->flags
|= MLX5_UMR_INLINE
;
4206 return umr_check_mkey_mask(dev
, be64_to_cpu(umr
->mkey_mask
));
static u8 get_umr_flags(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
		MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
}
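/* Build the mkey context for a fast-registration work request: the descriptor
 * count is rounded up and halved because KLM descriptors are twice the size
 * of MTT entries.
 */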
4218 static void set_reg_mkey_seg(struct mlx5_mkey_seg
*seg
,
4219 struct mlx5_ib_mr
*mr
,
4220 u32 key
, int access
)
4222 int ndescs
= ALIGN(mr
->ndescs
, 8) >> 1;
4224 memset(seg
, 0, sizeof(*seg
));
4226 if (mr
->access_mode
== MLX5_MKC_ACCESS_MODE_MTT
)
4227 seg
->log2_page_size
= ilog2(mr
->ibmr
.page_size
);
4228 else if (mr
->access_mode
== MLX5_MKC_ACCESS_MODE_KLMS
)
4229 /* KLMs take twice the size of MTTs */
4232 seg
->flags
= get_umr_flags(access
) | mr
->access_mode
;
4233 seg
->qpn_mkey7_0
= cpu_to_be32((key
& 0xff) | 0xffffff00);
4234 seg
->flags_pd
= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL
);
4235 seg
->start_addr
= cpu_to_be64(mr
->ibmr
.iova
);
4236 seg
->len
= cpu_to_be64(mr
->ibmr
.length
);
4237 seg
->xlt_oct_size
= cpu_to_be32(ndescs
);
static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
{
	memset(seg, 0, sizeof(*seg));
	seg->status = MLX5_MKEY_STATUS_FREE;
}
4246 static void set_reg_mkey_segment(struct mlx5_mkey_seg
*seg
,
4247 const struct ib_send_wr
*wr
)
4249 const struct mlx5_umr_wr
*umrwr
= umr_wr(wr
);
4251 memset(seg
, 0, sizeof(*seg
));
4252 if (wr
->send_flags
& MLX5_IB_SEND_UMR_DISABLE_MR
)
4253 seg
->status
= MLX5_MKEY_STATUS_FREE
;
4255 seg
->flags
= convert_access(umrwr
->access_flags
);
4257 seg
->flags_pd
= cpu_to_be32(to_mpd(umrwr
->pd
)->pdn
);
4258 if (wr
->send_flags
& MLX5_IB_SEND_UMR_UPDATE_TRANSLATION
&&
4260 seg
->flags_pd
|= cpu_to_be32(MLX5_MKEY_LEN64
);
4262 seg
->start_addr
= cpu_to_be64(umrwr
->virt_addr
);
4263 seg
->len
= cpu_to_be64(umrwr
->length
);
4264 seg
->log2_page_size
= umrwr
->page_shift
;
4265 seg
->qpn_mkey7_0
= cpu_to_be32(0xffffff00 |
4266 mlx5_mkey_variant(umrwr
->mkey
));
4269 static void set_reg_data_seg(struct mlx5_wqe_data_seg
*dseg
,
4270 struct mlx5_ib_mr
*mr
,
4271 struct mlx5_ib_pd
*pd
)
4273 int bcount
= mr
->desc_size
* mr
->ndescs
;
4275 dseg
->addr
= cpu_to_be64(mr
->desc_map
);
4276 dseg
->byte_count
= cpu_to_be32(ALIGN(bcount
, 64));
4277 dseg
->lkey
= cpu_to_be32(pd
->ibpd
.local_dma_lkey
);
static __be32 send_ieth(const struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}
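/* WQE signature: a one-byte checksum computed over the WQE, used when
 * wq_signature is enabled on the work queue.
 */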
static u8 calc_sig(void *wqe, int size)
{
	u8 *p = wqe;
	u8 res = 0;
	int i;

	for (i = 0; i < size; i++)
		res ^= p[i];

	return ~res;
}

static u8 wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}
4312 static int set_data_inl_seg(struct mlx5_ib_qp
*qp
, const struct ib_send_wr
*wr
,
4313 void **wqe
, int *wqe_sz
, void **cur_edge
)
4315 struct mlx5_wqe_inline_seg
*seg
;
4321 *wqe
+= sizeof(*seg
);
4322 offset
= sizeof(*seg
);
4324 for (i
= 0; i
< wr
->num_sge
; i
++) {
4325 size_t len
= wr
->sg_list
[i
].length
;
4326 void *addr
= (void *)(unsigned long)(wr
->sg_list
[i
].addr
);
4330 if (unlikely(inl
> qp
->max_inline_data
))
4333 while (likely(len
)) {
4337 handle_post_send_edge(&qp
->sq
, wqe
,
4338 *wqe_sz
+ (offset
>> 4),
4341 leftlen
= *cur_edge
- *wqe
;
4342 copysz
= min_t(size_t, leftlen
, len
);
4344 memcpy(*wqe
, addr
, copysz
);
4352 seg
->byte_count
= cpu_to_be32(inl
| MLX5_INLINE_SEG
);
4354 *wqe_sz
+= ALIGN(inl
+ sizeof(seg
->byte_count
), 16) / 16;
4359 static u16
prot_field_size(enum ib_signature_type type
)
4362 case IB_SIG_TYPE_T10_DIF
:
4363 return MLX5_DIF_SIZE
;
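/* Map a T10-DIF block size in bytes to the device's block-size selector
 * encoding; unsupported sizes fall back to 0.
 */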
static u8 bs_selector(int block_size)
{
	switch (block_size) {
	case 512:	    return 0x1;
	case 520:	    return 0x2;
	case 4096:	    return 0x3;
	case 4160:	    return 0x4;
	case 1073741824:    return 0x5;
	default:	    return 0;
	}
}
static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
			      struct mlx5_bsf_inl *inl)
{
	/* Valid inline section and allow BSF refresh */
	inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
				       MLX5_BSF_REFRESH_DIF);
	inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
	inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
	/* repeating block */
	inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
	inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
			MLX5_DIF_CRC : MLX5_DIF_IPCS;

	if (domain->sig.dif.ref_remap)
		inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;

	if (domain->sig.dif.app_escape) {
		if (domain->sig.dif.ref_escape)
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
		else
			inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
	}

	inl->dif_app_bitmask_check =
		cpu_to_be16(domain->sig.dif.apptag_check_mask);
}
static int mlx5_set_bsf(struct ib_mr *sig_mr,
			struct ib_sig_attrs *sig_attrs,
			struct mlx5_bsf *bsf, u32 data_size)
{
	struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
	struct mlx5_bsf_basic *basic = &bsf->basic;
	struct ib_sig_domain *mem = &sig_attrs->mem;
	struct ib_sig_domain *wire = &sig_attrs->wire;

	memset(bsf, 0, sizeof(*bsf));

	/* Basic + Extended + Inline */
	basic->bsf_size_sbs = 1 << 7;
	/* Input domain check byte mask */
	basic->check_byte_mask = sig_attrs->check_mask;
	basic->raw_data_size = cpu_to_be32(data_size);

	/* Memory domain */
	switch (sig_attrs->mem.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
		basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
		mlx5_fill_inl_bsf(mem, &bsf->m_inl);
		break;
	default:
		return -EINVAL;
	}

	/* Wire domain */
	switch (sig_attrs->wire.sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
		    mem->sig_type == wire->sig_type) {
			/* Same block structure */
			basic->bsf_size_sbs |= 1 << 4;
			if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
				basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
			if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
			if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
				basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
		} else
			basic->wire.bs_selector =
				bs_selector(wire->sig.dif.pi_interval);

		basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
		mlx5_fill_inl_bsf(wire, &bsf->w_inl);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
4466 static int set_sig_data_segment(const struct ib_sig_handover_wr
*wr
,
4467 struct mlx5_ib_qp
*qp
, void **seg
,
4468 int *size
, void **cur_edge
)
4470 struct ib_sig_attrs
*sig_attrs
= wr
->sig_attrs
;
4471 struct ib_mr
*sig_mr
= wr
->sig_mr
;
4472 struct mlx5_bsf
*bsf
;
4473 u32 data_len
= wr
->wr
.sg_list
->length
;
4474 u32 data_key
= wr
->wr
.sg_list
->lkey
;
4475 u64 data_va
= wr
->wr
.sg_list
->addr
;
4480 (data_key
== wr
->prot
->lkey
&&
4481 data_va
== wr
->prot
->addr
&&
4482 data_len
== wr
->prot
->length
)) {
4484 * Source domain doesn't contain signature information
4485 * or data and protection are interleaved in memory.
4486 * So need construct:
4487 * ------------------
4489 * ------------------
4491 * ------------------
4493 struct mlx5_klm
*data_klm
= *seg
;
4495 data_klm
->bcount
= cpu_to_be32(data_len
);
4496 data_klm
->key
= cpu_to_be32(data_key
);
4497 data_klm
->va
= cpu_to_be64(data_va
);
4498 wqe_size
= ALIGN(sizeof(*data_klm
), 64);
4501 * Source domain contains signature information
4502 * So need construct a strided block format:
4503 * ---------------------------
4504 * | stride_block_ctrl |
4505 * ---------------------------
4507 * ---------------------------
4509 * ---------------------------
4511 * ---------------------------
4513 struct mlx5_stride_block_ctrl_seg
*sblock_ctrl
;
4514 struct mlx5_stride_block_entry
*data_sentry
;
4515 struct mlx5_stride_block_entry
*prot_sentry
;
4516 u32 prot_key
= wr
->prot
->lkey
;
4517 u64 prot_va
= wr
->prot
->addr
;
4518 u16 block_size
= sig_attrs
->mem
.sig
.dif
.pi_interval
;
4522 data_sentry
= (void *)sblock_ctrl
+ sizeof(*sblock_ctrl
);
4523 prot_sentry
= (void *)data_sentry
+ sizeof(*data_sentry
);
4525 prot_size
= prot_field_size(sig_attrs
->mem
.sig_type
);
4527 pr_err("Bad block size given: %u\n", block_size
);
4530 sblock_ctrl
->bcount_per_cycle
= cpu_to_be32(block_size
+
4532 sblock_ctrl
->op
= cpu_to_be32(MLX5_STRIDE_BLOCK_OP
);
4533 sblock_ctrl
->repeat_count
= cpu_to_be32(data_len
/ block_size
);
4534 sblock_ctrl
->num_entries
= cpu_to_be16(2);
4536 data_sentry
->bcount
= cpu_to_be16(block_size
);
4537 data_sentry
->key
= cpu_to_be32(data_key
);
4538 data_sentry
->va
= cpu_to_be64(data_va
);
4539 data_sentry
->stride
= cpu_to_be16(block_size
);
4541 prot_sentry
->bcount
= cpu_to_be16(prot_size
);
4542 prot_sentry
->key
= cpu_to_be32(prot_key
);
4543 prot_sentry
->va
= cpu_to_be64(prot_va
);
4544 prot_sentry
->stride
= cpu_to_be16(prot_size
);
4546 wqe_size
= ALIGN(sizeof(*sblock_ctrl
) + sizeof(*data_sentry
) +
4547 sizeof(*prot_sentry
), 64);
4551 *size
+= wqe_size
/ 16;
4552 handle_post_send_edge(&qp
->sq
, seg
, *size
, cur_edge
);
4555 ret
= mlx5_set_bsf(sig_mr
, sig_attrs
, bsf
, data_len
);
4559 *seg
+= sizeof(*bsf
);
4560 *size
+= sizeof(*bsf
) / 16;
4561 handle_post_send_edge(&qp
->sq
, seg
, *size
, cur_edge
);
static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
				 const struct ib_sig_handover_wr *wr, u32 size,
				 u32 length, u32 pdn)
{
	struct ib_mr *sig_mr = wr->sig_mr;
	u32 sig_key = sig_mr->rkey;
	u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;

	memset(seg, 0, sizeof(*seg));

	seg->flags = get_umr_flags(wr->access_flags) |
				   MLX5_MKC_ACCESS_MODE_KLMS;
	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
				    MLX5_MKEY_BSF_EN | pdn);
	seg->len = cpu_to_be64(length);
	seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size));
	seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
}
static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				u32 size)
{
	memset(umr, 0, sizeof(*umr));

	umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
	umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
	umr->mkey_mask = sig_mkey_mask();
}
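/*
 * xlt_octowords and bsf_octowords are counted in 16-byte octowords;
 * get_xlt_octo() converts the translation size to that unit and
 * MLX5_MKEY_BSF_OCTO_SIZE is the fixed BSF size expressed the same way.
 */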
4598 static int set_sig_umr_wr(const struct ib_send_wr
*send_wr
,
4599 struct mlx5_ib_qp
*qp
, void **seg
, int *size
,
4602 const struct ib_sig_handover_wr
*wr
= sig_handover_wr(send_wr
);
4603 struct mlx5_ib_mr
*sig_mr
= to_mmr(wr
->sig_mr
);
4604 u32 pdn
= get_pd(qp
)->pdn
;
4606 int region_len
, ret
;
4608 if (unlikely(wr
->wr
.num_sge
!= 1) ||
4609 unlikely(wr
->access_flags
& IB_ACCESS_REMOTE_ATOMIC
) ||
4610 unlikely(!sig_mr
->sig
) || unlikely(!qp
->signature_en
) ||
4611 unlikely(!sig_mr
->sig
->sig_status_checked
))
4614 /* length of the protected region, data + protection */
4615 region_len
= wr
->wr
.sg_list
->length
;
4617 (wr
->prot
->lkey
!= wr
->wr
.sg_list
->lkey
||
4618 wr
->prot
->addr
!= wr
->wr
.sg_list
->addr
||
4619 wr
->prot
->length
!= wr
->wr
.sg_list
->length
))
4620 region_len
+= wr
->prot
->length
;
4623 * KLM octoword size - if protection was provided
4624 * then we use strided block format (3 octowords),
4625 * else we use single KLM (1 octoword)
4627 xlt_size
= wr
->prot
? 0x30 : sizeof(struct mlx5_klm
);
4629 set_sig_umr_segment(*seg
, xlt_size
);
4630 *seg
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
);
4631 *size
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
) / 16;
4632 handle_post_send_edge(&qp
->sq
, seg
, *size
, cur_edge
);
4634 set_sig_mkey_segment(*seg
, wr
, xlt_size
, region_len
, pdn
);
4635 *seg
+= sizeof(struct mlx5_mkey_seg
);
4636 *size
+= sizeof(struct mlx5_mkey_seg
) / 16;
4637 handle_post_send_edge(&qp
->sq
, seg
, *size
, cur_edge
);
4639 ret
= set_sig_data_segment(wr
, qp
, seg
, size
, cur_edge
);
4643 sig_mr
->sig
->sig_status_checked
= false;
static int set_psv_wr(struct ib_sig_domain *domain,
		      u32 psv_idx, void **seg, int *size)
{
	struct mlx5_seg_set_psv *psv_seg = *seg;

	memset(psv_seg, 0, sizeof(*psv_seg));
	psv_seg->psv_num = cpu_to_be32(psv_idx);
	switch (domain->sig_type) {
	case IB_SIG_TYPE_NONE:
		break;
	case IB_SIG_TYPE_T10_DIF:
		psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
						     domain->sig.dif.app_tag);
		psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
		break;
	default:
		pr_err("Bad signature type (%d) is given.\n",
		       domain->sig_type);
		return -EINVAL;
	}

	*seg += sizeof(*psv_seg);
	*size += sizeof(*psv_seg) / 16;

	return 0;
}
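/*
 * The SET_PSV segment built above seeds the PSV object with the domain's
 * initial block guard, application tag and reference tag; as elsewhere in
 * this file, *size advances in 16-byte units.
 */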
static int set_reg_wr(struct mlx5_ib_qp *qp,
		      const struct ib_reg_wr *wr,
		      void **seg, int *size, void **cur_edge)
{
	struct mlx5_ib_mr *mr = to_mmr(wr->mr);
	struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
	size_t mr_list_size = mr->ndescs * mr->desc_size;
	bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;

	if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
		mlx5_ib_warn(to_mdev(qp->ibqp.device),
			     "Invalid IB_SEND_INLINE send flag\n");
		return -EINVAL;
	}

	set_reg_umr_seg(*seg, mr, umr_inline);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);

	set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);

	if (umr_inline) {
		memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs,
				mr_list_size);
		*size = ALIGN(*size, MLX5_SEND_WQE_BB >> 4);
	} else {
		set_reg_data_seg(*seg, mr, pd);
		*seg += sizeof(struct mlx5_wqe_data_seg);
		*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
	}
	return 0;
}
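/*
 * Fast registration uses two layouts for the descriptor list: when it fits
 * within MLX5_IB_SQ_UMR_INLINE_THRESHOLD bytes it is copied directly into
 * the WQE via memcpy_send_wqe(), otherwise a data segment pointing at the
 * MR's DMA-mapped descriptor buffer (mr->desc_map) is posted instead.
 */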
static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size,
			void **cur_edge)
{
	set_linv_umr_seg(*seg);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
	set_linv_mkey_seg(*seg);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
}
static void dump_wqe(struct mlx5_ib_qp *qp, u32 idx, int size_16)
{
	__be32 *p = NULL;
	int tidx = idx;
	int i, j;

	pr_debug("dump WQE index %u:\n", idx);
	for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
		if ((i & 0xf) == 0) {
			tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
			p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, tidx);
			pr_debug("WQBB at %p:\n", (void *)p);
			j = 0;
		}
		pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
			 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
			 be32_to_cpu(p[j + 3]));
	}
}
static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
		       struct mlx5_wqe_ctrl_seg **ctrl,
		       const struct ib_send_wr *wr, unsigned int *idx,
		       int *size, void **cur_edge, int nreq,
		       bool send_signaled, bool solicited)
{
	if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
		return -ENOMEM;

	*idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
	*seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx);
	*ctrl = *seg;
	*(uint32_t *)(*seg + 8) = 0;
	(*ctrl)->imm = send_ieth(wr);
	(*ctrl)->fm_ce_se = qp->sq_signal_bits |
			    (send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
			    (solicited ? MLX5_WQE_CTRL_SOLICITED : 0);

	*seg += sizeof(**ctrl);
	*size = sizeof(**ctrl) / 16;
	*cur_edge = qp->sq.cur_edge;

	return 0;
}
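/*
 * Every WQE begins with a 16-byte control segment, so *size starts out at
 * one 16-byte unit; sq.wqe_cnt is a power of two, which is what makes the
 * simple mask above sufficient for wrapping the post index.
 */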
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
		     struct mlx5_wqe_ctrl_seg **ctrl,
		     const struct ib_send_wr *wr, unsigned *idx,
		     int *size, void **cur_edge, int nreq)
{
	return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
			   wr->send_flags & IB_SEND_SIGNALED,
			   wr->send_flags & IB_SEND_SOLICITED);
}
static void finish_wqe(struct mlx5_ib_qp *qp,
		       struct mlx5_wqe_ctrl_seg *ctrl,
		       void *seg, u8 size, void *cur_edge,
		       unsigned int idx, u64 wr_id, int nreq, u8 fence,
		       u32 mlx5_opcode)
{
	u8 opmod = 0;

	ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
					     mlx5_opcode | ((u32)opmod << 24));
	ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
	ctrl->fm_ce_se |= fence;
	if (unlikely(qp->wq_sig))
		ctrl->signature = wq_sig(ctrl);

	qp->sq.wrid[idx] = wr_id;
	qp->sq.w_list[idx].opcode = mlx5_opcode;
	qp->sq.wqe_head[idx] = qp->sq.head + nreq;
	qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
	qp->sq.w_list[idx].next = qp->sq.cur_post;

	/* We save the edge which was possibly updated during the WQE
	 * construction, into SQ's cache.
	 */
	seg = PTR_ALIGN(seg, MLX5_SEND_WQE_BB);
	qp->sq.cur_edge = (unlikely(seg == cur_edge)) ?
			  get_sq_edge(&qp->sq, qp->sq.cur_post &
				      (qp->sq.wqe_cnt - 1)) :
			  cur_edge;
}
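/*
 * Example of the cur_post arithmetic: size is in 16-byte units and
 * MLX5_SEND_WQE_BB is 64 bytes, so a WQE with size == 12 (192 bytes)
 * advances sq.cur_post by DIV_ROUND_UP(192, 64) = 3 basic blocks.
 */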
4810 static int _mlx5_ib_post_send(struct ib_qp
*ibqp
, const struct ib_send_wr
*wr
,
4811 const struct ib_send_wr
**bad_wr
, bool drain
)
4813 struct mlx5_wqe_ctrl_seg
*ctrl
= NULL
; /* compiler warning */
4814 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
4815 struct mlx5_core_dev
*mdev
= dev
->mdev
;
4816 struct mlx5_ib_qp
*qp
;
4817 struct mlx5_ib_mr
*mr
;
4818 struct mlx5_wqe_xrc_seg
*xrc
;
4821 int uninitialized_var(size
);
4822 unsigned long flags
;
4832 if (unlikely(mdev
->state
== MLX5_DEVICE_STATE_INTERNAL_ERROR
&&
4838 if (unlikely(ibqp
->qp_type
== IB_QPT_GSI
))
4839 return mlx5_ib_gsi_post_send(ibqp
, wr
, bad_wr
);
4844 spin_lock_irqsave(&qp
->sq
.lock
, flags
);
4846 for (nreq
= 0; wr
; nreq
++, wr
= wr
->next
) {
4847 if (unlikely(wr
->opcode
>= ARRAY_SIZE(mlx5_ib_opcode
))) {
4848 mlx5_ib_warn(dev
, "\n");
4854 num_sge
= wr
->num_sge
;
4855 if (unlikely(num_sge
> qp
->sq
.max_gs
)) {
4856 mlx5_ib_warn(dev
, "\n");
4862 err
= begin_wqe(qp
, &seg
, &ctrl
, wr
, &idx
, &size
, &cur_edge
,
4865 mlx5_ib_warn(dev
, "\n");
4871 if (wr
->opcode
== IB_WR_REG_MR
) {
4872 fence
= dev
->umr_fence
;
4873 next_fence
= MLX5_FENCE_MODE_INITIATOR_SMALL
;
4875 if (wr
->send_flags
& IB_SEND_FENCE
) {
4877 fence
= MLX5_FENCE_MODE_SMALL_AND_FENCE
;
4879 fence
= MLX5_FENCE_MODE_FENCE
;
4881 fence
= qp
->next_fence
;
4885 switch (ibqp
->qp_type
) {
4886 case IB_QPT_XRC_INI
:
4888 seg
+= sizeof(*xrc
);
4889 size
+= sizeof(*xrc
) / 16;
4892 switch (wr
->opcode
) {
4893 case IB_WR_RDMA_READ
:
4894 case IB_WR_RDMA_WRITE
:
4895 case IB_WR_RDMA_WRITE_WITH_IMM
:
4896 set_raddr_seg(seg
, rdma_wr(wr
)->remote_addr
,
4898 seg
+= sizeof(struct mlx5_wqe_raddr_seg
);
4899 size
+= sizeof(struct mlx5_wqe_raddr_seg
) / 16;
4902 case IB_WR_ATOMIC_CMP_AND_SWP
:
4903 case IB_WR_ATOMIC_FETCH_AND_ADD
:
4904 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP
:
4905 mlx5_ib_warn(dev
, "Atomic operations are not supported yet\n");
4910 case IB_WR_LOCAL_INV
:
4911 qp
->sq
.wr_data
[idx
] = IB_WR_LOCAL_INV
;
4912 ctrl
->imm
= cpu_to_be32(wr
->ex
.invalidate_rkey
);
4913 set_linv_wr(qp
, &seg
, &size
, &cur_edge
);
4918 qp
->sq
.wr_data
[idx
] = IB_WR_REG_MR
;
4919 ctrl
->imm
= cpu_to_be32(reg_wr(wr
)->key
);
4920 err
= set_reg_wr(qp
, reg_wr(wr
), &seg
, &size
,
4929 case IB_WR_REG_SIG_MR
:
4930 qp
->sq
.wr_data
[idx
] = IB_WR_REG_SIG_MR
;
4931 mr
= to_mmr(sig_handover_wr(wr
)->sig_mr
);
4933 ctrl
->imm
= cpu_to_be32(mr
->ibmr
.rkey
);
4934 err
= set_sig_umr_wr(wr
, qp
, &seg
, &size
,
4937 mlx5_ib_warn(dev
, "\n");
4942 finish_wqe(qp
, ctrl
, seg
, size
, cur_edge
, idx
,
4943 wr
->wr_id
, nreq
, fence
,
4946 * SET_PSV WQEs are not signaled and solicited
4949 err
= __begin_wqe(qp
, &seg
, &ctrl
, wr
, &idx
,
4950 &size
, &cur_edge
, nreq
, false,
4953 mlx5_ib_warn(dev
, "\n");
4959 err
= set_psv_wr(&sig_handover_wr(wr
)->sig_attrs
->mem
,
4960 mr
->sig
->psv_memory
.psv_idx
, &seg
,
4963 mlx5_ib_warn(dev
, "\n");
4968 finish_wqe(qp
, ctrl
, seg
, size
, cur_edge
, idx
,
4969 wr
->wr_id
, nreq
, fence
,
4970 MLX5_OPCODE_SET_PSV
);
4971 err
= __begin_wqe(qp
, &seg
, &ctrl
, wr
, &idx
,
4972 &size
, &cur_edge
, nreq
, false,
4975 mlx5_ib_warn(dev
, "\n");
4981 err
= set_psv_wr(&sig_handover_wr(wr
)->sig_attrs
->wire
,
4982 mr
->sig
->psv_wire
.psv_idx
, &seg
,
4985 mlx5_ib_warn(dev
, "\n");
4990 finish_wqe(qp
, ctrl
, seg
, size
, cur_edge
, idx
,
4991 wr
->wr_id
, nreq
, fence
,
4992 MLX5_OPCODE_SET_PSV
);
4993 qp
->next_fence
= MLX5_FENCE_MODE_INITIATOR_SMALL
;
5003 switch (wr
->opcode
) {
5004 case IB_WR_RDMA_WRITE
:
5005 case IB_WR_RDMA_WRITE_WITH_IMM
:
5006 set_raddr_seg(seg
, rdma_wr(wr
)->remote_addr
,
5008 seg
+= sizeof(struct mlx5_wqe_raddr_seg
);
5009 size
+= sizeof(struct mlx5_wqe_raddr_seg
) / 16;
5018 if (unlikely(!mdev
->port_caps
[qp
->port
- 1].has_smi
)) {
5019 mlx5_ib_warn(dev
, "Send SMP MADs is not allowed\n");
5025 case MLX5_IB_QPT_HW_GSI
:
5026 set_datagram_seg(seg
, wr
);
5027 seg
+= sizeof(struct mlx5_wqe_datagram_seg
);
5028 size
+= sizeof(struct mlx5_wqe_datagram_seg
) / 16;
5029 handle_post_send_edge(&qp
->sq
, &seg
, size
, &cur_edge
);
5033 set_datagram_seg(seg
, wr
);
5034 seg
+= sizeof(struct mlx5_wqe_datagram_seg
);
5035 size
+= sizeof(struct mlx5_wqe_datagram_seg
) / 16;
5036 handle_post_send_edge(&qp
->sq
, &seg
, size
, &cur_edge
);
5038 /* handle qp that supports ud offload */
5039 if (qp
->flags
& IB_QP_CREATE_IPOIB_UD_LSO
) {
5040 struct mlx5_wqe_eth_pad
*pad
;
5043 memset(pad
, 0, sizeof(struct mlx5_wqe_eth_pad
));
5044 seg
+= sizeof(struct mlx5_wqe_eth_pad
);
5045 size
+= sizeof(struct mlx5_wqe_eth_pad
) / 16;
5046 set_eth_seg(wr
, qp
, &seg
, &size
, &cur_edge
);
5047 handle_post_send_edge(&qp
->sq
, &seg
, size
,
5051 case MLX5_IB_QPT_REG_UMR
:
5052 if (wr
->opcode
!= MLX5_IB_WR_UMR
) {
5054 mlx5_ib_warn(dev
, "bad opcode\n");
5057 qp
->sq
.wr_data
[idx
] = MLX5_IB_WR_UMR
;
5058 ctrl
->imm
= cpu_to_be32(umr_wr(wr
)->mkey
);
5059 err
= set_reg_umr_segment(dev
, seg
, wr
, !!(MLX5_CAP_GEN(mdev
, atomic
)));
5062 seg
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
);
5063 size
+= sizeof(struct mlx5_wqe_umr_ctrl_seg
) / 16;
5064 handle_post_send_edge(&qp
->sq
, &seg
, size
, &cur_edge
);
5065 set_reg_mkey_segment(seg
, wr
);
5066 seg
+= sizeof(struct mlx5_mkey_seg
);
5067 size
+= sizeof(struct mlx5_mkey_seg
) / 16;
5068 handle_post_send_edge(&qp
->sq
, &seg
, size
, &cur_edge
);
5075 if (wr
->send_flags
& IB_SEND_INLINE
&& num_sge
) {
5076 err
= set_data_inl_seg(qp
, wr
, &seg
, &size
, &cur_edge
);
5077 if (unlikely(err
)) {
5078 mlx5_ib_warn(dev
, "\n");
5083 for (i
= 0; i
< num_sge
; i
++) {
5084 handle_post_send_edge(&qp
->sq
, &seg
, size
,
5086 if (likely(wr
->sg_list
[i
].length
)) {
5088 ((struct mlx5_wqe_data_seg
*)seg
,
5090 size
+= sizeof(struct mlx5_wqe_data_seg
) / 16;
5091 seg
+= sizeof(struct mlx5_wqe_data_seg
);
5096 qp
->next_fence
= next_fence
;
5097 finish_wqe(qp
, ctrl
, seg
, size
, cur_edge
, idx
, wr
->wr_id
, nreq
,
5098 fence
, mlx5_ib_opcode
[wr
->opcode
]);
5101 dump_wqe(qp
, idx
, size
);
5106 qp
->sq
.head
+= nreq
;
5108 /* Make sure that descriptors are written before
5109 * updating doorbell record and ringing the doorbell
5113 qp
->db
.db
[MLX5_SND_DBR
] = cpu_to_be32(qp
->sq
.cur_post
);
5115 /* Make sure doorbell record is visible to the HCA before
5116 * we hit doorbell */
5119 /* currently we support only regular doorbells */
5120 mlx5_write64((__be32
*)ctrl
, bf
->bfreg
->map
+ bf
->offset
, NULL
);
5121 /* Make sure doorbells don't leak out of SQ spinlock
5122 * and reach the HCA out of order.
5125 bf
->offset
^= bf
->buf_size
;
5128 spin_unlock_irqrestore(&qp
->sq
.lock
, flags
);
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr)
{
	return _mlx5_ib_post_send(ibqp, wr, bad_wr, false);
}
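/*
 * The drain helpers further below call _mlx5_ib_post_send()/_mlx5_ib_post_recv()
 * with drain = true, which allows the drain WR to be posted even when the
 * device has entered the internal-error state.
 */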
static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
{
	sig->signature = calc_sig(sig, size);
}
5144 static int _mlx5_ib_post_recv(struct ib_qp
*ibqp
, const struct ib_recv_wr
*wr
,
5145 const struct ib_recv_wr
**bad_wr
, bool drain
)
5147 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
5148 struct mlx5_wqe_data_seg
*scat
;
5149 struct mlx5_rwqe_sig
*sig
;
5150 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
5151 struct mlx5_core_dev
*mdev
= dev
->mdev
;
5152 unsigned long flags
;
5158 if (unlikely(mdev
->state
== MLX5_DEVICE_STATE_INTERNAL_ERROR
&&
5164 if (unlikely(ibqp
->qp_type
== IB_QPT_GSI
))
5165 return mlx5_ib_gsi_post_recv(ibqp
, wr
, bad_wr
);
5167 spin_lock_irqsave(&qp
->rq
.lock
, flags
);
5169 ind
= qp
->rq
.head
& (qp
->rq
.wqe_cnt
- 1);
5171 for (nreq
= 0; wr
; nreq
++, wr
= wr
->next
) {
5172 if (mlx5_wq_overflow(&qp
->rq
, nreq
, qp
->ibqp
.recv_cq
)) {
5178 if (unlikely(wr
->num_sge
> qp
->rq
.max_gs
)) {
5184 scat
= mlx5_frag_buf_get_wqe(&qp
->rq
.fbc
, ind
);
5188 for (i
= 0; i
< wr
->num_sge
; i
++)
5189 set_data_ptr_seg(scat
+ i
, wr
->sg_list
+ i
);
5191 if (i
< qp
->rq
.max_gs
) {
5192 scat
[i
].byte_count
= 0;
5193 scat
[i
].lkey
= cpu_to_be32(MLX5_INVALID_LKEY
);
5198 sig
= (struct mlx5_rwqe_sig
*)scat
;
5199 set_sig_seg(sig
, (qp
->rq
.max_gs
+ 1) << 2);
5202 qp
->rq
.wrid
[ind
] = wr
->wr_id
;
5204 ind
= (ind
+ 1) & (qp
->rq
.wqe_cnt
- 1);
5209 qp
->rq
.head
+= nreq
;
5211 /* Make sure that descriptors are written before
5216 *qp
->db
.db
= cpu_to_be32(qp
->rq
.head
& 0xffff);
5219 spin_unlock_irqrestore(&qp
->rq
.lock
, flags
);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	return _mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
}
static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
	switch (mlx5_state) {
	case MLX5_QP_STATE_RST:      return IB_QPS_RESET;
	case MLX5_QP_STATE_INIT:     return IB_QPS_INIT;
	case MLX5_QP_STATE_RTR:      return IB_QPS_RTR;
	case MLX5_QP_STATE_RTS:      return IB_QPS_RTS;
	case MLX5_QP_STATE_SQ_DRAINING:
	case MLX5_QP_STATE_SQD:      return IB_QPS_SQD;
	case MLX5_QP_STATE_SQER:     return IB_QPS_SQE;
	case MLX5_QP_STATE_ERR:      return IB_QPS_ERR;
	default:		     return -1;
	}
}
static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
{
	switch (mlx5_mig_state) {
	case MLX5_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX5_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX5_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}
static int to_ib_qp_access_flags(int mlx5_flags)
{
	int ib_flags = 0;

	if (mlx5_flags & MLX5_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx5_flags & MLX5_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx5_flags & MLX5_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}
5269 static void to_rdma_ah_attr(struct mlx5_ib_dev
*ibdev
,
5270 struct rdma_ah_attr
*ah_attr
,
5271 struct mlx5_qp_path
*path
)
5274 memset(ah_attr
, 0, sizeof(*ah_attr
));
5276 if (!path
->port
|| path
->port
> ibdev
->num_ports
)
5279 ah_attr
->type
= rdma_ah_find_type(&ibdev
->ib_dev
, path
->port
);
5281 rdma_ah_set_port_num(ah_attr
, path
->port
);
5282 rdma_ah_set_sl(ah_attr
, path
->dci_cfi_prio_sl
& 0xf);
5284 rdma_ah_set_dlid(ah_attr
, be16_to_cpu(path
->rlid
));
5285 rdma_ah_set_path_bits(ah_attr
, path
->grh_mlid
& 0x7f);
5286 rdma_ah_set_static_rate(ah_attr
,
5287 path
->static_rate
? path
->static_rate
- 5 : 0);
5288 if (path
->grh_mlid
& (1 << 7)) {
5289 u32 tc_fl
= be32_to_cpu(path
->tclass_flowlabel
);
5291 rdma_ah_set_grh(ah_attr
, NULL
,
5295 (tc_fl
>> 20) & 0xff);
5296 rdma_ah_set_dgid_raw(ah_attr
, path
->rgid
);
5300 static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev
*dev
,
5301 struct mlx5_ib_sq
*sq
,
5306 err
= mlx5_core_query_sq_state(dev
->mdev
, sq
->base
.mqp
.qpn
, sq_state
);
5309 sq
->state
= *sq_state
;
5315 static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev
*dev
,
5316 struct mlx5_ib_rq
*rq
,
5324 inlen
= MLX5_ST_SZ_BYTES(query_rq_out
);
5325 out
= kvzalloc(inlen
, GFP_KERNEL
);
5329 err
= mlx5_core_query_rq(dev
->mdev
, rq
->base
.mqp
.qpn
, out
);
5333 rqc
= MLX5_ADDR_OF(query_rq_out
, out
, rq_context
);
5334 *rq_state
= MLX5_GET(rqc
, rqc
, state
);
5335 rq
->state
= *rq_state
;
5342 static int sqrq_state_to_qp_state(u8 sq_state
, u8 rq_state
,
5343 struct mlx5_ib_qp
*qp
, u8
*qp_state
)
5345 static const u8 sqrq_trans
[MLX5_RQ_NUM_STATE
][MLX5_SQ_NUM_STATE
] = {
5346 [MLX5_RQC_STATE_RST
] = {
5347 [MLX5_SQC_STATE_RST
] = IB_QPS_RESET
,
5348 [MLX5_SQC_STATE_RDY
] = MLX5_QP_STATE_BAD
,
5349 [MLX5_SQC_STATE_ERR
] = MLX5_QP_STATE_BAD
,
5350 [MLX5_SQ_STATE_NA
] = IB_QPS_RESET
,
5352 [MLX5_RQC_STATE_RDY
] = {
5353 [MLX5_SQC_STATE_RST
] = MLX5_QP_STATE_BAD
,
5354 [MLX5_SQC_STATE_RDY
] = MLX5_QP_STATE
,
5355 [MLX5_SQC_STATE_ERR
] = IB_QPS_SQE
,
5356 [MLX5_SQ_STATE_NA
] = MLX5_QP_STATE
,
5358 [MLX5_RQC_STATE_ERR
] = {
5359 [MLX5_SQC_STATE_RST
] = MLX5_QP_STATE_BAD
,
5360 [MLX5_SQC_STATE_RDY
] = MLX5_QP_STATE_BAD
,
5361 [MLX5_SQC_STATE_ERR
] = IB_QPS_ERR
,
5362 [MLX5_SQ_STATE_NA
] = IB_QPS_ERR
,
5364 [MLX5_RQ_STATE_NA
] = {
5365 [MLX5_SQC_STATE_RST
] = IB_QPS_RESET
,
5366 [MLX5_SQC_STATE_RDY
] = MLX5_QP_STATE
,
5367 [MLX5_SQC_STATE_ERR
] = MLX5_QP_STATE
,
5368 [MLX5_SQ_STATE_NA
] = MLX5_QP_STATE_BAD
,
5372 *qp_state
= sqrq_trans
[rq_state
][sq_state
];
5374 if (*qp_state
== MLX5_QP_STATE_BAD
) {
5375 WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x",
5376 qp
->raw_packet_qp
.sq
.base
.mqp
.qpn
, sq_state
,
5377 qp
->raw_packet_qp
.rq
.base
.mqp
.qpn
, rq_state
);
5381 if (*qp_state
== MLX5_QP_STATE
)
5382 *qp_state
= qp
->state
;
5387 static int query_raw_packet_qp_state(struct mlx5_ib_dev
*dev
,
5388 struct mlx5_ib_qp
*qp
,
5389 u8
*raw_packet_qp_state
)
5391 struct mlx5_ib_raw_packet_qp
*raw_packet_qp
= &qp
->raw_packet_qp
;
5392 struct mlx5_ib_sq
*sq
= &raw_packet_qp
->sq
;
5393 struct mlx5_ib_rq
*rq
= &raw_packet_qp
->rq
;
5395 u8 sq_state
= MLX5_SQ_STATE_NA
;
5396 u8 rq_state
= MLX5_RQ_STATE_NA
;
5398 if (qp
->sq
.wqe_cnt
) {
5399 err
= query_raw_packet_qp_sq_state(dev
, sq
, &sq_state
);
5404 if (qp
->rq
.wqe_cnt
) {
5405 err
= query_raw_packet_qp_rq_state(dev
, rq
, &rq_state
);
5410 return sqrq_state_to_qp_state(sq_state
, rq_state
, qp
,
5411 raw_packet_qp_state
);
5414 static int query_qp_attr(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*qp
,
5415 struct ib_qp_attr
*qp_attr
)
5417 int outlen
= MLX5_ST_SZ_BYTES(query_qp_out
);
5418 struct mlx5_qp_context
*context
;
5423 outb
= kzalloc(outlen
, GFP_KERNEL
);
5427 err
= mlx5_core_qp_query(dev
->mdev
, &qp
->trans_qp
.base
.mqp
, outb
,
5432 /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
5433 context
= (struct mlx5_qp_context
*)MLX5_ADDR_OF(query_qp_out
, outb
, qpc
);
5435 mlx5_state
= be32_to_cpu(context
->flags
) >> 28;
5437 qp
->state
= to_ib_qp_state(mlx5_state
);
5438 qp_attr
->path_mtu
= context
->mtu_msgmax
>> 5;
5439 qp_attr
->path_mig_state
=
5440 to_ib_mig_state((be32_to_cpu(context
->flags
) >> 11) & 0x3);
5441 qp_attr
->qkey
= be32_to_cpu(context
->qkey
);
5442 qp_attr
->rq_psn
= be32_to_cpu(context
->rnr_nextrecvpsn
) & 0xffffff;
5443 qp_attr
->sq_psn
= be32_to_cpu(context
->next_send_psn
) & 0xffffff;
5444 qp_attr
->dest_qp_num
= be32_to_cpu(context
->log_pg_sz_remote_qpn
) & 0xffffff;
5445 qp_attr
->qp_access_flags
=
5446 to_ib_qp_access_flags(be32_to_cpu(context
->params2
));
5448 if (qp
->ibqp
.qp_type
== IB_QPT_RC
|| qp
->ibqp
.qp_type
== IB_QPT_UC
) {
5449 to_rdma_ah_attr(dev
, &qp_attr
->ah_attr
, &context
->pri_path
);
5450 to_rdma_ah_attr(dev
, &qp_attr
->alt_ah_attr
, &context
->alt_path
);
5451 qp_attr
->alt_pkey_index
=
5452 be16_to_cpu(context
->alt_path
.pkey_index
);
5453 qp_attr
->alt_port_num
=
5454 rdma_ah_get_port_num(&qp_attr
->alt_ah_attr
);
5457 qp_attr
->pkey_index
= be16_to_cpu(context
->pri_path
.pkey_index
);
5458 qp_attr
->port_num
= context
->pri_path
.port
;
5460 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
5461 qp_attr
->sq_draining
= mlx5_state
== MLX5_QP_STATE_SQ_DRAINING
;
5463 qp_attr
->max_rd_atomic
= 1 << ((be32_to_cpu(context
->params1
) >> 21) & 0x7);
5465 qp_attr
->max_dest_rd_atomic
=
5466 1 << ((be32_to_cpu(context
->params2
) >> 21) & 0x7);
5467 qp_attr
->min_rnr_timer
=
5468 (be32_to_cpu(context
->rnr_nextrecvpsn
) >> 24) & 0x1f;
5469 qp_attr
->timeout
= context
->pri_path
.ackto_lt
>> 3;
5470 qp_attr
->retry_cnt
= (be32_to_cpu(context
->params1
) >> 16) & 0x7;
5471 qp_attr
->rnr_retry
= (be32_to_cpu(context
->params1
) >> 13) & 0x7;
5472 qp_attr
->alt_timeout
= context
->alt_path
.ackto_lt
>> 3;
5479 static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev
*dev
, struct mlx5_ib_qp
*mqp
,
5480 struct ib_qp_attr
*qp_attr
, int qp_attr_mask
,
5481 struct ib_qp_init_attr
*qp_init_attr
)
5483 struct mlx5_core_dct
*dct
= &mqp
->dct
.mdct
;
5485 u32 access_flags
= 0;
5486 int outlen
= MLX5_ST_SZ_BYTES(query_dct_out
);
5489 int supported_mask
= IB_QP_STATE
|
5490 IB_QP_ACCESS_FLAGS
|
5492 IB_QP_MIN_RNR_TIMER
|
5497 if (qp_attr_mask
& ~supported_mask
)
5499 if (mqp
->state
!= IB_QPS_RTR
)
5502 out
= kzalloc(outlen
, GFP_KERNEL
);
5506 err
= mlx5_core_dct_query(dev
->mdev
, dct
, out
, outlen
);
5510 dctc
= MLX5_ADDR_OF(query_dct_out
, out
, dct_context_entry
);
5512 if (qp_attr_mask
& IB_QP_STATE
)
5513 qp_attr
->qp_state
= IB_QPS_RTR
;
5515 if (qp_attr_mask
& IB_QP_ACCESS_FLAGS
) {
5516 if (MLX5_GET(dctc
, dctc
, rre
))
5517 access_flags
|= IB_ACCESS_REMOTE_READ
;
5518 if (MLX5_GET(dctc
, dctc
, rwe
))
5519 access_flags
|= IB_ACCESS_REMOTE_WRITE
;
5520 if (MLX5_GET(dctc
, dctc
, rae
))
5521 access_flags
|= IB_ACCESS_REMOTE_ATOMIC
;
5522 qp_attr
->qp_access_flags
= access_flags
;
5525 if (qp_attr_mask
& IB_QP_PORT
)
5526 qp_attr
->port_num
= MLX5_GET(dctc
, dctc
, port
);
5527 if (qp_attr_mask
& IB_QP_MIN_RNR_TIMER
)
5528 qp_attr
->min_rnr_timer
= MLX5_GET(dctc
, dctc
, min_rnr_nak
);
5529 if (qp_attr_mask
& IB_QP_AV
) {
5530 qp_attr
->ah_attr
.grh
.traffic_class
= MLX5_GET(dctc
, dctc
, tclass
);
5531 qp_attr
->ah_attr
.grh
.flow_label
= MLX5_GET(dctc
, dctc
, flow_label
);
5532 qp_attr
->ah_attr
.grh
.sgid_index
= MLX5_GET(dctc
, dctc
, my_addr_index
);
5533 qp_attr
->ah_attr
.grh
.hop_limit
= MLX5_GET(dctc
, dctc
, hop_limit
);
5535 if (qp_attr_mask
& IB_QP_PATH_MTU
)
5536 qp_attr
->path_mtu
= MLX5_GET(dctc
, dctc
, mtu
);
5537 if (qp_attr_mask
& IB_QP_PKEY_INDEX
)
5538 qp_attr
->pkey_index
= MLX5_GET(dctc
, dctc
, pkey_index
);
5544 int mlx5_ib_query_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*qp_attr
,
5545 int qp_attr_mask
, struct ib_qp_init_attr
*qp_init_attr
)
5547 struct mlx5_ib_dev
*dev
= to_mdev(ibqp
->device
);
5548 struct mlx5_ib_qp
*qp
= to_mqp(ibqp
);
5550 u8 raw_packet_qp_state
;
5552 if (ibqp
->rwq_ind_tbl
)
5555 if (unlikely(ibqp
->qp_type
== IB_QPT_GSI
))
5556 return mlx5_ib_gsi_query_qp(ibqp
, qp_attr
, qp_attr_mask
,
5559 /* Not all of output fields are applicable, make sure to zero them */
5560 memset(qp_init_attr
, 0, sizeof(*qp_init_attr
));
5561 memset(qp_attr
, 0, sizeof(*qp_attr
));
5563 if (unlikely(qp
->qp_sub_type
== MLX5_IB_QPT_DCT
))
5564 return mlx5_ib_dct_query_qp(dev
, qp
, qp_attr
,
5565 qp_attr_mask
, qp_init_attr
);
5567 mutex_lock(&qp
->mutex
);
5569 if (qp
->ibqp
.qp_type
== IB_QPT_RAW_PACKET
||
5570 qp
->flags
& MLX5_IB_QP_UNDERLAY
) {
5571 err
= query_raw_packet_qp_state(dev
, qp
, &raw_packet_qp_state
);
5574 qp
->state
= raw_packet_qp_state
;
5575 qp_attr
->port_num
= 1;
5577 err
= query_qp_attr(dev
, qp
, qp_attr
);
5582 qp_attr
->qp_state
= qp
->state
;
5583 qp_attr
->cur_qp_state
= qp_attr
->qp_state
;
5584 qp_attr
->cap
.max_recv_wr
= qp
->rq
.wqe_cnt
;
5585 qp_attr
->cap
.max_recv_sge
= qp
->rq
.max_gs
;
5587 if (!ibqp
->uobject
) {
5588 qp_attr
->cap
.max_send_wr
= qp
->sq
.max_post
;
5589 qp_attr
->cap
.max_send_sge
= qp
->sq
.max_gs
;
5590 qp_init_attr
->qp_context
= ibqp
->qp_context
;
5592 qp_attr
->cap
.max_send_wr
= 0;
5593 qp_attr
->cap
.max_send_sge
= 0;
5596 qp_init_attr
->qp_type
= ibqp
->qp_type
;
5597 qp_init_attr
->recv_cq
= ibqp
->recv_cq
;
5598 qp_init_attr
->send_cq
= ibqp
->send_cq
;
5599 qp_init_attr
->srq
= ibqp
->srq
;
5600 qp_attr
->cap
.max_inline_data
= qp
->max_inline_data
;
5602 qp_init_attr
->cap
= qp_attr
->cap
;
5604 qp_init_attr
->create_flags
= 0;
5605 if (qp
->flags
& MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK
)
5606 qp_init_attr
->create_flags
|= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK
;
5608 if (qp
->flags
& MLX5_IB_QP_CROSS_CHANNEL
)
5609 qp_init_attr
->create_flags
|= IB_QP_CREATE_CROSS_CHANNEL
;
5610 if (qp
->flags
& MLX5_IB_QP_MANAGED_SEND
)
5611 qp_init_attr
->create_flags
|= IB_QP_CREATE_MANAGED_SEND
;
5612 if (qp
->flags
& MLX5_IB_QP_MANAGED_RECV
)
5613 qp_init_attr
->create_flags
|= IB_QP_CREATE_MANAGED_RECV
;
5614 if (qp
->flags
& MLX5_IB_QP_SQPN_QP1
)
5615 qp_init_attr
->create_flags
|= mlx5_ib_create_qp_sqpn_qp1();
5617 qp_init_attr
->sq_sig_type
= qp
->sq_signal_bits
& MLX5_WQE_CTRL_CQ_UPDATE
?
5618 IB_SIGNAL_ALL_WR
: IB_SIGNAL_REQ_WR
;
5621 mutex_unlock(&qp
->mutex
);
5625 struct ib_xrcd
*mlx5_ib_alloc_xrcd(struct ib_device
*ibdev
,
5626 struct ib_ucontext
*context
,
5627 struct ib_udata
*udata
)
5629 struct mlx5_ib_dev
*dev
= to_mdev(ibdev
);
5630 struct mlx5_ib_xrcd
*xrcd
;
5633 if (!MLX5_CAP_GEN(dev
->mdev
, xrc
))
5634 return ERR_PTR(-ENOSYS
);
5636 xrcd
= kmalloc(sizeof(*xrcd
), GFP_KERNEL
);
5638 return ERR_PTR(-ENOMEM
);
5640 err
= mlx5_cmd_xrcd_alloc(dev
->mdev
, &xrcd
->xrcdn
, 0);
5643 return ERR_PTR(-ENOMEM
);
5646 return &xrcd
->ibxrcd
;
5649 int mlx5_ib_dealloc_xrcd(struct ib_xrcd
*xrcd
)
5651 struct mlx5_ib_dev
*dev
= to_mdev(xrcd
->device
);
5652 u32 xrcdn
= to_mxrcd(xrcd
)->xrcdn
;
5655 err
= mlx5_cmd_xrcd_dealloc(dev
->mdev
, xrcdn
, 0);
5657 mlx5_ib_warn(dev
, "failed to dealloc xrcdn 0x%x\n", xrcdn
);
5663 static void mlx5_ib_wq_event(struct mlx5_core_qp
*core_qp
, int type
)
5665 struct mlx5_ib_rwq
*rwq
= to_mibrwq(core_qp
);
5666 struct mlx5_ib_dev
*dev
= to_mdev(rwq
->ibwq
.device
);
5667 struct ib_event event
;
5669 if (rwq
->ibwq
.event_handler
) {
5670 event
.device
= rwq
->ibwq
.device
;
5671 event
.element
.wq
= &rwq
->ibwq
;
5673 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR
:
5674 event
.event
= IB_EVENT_WQ_FATAL
;
5677 mlx5_ib_warn(dev
, "Unexpected event type %d on WQ %06x\n", type
, core_qp
->qpn
);
5681 rwq
->ibwq
.event_handler(&event
, rwq
->ibwq
.wq_context
);
5685 static int set_delay_drop(struct mlx5_ib_dev
*dev
)
5689 mutex_lock(&dev
->delay_drop
.lock
);
5690 if (dev
->delay_drop
.activate
)
5693 err
= mlx5_core_set_delay_drop(dev
->mdev
, dev
->delay_drop
.timeout
);
5697 dev
->delay_drop
.activate
= true;
5699 mutex_unlock(&dev
->delay_drop
.lock
);
5702 atomic_inc(&dev
->delay_drop
.rqs_cnt
);
5706 static int create_rq(struct mlx5_ib_rwq
*rwq
, struct ib_pd
*pd
,
5707 struct ib_wq_init_attr
*init_attr
)
5709 struct mlx5_ib_dev
*dev
;
5710 int has_net_offloads
;
5718 dev
= to_mdev(pd
->device
);
5720 inlen
= MLX5_ST_SZ_BYTES(create_rq_in
) + sizeof(u64
) * rwq
->rq_num_pas
;
5721 in
= kvzalloc(inlen
, GFP_KERNEL
);
5725 MLX5_SET(create_rq_in
, in
, uid
, to_mpd(pd
)->uid
);
5726 rqc
= MLX5_ADDR_OF(create_rq_in
, in
, ctx
);
5727 MLX5_SET(rqc
, rqc
, mem_rq_type
,
5728 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE
);
5729 MLX5_SET(rqc
, rqc
, user_index
, rwq
->user_index
);
5730 MLX5_SET(rqc
, rqc
, cqn
, to_mcq(init_attr
->cq
)->mcq
.cqn
);
5731 MLX5_SET(rqc
, rqc
, state
, MLX5_RQC_STATE_RST
);
5732 MLX5_SET(rqc
, rqc
, flush_in_error_en
, 1);
5733 wq
= MLX5_ADDR_OF(rqc
, rqc
, wq
);
5734 MLX5_SET(wq
, wq
, wq_type
,
5735 rwq
->create_flags
& MLX5_IB_WQ_FLAGS_STRIDING_RQ
?
5736 MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ
: MLX5_WQ_TYPE_CYCLIC
);
5737 if (init_attr
->create_flags
& IB_WQ_FLAGS_PCI_WRITE_END_PADDING
) {
5738 if (!MLX5_CAP_GEN(dev
->mdev
, end_pad
)) {
5739 mlx5_ib_dbg(dev
, "Scatter end padding is not supported\n");
5743 MLX5_SET(wq
, wq
, end_padding_mode
, MLX5_WQ_END_PAD_MODE_ALIGN
);
5746 MLX5_SET(wq
, wq
, log_wq_stride
, rwq
->log_rq_stride
);
5747 if (rwq
->create_flags
& MLX5_IB_WQ_FLAGS_STRIDING_RQ
) {
5748 MLX5_SET(wq
, wq
, two_byte_shift_en
, rwq
->two_byte_shift_en
);
5749 MLX5_SET(wq
, wq
, log_wqe_stride_size
,
5750 rwq
->single_stride_log_num_of_bytes
-
5751 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES
);
5752 MLX5_SET(wq
, wq
, log_wqe_num_of_strides
, rwq
->log_num_strides
-
5753 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES
);
5755 MLX5_SET(wq
, wq
, log_wq_sz
, rwq
->log_rq_size
);
5756 MLX5_SET(wq
, wq
, pd
, to_mpd(pd
)->pdn
);
5757 MLX5_SET(wq
, wq
, page_offset
, rwq
->rq_page_offset
);
5758 MLX5_SET(wq
, wq
, log_wq_pg_sz
, rwq
->log_page_size
);
5759 MLX5_SET(wq
, wq
, wq_signature
, rwq
->wq_sig
);
5760 MLX5_SET64(wq
, wq
, dbr_addr
, rwq
->db
.dma
);
5761 has_net_offloads
= MLX5_CAP_GEN(dev
->mdev
, eth_net_offloads
);
5762 if (init_attr
->create_flags
& IB_WQ_FLAGS_CVLAN_STRIPPING
) {
5763 if (!(has_net_offloads
&& MLX5_CAP_ETH(dev
->mdev
, vlan_cap
))) {
5764 mlx5_ib_dbg(dev
, "VLAN offloads are not supported\n");
5769 MLX5_SET(rqc
, rqc
, vsd
, 1);
5771 if (init_attr
->create_flags
& IB_WQ_FLAGS_SCATTER_FCS
) {
5772 if (!(has_net_offloads
&& MLX5_CAP_ETH(dev
->mdev
, scatter_fcs
))) {
5773 mlx5_ib_dbg(dev
, "Scatter FCS is not supported\n");
5777 MLX5_SET(rqc
, rqc
, scatter_fcs
, 1);
5779 if (init_attr
->create_flags
& IB_WQ_FLAGS_DELAY_DROP
) {
5780 if (!(dev
->ib_dev
.attrs
.raw_packet_caps
&
5781 IB_RAW_PACKET_CAP_DELAY_DROP
)) {
5782 mlx5_ib_dbg(dev
, "Delay drop is not supported\n");
5786 MLX5_SET(rqc
, rqc
, delay_drop_en
, 1);
5788 rq_pas0
= (__be64
*)MLX5_ADDR_OF(wq
, wq
, pas
);
5789 mlx5_ib_populate_pas(dev
, rwq
->umem
, rwq
->page_shift
, rq_pas0
, 0);
5790 err
= mlx5_core_create_rq_tracked(dev
->mdev
, in
, inlen
, &rwq
->core_qp
);
5791 if (!err
&& init_attr
->create_flags
& IB_WQ_FLAGS_DELAY_DROP
) {
5792 err
= set_delay_drop(dev
);
5794 mlx5_ib_warn(dev
, "Failed to enable delay drop err=%d\n",
5796 mlx5_core_destroy_rq_tracked(dev
->mdev
, &rwq
->core_qp
);
5798 rwq
->create_flags
|= MLX5_IB_WQ_FLAGS_DELAY_DROP
;
5806 static int set_user_rq_size(struct mlx5_ib_dev
*dev
,
5807 struct ib_wq_init_attr
*wq_init_attr
,
5808 struct mlx5_ib_create_wq
*ucmd
,
5809 struct mlx5_ib_rwq
*rwq
)
5811 /* Sanity check RQ size before proceeding */
5812 if (wq_init_attr
->max_wr
> (1 << MLX5_CAP_GEN(dev
->mdev
, log_max_wq_sz
)))
5815 if (!ucmd
->rq_wqe_count
)
5818 rwq
->wqe_count
= ucmd
->rq_wqe_count
;
5819 rwq
->wqe_shift
= ucmd
->rq_wqe_shift
;
5820 if (check_shl_overflow(rwq
->wqe_count
, rwq
->wqe_shift
, &rwq
->buf_size
))
5823 rwq
->log_rq_stride
= rwq
->wqe_shift
;
5824 rwq
->log_rq_size
= ilog2(rwq
->wqe_count
);
5828 static int prepare_user_rq(struct ib_pd
*pd
,
5829 struct ib_wq_init_attr
*init_attr
,
5830 struct ib_udata
*udata
,
5831 struct mlx5_ib_rwq
*rwq
)
5833 struct mlx5_ib_dev
*dev
= to_mdev(pd
->device
);
5834 struct mlx5_ib_create_wq ucmd
= {};
5836 size_t required_cmd_sz
;
5838 required_cmd_sz
= offsetof(typeof(ucmd
), single_stride_log_num_of_bytes
)
5839 + sizeof(ucmd
.single_stride_log_num_of_bytes
);
5840 if (udata
->inlen
< required_cmd_sz
) {
5841 mlx5_ib_dbg(dev
, "invalid inlen\n");
5845 if (udata
->inlen
> sizeof(ucmd
) &&
5846 !ib_is_udata_cleared(udata
, sizeof(ucmd
),
5847 udata
->inlen
- sizeof(ucmd
))) {
5848 mlx5_ib_dbg(dev
, "inlen is not supported\n");
5852 if (ib_copy_from_udata(&ucmd
, udata
, min(sizeof(ucmd
), udata
->inlen
))) {
5853 mlx5_ib_dbg(dev
, "copy failed\n");
5857 if (ucmd
.comp_mask
& (~MLX5_IB_CREATE_WQ_STRIDING_RQ
)) {
5858 mlx5_ib_dbg(dev
, "invalid comp mask\n");
5860 } else if (ucmd
.comp_mask
& MLX5_IB_CREATE_WQ_STRIDING_RQ
) {
5861 if (!MLX5_CAP_GEN(dev
->mdev
, striding_rq
)) {
5862 mlx5_ib_dbg(dev
, "Striding RQ is not supported\n");
5865 if ((ucmd
.single_stride_log_num_of_bytes
<
5866 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES
) ||
5867 (ucmd
.single_stride_log_num_of_bytes
>
5868 MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES
)) {
5869 mlx5_ib_dbg(dev
, "Invalid log stride size (%u. Range is %u - %u)\n",
5870 ucmd
.single_stride_log_num_of_bytes
,
5871 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES
,
5872 MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES
);
5875 if ((ucmd
.single_wqe_log_num_of_strides
>
5876 MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES
) ||
5877 (ucmd
.single_wqe_log_num_of_strides
<
5878 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES
)) {
5879 mlx5_ib_dbg(dev
, "Invalid log num strides (%u. Range is %u - %u)\n",
5880 ucmd
.single_wqe_log_num_of_strides
,
5881 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES
,
5882 MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES
);
5885 rwq
->single_stride_log_num_of_bytes
=
5886 ucmd
.single_stride_log_num_of_bytes
;
5887 rwq
->log_num_strides
= ucmd
.single_wqe_log_num_of_strides
;
5888 rwq
->two_byte_shift_en
= !!ucmd
.two_byte_shift_en
;
5889 rwq
->create_flags
|= MLX5_IB_WQ_FLAGS_STRIDING_RQ
;
5892 err
= set_user_rq_size(dev
, init_attr
, &ucmd
, rwq
);
5894 mlx5_ib_dbg(dev
, "err %d\n", err
);
5898 err
= create_user_rq(dev
, pd
, udata
, rwq
, &ucmd
);
5900 mlx5_ib_dbg(dev
, "err %d\n", err
);
5904 rwq
->user_index
= ucmd
.user_index
;
5908 struct ib_wq
*mlx5_ib_create_wq(struct ib_pd
*pd
,
5909 struct ib_wq_init_attr
*init_attr
,
5910 struct ib_udata
*udata
)
5912 struct mlx5_ib_dev
*dev
;
5913 struct mlx5_ib_rwq
*rwq
;
5914 struct mlx5_ib_create_wq_resp resp
= {};
5915 size_t min_resp_len
;
5919 return ERR_PTR(-ENOSYS
);
5921 min_resp_len
= offsetof(typeof(resp
), reserved
) + sizeof(resp
.reserved
);
5922 if (udata
->outlen
&& udata
->outlen
< min_resp_len
)
5923 return ERR_PTR(-EINVAL
);
5925 dev
= to_mdev(pd
->device
);
5926 switch (init_attr
->wq_type
) {
5928 rwq
= kzalloc(sizeof(*rwq
), GFP_KERNEL
);
5930 return ERR_PTR(-ENOMEM
);
5931 err
= prepare_user_rq(pd
, init_attr
, udata
, rwq
);
5934 err
= create_rq(rwq
, pd
, init_attr
);
5939 mlx5_ib_dbg(dev
, "unsupported wq type %d\n",
5940 init_attr
->wq_type
);
5941 return ERR_PTR(-EINVAL
);
5944 rwq
->ibwq
.wq_num
= rwq
->core_qp
.qpn
;
5945 rwq
->ibwq
.state
= IB_WQS_RESET
;
5946 if (udata
->outlen
) {
5947 resp
.response_length
= offsetof(typeof(resp
), response_length
) +
5948 sizeof(resp
.response_length
);
5949 err
= ib_copy_to_udata(udata
, &resp
, resp
.response_length
);
5954 rwq
->core_qp
.event
= mlx5_ib_wq_event
;
5955 rwq
->ibwq
.event_handler
= init_attr
->event_handler
;
5959 mlx5_core_destroy_rq_tracked(dev
->mdev
, &rwq
->core_qp
);
5961 destroy_user_rq(dev
, pd
, rwq
);
5964 return ERR_PTR(err
);
int mlx5_ib_destroy_wq(struct ib_wq *wq)
{
	struct mlx5_ib_dev *dev = to_mdev(wq->device);
	struct mlx5_ib_rwq *rwq = to_mrwq(wq);

	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
	destroy_user_rq(dev, wq->pd, rwq);
	kfree(rwq);

	return 0;
}
5979 struct ib_rwq_ind_table
*mlx5_ib_create_rwq_ind_table(struct ib_device
*device
,
5980 struct ib_rwq_ind_table_init_attr
*init_attr
,
5981 struct ib_udata
*udata
)
5983 struct mlx5_ib_dev
*dev
= to_mdev(device
);
5984 struct mlx5_ib_rwq_ind_table
*rwq_ind_tbl
;
5985 int sz
= 1 << init_attr
->log_ind_tbl_size
;
5986 struct mlx5_ib_create_rwq_ind_tbl_resp resp
= {};
5987 size_t min_resp_len
;
5994 if (udata
->inlen
> 0 &&
5995 !ib_is_udata_cleared(udata
, 0,
5997 return ERR_PTR(-EOPNOTSUPP
);
5999 if (init_attr
->log_ind_tbl_size
>
6000 MLX5_CAP_GEN(dev
->mdev
, log_max_rqt_size
)) {
6001 mlx5_ib_dbg(dev
, "log_ind_tbl_size = %d is bigger than supported = %d\n",
6002 init_attr
->log_ind_tbl_size
,
6003 MLX5_CAP_GEN(dev
->mdev
, log_max_rqt_size
));
6004 return ERR_PTR(-EINVAL
);
6007 min_resp_len
= offsetof(typeof(resp
), reserved
) + sizeof(resp
.reserved
);
6008 if (udata
->outlen
&& udata
->outlen
< min_resp_len
)
6009 return ERR_PTR(-EINVAL
);
6011 rwq_ind_tbl
= kzalloc(sizeof(*rwq_ind_tbl
), GFP_KERNEL
);
6013 return ERR_PTR(-ENOMEM
);
6015 inlen
= MLX5_ST_SZ_BYTES(create_rqt_in
) + sizeof(u32
) * sz
;
6016 in
= kvzalloc(inlen
, GFP_KERNEL
);
6022 rqtc
= MLX5_ADDR_OF(create_rqt_in
, in
, rqt_context
);
6024 MLX5_SET(rqtc
, rqtc
, rqt_actual_size
, sz
);
6025 MLX5_SET(rqtc
, rqtc
, rqt_max_size
, sz
);
6027 for (i
= 0; i
< sz
; i
++)
6028 MLX5_SET(rqtc
, rqtc
, rq_num
[i
], init_attr
->ind_tbl
[i
]->wq_num
);
6030 rwq_ind_tbl
->uid
= to_mpd(init_attr
->ind_tbl
[0]->pd
)->uid
;
6031 MLX5_SET(create_rqt_in
, in
, uid
, rwq_ind_tbl
->uid
);
6033 err
= mlx5_core_create_rqt(dev
->mdev
, in
, inlen
, &rwq_ind_tbl
->rqtn
);
6039 rwq_ind_tbl
->ib_rwq_ind_tbl
.ind_tbl_num
= rwq_ind_tbl
->rqtn
;
6040 if (udata
->outlen
) {
6041 resp
.response_length
= offsetof(typeof(resp
), response_length
) +
6042 sizeof(resp
.response_length
);
6043 err
= ib_copy_to_udata(udata
, &resp
, resp
.response_length
);
6048 return &rwq_ind_tbl
->ib_rwq_ind_tbl
;
6051 mlx5_cmd_destroy_rqt(dev
->mdev
, rwq_ind_tbl
->rqtn
, rwq_ind_tbl
->uid
);
6054 return ERR_PTR(err
);
6057 int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table
*ib_rwq_ind_tbl
)
6059 struct mlx5_ib_rwq_ind_table
*rwq_ind_tbl
= to_mrwq_ind_table(ib_rwq_ind_tbl
);
6060 struct mlx5_ib_dev
*dev
= to_mdev(ib_rwq_ind_tbl
->device
);
6062 mlx5_cmd_destroy_rqt(dev
->mdev
, rwq_ind_tbl
->rqtn
, rwq_ind_tbl
->uid
);
6068 int mlx5_ib_modify_wq(struct ib_wq
*wq
, struct ib_wq_attr
*wq_attr
,
6069 u32 wq_attr_mask
, struct ib_udata
*udata
)
6071 struct mlx5_ib_dev
*dev
= to_mdev(wq
->device
);
6072 struct mlx5_ib_rwq
*rwq
= to_mrwq(wq
);
6073 struct mlx5_ib_modify_wq ucmd
= {};
6074 size_t required_cmd_sz
;
6082 required_cmd_sz
= offsetof(typeof(ucmd
), reserved
) + sizeof(ucmd
.reserved
);
6083 if (udata
->inlen
< required_cmd_sz
)
6086 if (udata
->inlen
> sizeof(ucmd
) &&
6087 !ib_is_udata_cleared(udata
, sizeof(ucmd
),
6088 udata
->inlen
- sizeof(ucmd
)))
6091 if (ib_copy_from_udata(&ucmd
, udata
, min(sizeof(ucmd
), udata
->inlen
)))
6094 if (ucmd
.comp_mask
|| ucmd
.reserved
)
6097 inlen
= MLX5_ST_SZ_BYTES(modify_rq_in
);
6098 in
= kvzalloc(inlen
, GFP_KERNEL
);
6102 rqc
= MLX5_ADDR_OF(modify_rq_in
, in
, ctx
);
6104 curr_wq_state
= (wq_attr_mask
& IB_WQ_CUR_STATE
) ?
6105 wq_attr
->curr_wq_state
: wq
->state
;
6106 wq_state
= (wq_attr_mask
& IB_WQ_STATE
) ?
6107 wq_attr
->wq_state
: curr_wq_state
;
6108 if (curr_wq_state
== IB_WQS_ERR
)
6109 curr_wq_state
= MLX5_RQC_STATE_ERR
;
6110 if (wq_state
== IB_WQS_ERR
)
6111 wq_state
= MLX5_RQC_STATE_ERR
;
6112 MLX5_SET(modify_rq_in
, in
, rq_state
, curr_wq_state
);
6113 MLX5_SET(modify_rq_in
, in
, uid
, to_mpd(wq
->pd
)->uid
);
6114 MLX5_SET(rqc
, rqc
, state
, wq_state
);
6116 if (wq_attr_mask
& IB_WQ_FLAGS
) {
6117 if (wq_attr
->flags_mask
& IB_WQ_FLAGS_CVLAN_STRIPPING
) {
6118 if (!(MLX5_CAP_GEN(dev
->mdev
, eth_net_offloads
) &&
6119 MLX5_CAP_ETH(dev
->mdev
, vlan_cap
))) {
6120 mlx5_ib_dbg(dev
, "VLAN offloads are not "
6125 MLX5_SET64(modify_rq_in
, in
, modify_bitmask
,
6126 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD
);
6127 MLX5_SET(rqc
, rqc
, vsd
,
6128 (wq_attr
->flags
& IB_WQ_FLAGS_CVLAN_STRIPPING
) ? 0 : 1);
6131 if (wq_attr
->flags_mask
& IB_WQ_FLAGS_PCI_WRITE_END_PADDING
) {
6132 mlx5_ib_dbg(dev
, "Modifying scatter end padding is not supported\n");
6138 if (curr_wq_state
== IB_WQS_RESET
&& wq_state
== IB_WQS_RDY
) {
6139 if (MLX5_CAP_GEN(dev
->mdev
, modify_rq_counter_set_id
)) {
6140 MLX5_SET64(modify_rq_in
, in
, modify_bitmask
,
6141 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID
);
6142 MLX5_SET(rqc
, rqc
, counter_set_id
,
6143 dev
->port
->cnts
.set_id
);
6147 "Receive WQ counters are not supported on current FW\n");
6150 err
= mlx5_core_modify_rq(dev
->mdev
, rwq
->core_qp
.qpn
, in
, inlen
);
6152 rwq
->ibwq
.state
= (wq_state
== MLX5_RQC_STATE_ERR
) ? IB_WQS_ERR
: wq_state
;
struct mlx5_ib_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void mlx5_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
						     struct mlx5_ib_drain_cqe,
						     cqe);

	complete(&cqe->done);
}
6173 /* This function returns only once the drained WR was completed */
6174 static void handle_drain_completion(struct ib_cq
*cq
,
6175 struct mlx5_ib_drain_cqe
*sdrain
,
6176 struct mlx5_ib_dev
*dev
)
6178 struct mlx5_core_dev
*mdev
= dev
->mdev
;
6180 if (cq
->poll_ctx
== IB_POLL_DIRECT
) {
6181 while (wait_for_completion_timeout(&sdrain
->done
, HZ
/ 10) <= 0)
6182 ib_process_cq_direct(cq
, -1);
6186 if (mdev
->state
== MLX5_DEVICE_STATE_INTERNAL_ERROR
) {
6187 struct mlx5_ib_cq
*mcq
= to_mcq(cq
);
6188 bool triggered
= false;
6189 unsigned long flags
;
6191 spin_lock_irqsave(&dev
->reset_flow_resource_lock
, flags
);
6192 /* Make sure that the CQ handler won't run if wasn't run yet */
6193 if (!mcq
->mcq
.reset_notify_added
)
6194 mcq
->mcq
.reset_notify_added
= 1;
6197 spin_unlock_irqrestore(&dev
->reset_flow_resource_lock
, flags
);
6200 /* Wait for any scheduled/running task to be ended */
6201 switch (cq
->poll_ctx
) {
6202 case IB_POLL_SOFTIRQ
:
6203 irq_poll_disable(&cq
->iop
);
6204 irq_poll_enable(&cq
->iop
);
6206 case IB_POLL_WORKQUEUE
:
6207 cancel_work_sync(&cq
->work
);
6214 /* Run the CQ handler - this makes sure that the drain WR will
6215 * be processed if wasn't processed yet.
6217 mcq
->mcq
.comp(&mcq
->mcq
);
6220 wait_for_completion(&sdrain
->done
);
void mlx5_ib_drain_sq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->send_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct mlx5_ib_drain_cqe sdrain;
	const struct ib_send_wr *bad_swr;
	struct ib_rdma_wr swr = {
		.wr = {
			.next = NULL,
			{ .wr_cqe	= &sdrain.cqe, },
			.opcode	= IB_WR_RDMA_WRITE,
		},
	};
	int ret;
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_core_dev *mdev = dev->mdev;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	sdrain.cqe.done = mlx5_ib_drain_qp_done;
	init_completion(&sdrain.done);

	ret = _mlx5_ib_post_send(qp, &swr.wr, &bad_swr, true);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	handle_drain_completion(cq, &sdrain, dev);
}

void mlx5_ib_drain_rq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->recv_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct mlx5_ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {};
	const struct ib_recv_wr *bad_rwr;
	int ret;
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_core_dev *mdev = dev->mdev;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = mlx5_ib_drain_qp_done;
	init_completion(&rdrain.done);

	ret = _mlx5_ib_post_recv(qp, &rwr, &bad_rwr, true);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	handle_drain_completion(cq, &rdrain, dev);
}