/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H
#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#define mlx5_ib_dbg(dev, format, arg...)				\
	pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
		 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
	pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	       __LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
	pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
		__LINE__, current->pid, ##arg)
#define field_avail(type, fld, sz) (offsetof(type, fld) +		\
				    sizeof(((type *)0)->fld) <= (sz))

#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)
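/*
 * Illustrative sketch (not part of the original header): field_avail()
 * lets a verb handler test whether a user command buffer, whose length
 * the user passes in, is long enough to carry an optional trailing
 * field before reading it, e.g.
 *
 *	if (field_avail(struct mlx5_ib_create_qp, uidx, udata->inlen))
 *		uidx = ucmd.uidx;
 *
 * get_qp_user_index() near the end of this file uses exactly this
 * pattern.
 */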
enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};
enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};
enum {
	MLX5_CROSS_CHANNEL_BFREG	= 0,
};

enum {
	MLX5_TM_MAX_RNDV_MSG_SIZE	= 64,
};

enum {
	MLX5_IB_INVALID_UAR_INDEX	= BIT(31),
	MLX5_IB_INVALID_BFREG		= BIT(31),
};

enum {
	MLX5_MAX_MEMIC_PAGES		= 0x100,
	MLX5_MEMIC_ALLOC_SIZE_MASK	= 0x3f,
};

enum {
	MLX5_MEMIC_BASE_ALIGN	= 6,
	MLX5_MEMIC_BASE_SIZE	= 1 << MLX5_MEMIC_BASE_ALIGN,
};
struct mlx5_ib_vma_private_data {
	struct list_head	list;
	struct vm_area_struct	*vma;
	/* protect vma_private_list add/del */
	struct mutex		*vma_private_list_mutex;
};
struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_bfreg_info	bfregi;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;
	struct list_head	vma_private_list;
	/* protect vma_private_list add/del */
	struct mutex		vma_private_list_mutex;

	DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
};
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}
#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
#define MLX5_IB_NUM_EGRESS_FTS		1
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};
struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	struct mlx5_ib_flow_prio	*prio;
	struct mlx5_flow_handle		*rule;
	struct ib_counters		*ibcounters;
};
struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_ib_flow_prio	egress[MLX5_IB_NUM_EGRESS_FTS];
	struct mlx5_flow_table		*lag_demux_ft;
	/* Protect the flow steering bypass flow tables when flow rules
	 * are added or deleted; only a single add/removal of a flow
	 * steering rule may be in progress at a time.
	 */
	struct mutex			lock;
};
/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for the low-level driver
 */
#define MLX5_IB_SEND_UMR_ENABLE_MR		(IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR		(IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE		(IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT		(IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION	(IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS	IB_SEND_RESERVED_END
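/*
 * Illustrative sketch (not from the original header): because these
 * UMR send flags live in the range starting at IB_SEND_RESERVED_START,
 * a post-send path can distinguish driver-internal UMR work requests
 * from flags set by applications, e.g.
 *
 *	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT)
 *		... apply the XLT update described by umr_wr(wr) ...
 */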
#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI		IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT		IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1
#define MLX5_IB_UMR_OCTOWORD	       16
#define MLX5_IB_UMR_XLT_ALIGNMENT      64

#define MLX5_IB_UPD_XLT_ZAP	      BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE	      BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC	      BIT(2)
#define MLX5_IB_UPD_XLT_ADDR	      BIT(3)
#define MLX5_IB_UPD_XLT_PD	      BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS	      BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT      BIT(6)
/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */

/* Create a UD QP whose source QP number is 1 */
static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
{
	return IB_QP_CREATE_RESERVED_START;
}
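/*
 * Illustrative sketch (not from the original header): a caller inside
 * the driver would request such a QP by folding the reserved flag into
 * the standard attribute structure, e.g.
 *
 *	struct ib_qp_init_attr init_attr = {};
 *
 *	init_attr.qp_type = IB_QPT_UD;
 *	init_attr.create_flags |= mlx5_ib_create_qp_sqpn_qp1();
 */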
enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING		= 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING	= 1 << 1,
};
struct mlx5_ib_wq {
	struct wr_list	       *w_list;
	/* serialize post to the work queue
	 */
	spinlock_t		lock;
};
enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP	= 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ	= 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
struct mlx5_ib_rwq {
	struct ib_wq		ibwq;
	struct mlx5_core_qp	core_qp;
	u32			two_byte_shift_en;
	u32			single_stride_log_num_of_bytes;
	struct ib_umem		*umem;
	unsigned int		page_shift;
	u32			create_flags; /* Use enum mlx5_ib_wq_flags */
};
struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
};
struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};
struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
};
struct mlx5_ib_rss_qp {
	u32	tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base	base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base	base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	struct mlx5_flow_handle	*flow_rule;
};
struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};
struct mlx5_bf {
	unsigned long		offset;
	struct mlx5_sq_bfreg   *bfreg;
};

struct mlx5_ib_dct {
	struct mlx5_core_dct	mdct;
};
struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
	};
	struct mlx5_frag_buf	buf;

	struct mlx5_ib_wq	rq;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			bfregn;

	/* Store signature errors */
	bool			signature_en;

	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
	struct mlx5_rate_limit	rl;
	bool			tunnel_offload_en;
	/* storage for qp sub type when core qp type is IB_QPT_DRIVER */
	enum ib_qp_type		qp_sub_type;
};
struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct ib_umem		*umem;
};
enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO				= IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL		= IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND			= IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV			= IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS		= 1 << 7,
	MLX5_IB_QP_RSS				= 1 << 8,
	MLX5_IB_QP_CVLAN_STRIPPING		= 1 << 9,
	MLX5_IB_QP_UNDERLAY			= 1 << 10,
	MLX5_IB_QP_PCI_WRITE_END_PADDING	= 1 << 11,
	MLX5_IB_QP_TUNNEL_OFFLOAD		= 1 << 12,
};
struct mlx5_umr_wr {
	struct ib_send_wr	wr;
	unsigned int		page_shift;
	unsigned int		xlt_size;
};
static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}
struct mlx5_shared_mr_info {
	struct ib_umem		*umem;
};

enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
};
struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
	struct list_head	wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct	notify_work;
	u16			private_flags; /* Use mlx5_ib_cq_pr_flags */
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};
struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_frag_buf	buf;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
};
struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
};
enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

struct mlx5_ib_dm {
	struct ib_dm		ibdm;
	phys_addr_t		dev_addr;
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

#define MLX5_IB_DM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
				   IB_ACCESS_REMOTE_WRITE  |\
				   IB_ACCESS_REMOTE_READ   |\
				   IB_ACCESS_REMOTE_ATOMIC |\
				   IB_ZERO_BASED)
struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	struct mlx5_core_mkey	mmkey;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	bool			allocated_from_cache;
	struct mlx5_ib_dev     *dev;
	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx    *sig;
	int			access_flags; /* Needed for rereg MR */

	struct mlx5_ib_mr      *parent;
	atomic_t		num_leaf_free;
	wait_queue_head_t       q_leaf_free;
};
struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_core_mkey	mmkey;
};
struct mlx5_ib_umr_context {
	enum ib_wc_status	status;
	struct completion	done;
};

struct umr_common {
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};
struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	struct dentry	       *fsize;
	struct dentry	       *fmiss;
	struct dentry	       *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;

	struct completion	compl;
};
struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	unsigned long		last_add;
};
struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};
struct mlx5_ib_resources {
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex	mutex;
};
struct mlx5_ib_counters {
	u32 num_cong_counters;
	u32 num_ext_ppcnt_counters;
};

struct mlx5_ib_multiport_info;
struct mlx5_ib_multiport {
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */
	spinlock_t mpi_lock;
};
struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
	struct mlx5_ib_multiport mp;
	struct mlx5_ib_dbg_cc_params	*dbg_cc_params;
};
struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
	enum ib_port_state	last_port_state;
	struct mlx5_ib_dev	*dev;
};
struct mlx5_ib_dbg_param {
	struct mlx5_ib_dev	*dev;
	struct dentry		*dentry;
};
enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
	struct mlx5_ib_dbg_param	params[MLX5_IB_DBG_CC_MAX];
};
enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

struct mlx5_ib_dbg_delay_drop {
	struct dentry		*dir_debugfs;
	struct dentry		*rqs_cnt_debugfs;
	struct dentry		*events_cnt_debugfs;
	struct dentry		*timeout_debugfs;
};
struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev     *dev;
	struct work_struct	delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex		lock;
	struct mlx5_ib_dbg_delay_drop *dbg;
};
enum mlx5_ib_stages {
	MLX5_IB_STAGE_FLOW_DB,
	MLX5_IB_STAGE_NON_DEFAULT_CB,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_CLASS_ATTR,
	MLX5_IB_STAGE_REP_REG,
	MLX5_IB_STAGE_MAX,
};
struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = {.init = _init, .cleanup = _cleanup}

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};
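/*
 * Illustrative sketch (not from the original header): a profile is an
 * array of stages, typically built with STAGE_CREATE, e.g.
 *
 *	static const struct mlx5_ib_profile example_profile = {
 *		STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
 *			     mlx5_ib_stage_rep_flow_db_init,
 *			     NULL),
 *	};
 *
 * __mlx5_ib_add() (declared below) then walks the stage array calling
 * each init callback, and __mlx5_ib_remove() runs the cleanup
 * callbacks in reverse order.
 */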
struct mlx5_ib_multiport_info {
	struct list_head list;
	struct mlx5_ib_dev *ibdev;
	struct mlx5_core_dev *mdev;
	struct completion unref_comp;
};
struct mlx5_ib_flow_action {
	struct ib_flow_action		ib_action;
	union {
		struct {
			struct mlx5_accel_esp_xfrm *ctx;
		} esp_aes_gcm;
	};
};
struct mlx5_memic {
	struct mlx5_core_dev *dev;
	spinlock_t		memic_lock;
	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};

struct mlx5_read_counters_attr {
	struct mlx5_fc *hw_cntrs_hndl;
};

enum mlx5_ib_counters_type {
	MLX5_IB_COUNTERS_FLOW,
};
struct mlx5_ib_mcounters {
	struct ib_counters ibcntrs;
	enum mlx5_ib_counters_type type;
	/* number of counters supported for this counters type */
	u32 counters_num;
	struct mlx5_fc *hw_cntrs_hndl;
	/* read function for this counters type */
	int (*read_counters)(struct ib_device *ibdev,
			     struct mlx5_read_counters_attr *read_attr);
	/* max index set as part of create_flow */
	u32 cntrs_max_index;
	/* number of counters data entries (<description,index> pair) */
	u32 ncounters;
	/* counters data array for descriptions and indexes */
	struct mlx5_ib_flow_counters_desc *counters_data;
	/* protects access to mcounters internal data */
	struct mutex mcntrs_mutex;
};
static inline struct mlx5_ib_mcounters *
to_mcounters(struct ib_counters *ibcntrs)
{
	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}
struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct mlx5_roce		roce[MLX5_MAX_PORTS];
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex			slow_path_mutex;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps	odp_caps;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct      mr_srcu;
#endif
	struct mlx5_ib_flow_db	*flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head	qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port	*port;
	struct mlx5_sq_bfreg	bfreg;
	struct mlx5_sq_bfreg	fp_bfreg;
	struct mlx5_ib_delay_drop	delay_drop;
	const struct mlx5_ib_profile	*profile;
	struct mlx5_eswitch_rep		*rep;

	/* protect the user_td */
	struct mutex		lb_mutex;
	u32			user_td;
	struct list_head	ib_dev_list;
	struct mlx5_memic	memic;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
{
	return container_of(ibdm, struct mlx5_ib_dm, ibdm);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
				struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs);
int mlx5_ib_dealloc_dm(struct ib_dm *ibdm);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
		    struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
			   size_t nentries, struct mlx5_ib_mr *mr, int flags);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
					 size_t nentries, struct mlx5_ib_mr *mr,
					 int flags) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
/* Needed for rep profile */
int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev);
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
		    const struct mlx5_ib_profile *profile);
int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
			       const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u8 ib_port_num,
						   u8 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u8 port_num);
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
			struct mlx5_ib_ucontext *context);
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
			  struct mlx5_ib_ucontext *context);
const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
#else
static inline int
mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
		    struct mlx5_ib_ucontext *context) { return -EOPNOTSUPP; }
static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
					struct mlx5_ib_ucontext *context) {}
static inline const struct uverbs_object_tree_def *
mlx5_ib_get_devx_tree(void) { return NULL; }
#endif
static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}
#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Return a non-zero value for unsupported CQ create flags,
	 * otherwise return zero.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
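/*
 * Illustrative sketch (not from the original header): a CQ creation
 * path would typically reject unsupported flags up front, e.g.
 *
 *	if (check_cq_create_flags(attr->flags))
 *		return ERR_PTR(-EOPNOTSUPP);
 */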
static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}
static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}

static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
				      struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
}
unsigned long mlx5_ib_get_xlt_emergency_page(void);
void mlx5_ib_put_xlt_emergency_page(void);

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, int bfregn,
			bool dyn_bfreg);

#endif /* MLX5_IB_H */
1335 #endif /* MLX5_IB_H */