/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>

#define mlx5_ib_dbg(dev, format, arg...)				\
	pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
		 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
	pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
	       __LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
	pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
		__LINE__, current->pid, ##arg)

#define field_avail(type, fld, sz) (offsetof(type, fld) +		\
				    sizeof(((type *)0)->fld) <= (sz))
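
/*
 * Illustrative sketch (not a definitive caller): field_avail() is used to
 * check whether a possibly shorter, older-ABI user command buffer actually
 * carries a given field before reading it, e.g.:
 *
 *	if (field_avail(struct mlx5_ib_create_qp, uidx, udata->inlen))
 *		uidx = ucmd.uidx;	// field was provided by userspace
 */
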
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)

enum {
	MLX5_IB_MMAP_CMD_SHIFT = 8,
	MLX5_IB_MMAP_CMD_MASK = 0xff,
};

enum {
	MLX5_RES_SCAT_DATA32_CQE = 0x1,
	MLX5_RES_SCAT_DATA64_CQE = 0x2,
	MLX5_REQ_SCAT_DATA32_CQE = 0x11,
	MLX5_REQ_SCAT_DATA64_CQE = 0x22,
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY = 1,
	MLX5_MAD_IFC_IGNORE_BKEY = 2,
	MLX5_MAD_IFC_NET_VIEW = 4,
};

enum {
	MLX5_CROSS_CHANNEL_BFREG = 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

enum {
	MLX5_TM_MAX_RNDV_MSG_SIZE = 64,
	MLX5_TM_MAX_SGE = 1,
};

enum {
	MLX5_IB_INVALID_UAR_INDEX = BIT(31),
	MLX5_IB_INVALID_BFREG = BIT(31),
};

enum {
	MLX5_MAX_MEMIC_PAGES = 0x100,
	MLX5_MEMIC_ALLOC_SIZE_MASK = 0x3f,
};

enum {
	MLX5_MEMIC_BASE_ALIGN = 6,
	MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN,
};

struct mlx5_ib_vma_private_data {
	struct list_head list;
	struct vm_area_struct *vma;
	/* protect vma_private_list add/del */
	struct mutex *vma_private_list_mutex;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext ibucontext;
	struct list_head db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex db_page_mutex;
	struct mlx5_bfreg_info bfregi;
	u8 cqe_version;
	/* Transport Domain number */
	u32 tdn;
	struct list_head vma_private_list;
	/* protect vma_private_list add/del */
	struct mutex vma_private_list_mutex;

	u64 lib_caps;
	DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
	u16 devx_uid;
};

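/*
 * The to_*() helpers below recover the driver-private wrapper from an
 * embedded uverbs/core object with container_of(); each relies on the
 * generic struct being a member of the corresponding mlx5_ib_* struct.
 */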
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd ibpd;
	u32 pdn;
};

#define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO (MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO (MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT (MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS 2
#define MLX5_IB_NUM_EGRESS_FTS 1
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table *flow_table;
	unsigned int refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head list;
	struct ib_flow ibflow;
	struct mlx5_ib_flow_prio *prio;
	struct mlx5_flow_handle *rule;
	struct ib_counters *ibcounters;
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
	struct mlx5_flow_table *lag_demux_ft;
	/* Protect flow steering bypass flow tables when adding or
	 * deleting flow rules; only a single add/removal of a flow
	 * steering rule may be done simultaneously.
	 */
	struct mutex lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for the low-level driver
 */

#define MLX5_IB_SEND_UMR_ENABLE_MR (IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT (IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR IB_WR_RESERVED1

#define MLX5_IB_UMR_OCTOWORD 16
#define MLX5_IB_UMR_XLT_ALIGNMENT 64

#define MLX5_IB_UPD_XLT_ZAP BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC BIT(2)
#define MLX5_IB_UPD_XLT_ADDR BIT(3)
#define MLX5_IB_UPD_XLT_PD BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT BIT(6)

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */

/* Create a UD QP whose source QP number is 1 */
static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
{
	return IB_QP_CREATE_RESERVED_START;
}

struct wr_list {
	u16 opcode;
	u16 next;
};

enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING = 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING = 1 << 1,
};

struct mlx5_ib_wq {
	u64 *wrid;
	u32 *wr_data;
	struct wr_list *w_list;
	unsigned *wqe_head;
	u16 unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t lock;
	int wqe_cnt;
	int max_post;
	int max_gs;
	int offset;
	int wqe_shift;
	unsigned head;
	unsigned tail;
	u16 cur_post;
	u16 last_poll;
	void *qend;
};

enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13

struct mlx5_ib_rwq {
	struct ib_wq ibwq;
	struct mlx5_core_qp core_qp;
	u32 rq_num_pas;
	u32 log_rq_stride;
	u32 log_rq_size;
	u32 rq_page_offset;
	u32 log_page_size;
	u32 log_num_strides;
	u32 two_byte_shift_en;
	u32 single_stride_log_num_of_bytes;
	struct ib_umem *umem;
	size_t buf_size;
	unsigned int page_shift;
	int create_type;
	struct mlx5_db db;
	u32 user_index;
	u32 wqe_count;
	u32 wqe_shift;
	int wq_sig;
	u32 create_flags; /* Use enum mlx5_ib_wq_flags */
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

enum {
	MLX5_WQ_USER,
	MLX5_WQ_KERNEL
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32 rqtn;
};

struct mlx5_ib_ubuffer {
	struct ib_umem *umem;
	int buf_size;
	u64 buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp *container_mibqp;
	struct mlx5_core_qp mqp;
	struct mlx5_ib_ubuffer ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base base;
	u16 xrcdn;
	u8 alt_port;
	u8 atomic_rd_en;
	u8 resp_depth;
};

struct mlx5_ib_rss_qp {
	u32 tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq *rq;
	struct mlx5_ib_ubuffer ubuffer;
	struct mlx5_db *doorbell;
	u32 tirn;
	u8 state;
	u32 flags;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq *sq;
	struct mlx5_ib_ubuffer ubuffer;
	struct mlx5_db *doorbell;
	struct mlx5_flow_handle *flow_rule;
	u32 tisn;
	u8 state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	int buf_size;
	unsigned long offset;
	struct mlx5_sq_bfreg *bfreg;
};

struct mlx5_ib_dct {
	struct mlx5_core_dct mdct;
	u32 *in;
};

struct mlx5_ib_qp {
	struct ib_qp ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
	};
	struct mlx5_frag_buf buf;

	struct mlx5_db db;
	struct mlx5_ib_wq rq;

	u8 sq_signal_bits;
	u8 next_fence;
	struct mlx5_ib_wq sq;

	/* serialize qp state modifications
	 */
	struct mutex mutex;
	u32 flags;
	u8 port;
	u8 state;
	int wq_sig;
	int scat_cqe;
	int max_inline_data;
	struct mlx5_bf bf;
	int has_rq;

	/* Only for user space QPs; for kernel QPs
	 * we get it from the bf object
	 */
	int bfregn;

	int create_type;

	/* Store signature errors */
	bool signature_en;

	struct list_head qps_list;
	struct list_head cq_recv_list;
	struct list_head cq_send_list;
	struct mlx5_rate_limit rl;
	u32 underlay_qpn;
	bool tunnel_offload_en;
	/* storage for qp sub type when core qp type is IB_QPT_DRIVER */
	enum ib_qp_type qp_sub_type;
};

struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct ib_umem *umem;
	int cqe_size;
	int nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL = IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND = IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV = IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1 = 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7,
	MLX5_IB_QP_RSS = 1 << 8,
	MLX5_IB_QP_CVLAN_STRIPPING = 1 << 9,
	MLX5_IB_QP_UNDERLAY = 1 << 10,
	MLX5_IB_QP_PCI_WRITE_END_PADDING = 1 << 11,
	MLX5_IB_QP_TUNNEL_OFFLOAD = 1 << 12,
};

struct mlx5_umr_wr {
	struct ib_send_wr wr;
	u64 virt_addr;
	u64 offset;
	struct ib_pd *pd;
	unsigned int page_shift;
	unsigned int xlt_size;
	u64 length;
	int access_flags;
	u32 mkey;
};

static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}

struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem *umem;
};

enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,
};

struct mlx5_ib_cq {
	struct ib_cq ibcq;
	struct mlx5_core_cq mcq;
	struct mlx5_ib_cq_buf buf;
	struct mlx5_db db;

	/* serialize access to the CQ
	 */
	spinlock_t lock;

	/* protect resize cq
	 */
	struct mutex resize_mutex;
	struct mlx5_ib_cq_buf *resize_buf;
	struct ib_umem *resize_umem;
	int cqe_size;
	struct list_head list_send_qp;
	struct list_head list_recv_qp;
	u32 create_flags;
	struct list_head wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct notify_work;
	u16 private_flags; /* Use mlx5_ib_cq_pr_flags */
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq ibsrq;
	struct mlx5_core_srq msrq;
	struct mlx5_frag_buf buf;
	struct mlx5_db db;
	u64 *wrid;
	/* protect SRQ handling
	 */
	spinlock_t lock;
	int head;
	int tail;
	u16 wqe_ctr;
	struct ib_umem *umem;
	/* serialize arming an SRQ
	 */
	struct mutex mutex;
	int wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd ibxrcd;
	u32 xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

struct mlx5_ib_dm {
	struct ib_dm ibdm;
	phys_addr_t dev_addr;
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

#define MLX5_IB_DM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
				   IB_ACCESS_REMOTE_WRITE  |\
				   IB_ACCESS_REMOTE_READ   |\
				   IB_ACCESS_REMOTE_ATOMIC |\
				   IB_ZERO_BASED)

struct mlx5_ib_mr {
	struct ib_mr ibmr;
	void *descs;
	dma_addr_t desc_map;
	int ndescs;
	int max_descs;
	int desc_size;
	int access_mode;
	struct mlx5_core_mkey mmkey;
	struct ib_umem *umem;
	struct mlx5_shared_mr_info *smr_info;
	struct list_head list;
	int order;
	bool allocated_from_cache;
	int npages;
	struct mlx5_ib_dev *dev;
	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx *sig;
	int live;
	void *descs_alloc;
	int access_flags; /* Needed for rereg MR */

	struct mlx5_ib_mr *parent;
	atomic_t num_leaf_free;
	wait_queue_head_t q_leaf_free;
};

struct mlx5_ib_mw {
	struct ib_mw ibmw;
	struct mlx5_core_mkey mmkey;
	int ndescs;
};

struct mlx5_ib_umr_context {
	struct ib_cqe cqe;
	enum ib_wc_status status;
	struct completion done;
};

struct umr_common {
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	/* control access to the UMR QP
	 */
	struct semaphore sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
	struct list_head head;
	/* sync access to the cache entry
	 */
	spinlock_t lock;

	struct dentry *dir;
	char name[4];
	u32 order;
	u32 xlt;
	u32 access_mode;
	u32 page;

	u32 size;
	u32 cur;
	u32 miss;
	u32 limit;

	struct dentry *fsize;
	struct dentry *fcur;
	struct dentry *fmiss;
	struct dentry *flimit;

	struct mlx5_ib_dev *dev;
	struct work_struct work;
	struct delayed_work dwork;
	int pending;
	struct completion compl;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
	int stopped;
	struct dentry *root;
	unsigned long last_add;
};

struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq *c0;
	struct ib_xrcd *x0;
	struct ib_xrcd *x1;
	struct ib_pd *p0;
	struct ib_srq *s0;
	struct ib_srq *s1;
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex mutex;
};

struct mlx5_ib_counters {
	const char **names;
	size_t *offsets;
	u32 num_q_counters;
	u32 num_cong_counters;
	u32 num_ext_ppcnt_counters;
	u16 set_id;
	bool set_id_valid;
};

struct mlx5_ib_multiport_info;

struct mlx5_ib_multiport {
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */
	spinlock_t mpi_lock;
};

struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
	struct mlx5_ib_multiport mp;
	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t netdev_lock;
	struct net_device *netdev;
	struct notifier_block nb;
	atomic_t next_port;
	enum ib_port_state last_port_state;
	struct mlx5_ib_dev *dev;
	u8 native_port_num;
};

struct mlx5_ib_dbg_param {
	int offset;
	struct mlx5_ib_dev *dev;
	struct dentry *dentry;
	u8 port_num;
};

enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
	struct dentry *root;
	struct mlx5_ib_dbg_param params[MLX5_IB_DBG_CC_MAX];
};

enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

struct mlx5_ib_dbg_delay_drop {
	struct dentry *dir_debugfs;
	struct dentry *rqs_cnt_debugfs;
	struct dentry *events_cnt_debugfs;
	struct dentry *timeout_debugfs;
};

struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev *dev;
	struct work_struct delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex lock;
	u32 timeout;
	bool activate;
	atomic_t events_cnt;
	atomic_t rqs_cnt;
	struct mlx5_ib_dbg_delay_drop *dbg;
};

enum mlx5_ib_stages {
	MLX5_IB_STAGE_INIT,
	MLX5_IB_STAGE_FLOW_DB,
	MLX5_IB_STAGE_CAPS,
	MLX5_IB_STAGE_NON_DEFAULT_CB,
	MLX5_IB_STAGE_ROCE,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_ODP,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_UAR,
	MLX5_IB_STAGE_BFREG,
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_SPECS,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_CLASS_ATTR,
	MLX5_IB_STAGE_REP_REG,
	MLX5_IB_STAGE_MAX,
};

struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = {.init = _init, .cleanup = _cleanup}

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};

struct mlx5_ib_multiport_info {
	struct list_head list;
	struct mlx5_ib_dev *ibdev;
	struct mlx5_core_dev *mdev;
	struct completion unref_comp;
	u64 sys_image_guid;
	u32 mdev_refcnt;
	bool is_master;
	bool unaffiliate;
};

struct mlx5_ib_flow_action {
	struct ib_flow_action ib_action;
	union {
		struct {
			u64 ib_flags;
			struct mlx5_accel_esp_xfrm *ctx;
		} esp_aes_gcm;
	};
};

struct mlx5_memic {
	struct mlx5_core_dev *dev;
	spinlock_t memic_lock;
	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};

struct mlx5_read_counters_attr {
	struct mlx5_fc *hw_cntrs_hndl;
	u64 *out;
	u32 flags;
};

enum mlx5_ib_counters_type {
	MLX5_IB_COUNTERS_FLOW,
};

struct mlx5_ib_mcounters {
	struct ib_counters ibcntrs;
	enum mlx5_ib_counters_type type;
	/* number of counters supported for this counters type */
	u32 counters_num;
	struct mlx5_fc *hw_cntrs_hndl;
	/* read function for this counters type */
	int (*read_counters)(struct ib_device *ibdev,
			     struct mlx5_read_counters_attr *read_attr);
	/* max index set as part of create_flow */
	u32 cntrs_max_index;
	/* number of counters data entries (<description,index> pair) */
	u32 ncounters;
	/* counters data array for descriptions and indexes */
	struct mlx5_ib_flow_counters_desc *counters_data;
	/* protects access to mcounters internal data */
	struct mutex mcntrs_mutex;
};

static inline struct mlx5_ib_mcounters *
to_mcounters(struct ib_counters *ibcntrs)
{
	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}

struct mlx5_ib_dev {
	struct ib_device ib_dev;
	struct mlx5_core_dev *mdev;
	struct mlx5_roce roce[MLX5_MAX_PORTS];
	int num_ports;
	/* serialize update of capability mask
	 */
	struct mutex cap_mask_mutex;
	bool ib_active;
	struct umr_common umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources devr;
	struct mlx5_mr_cache cache;
	struct timer_list delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex slow_path_mutex;
	int fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps odp_caps;
	u64 odp_max_size;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct mr_srcu;
	u32 null_mkey;
#endif
	struct mlx5_ib_flow_db *flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t reset_flow_resource_lock;
	struct list_head qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port *port;
	struct mlx5_sq_bfreg bfreg;
	struct mlx5_sq_bfreg fp_bfreg;
	struct mlx5_ib_delay_drop delay_drop;
	const struct mlx5_ib_profile *profile;
	struct mlx5_eswitch_rep *rep;

	/* protect the user_td */
	struct mutex lb_mutex;
	u32 user_td;
	u8 umr_fence;
	struct list_head ib_dev_list;
	u64 sys_image_guid;
	struct mlx5_memic memic;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

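/*
 * mqp is embedded in struct mlx5_ib_qp_base, which itself sits inside a
 * union in struct mlx5_ib_qp, so the owning QP is recovered through the
 * base's back-pointer rather than by a direct container_of().
 */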
static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
{
	return container_of(ibdm, struct mlx5_ib_dm, ibdm);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
				struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs);
int mlx5_ib_dealloc_dm(struct ib_dm *ibdm);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
		    struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
			   size_t nentries, struct mlx5_ib_mr *mr, int flags);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
}

static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
					 size_t nentries, struct mlx5_ib_mr *mr,
					 int flags) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

/* Needed for rep profile */
int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev);
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
		    const struct mlx5_ib_profile *profile);

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
			       const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);

/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u8 ib_port_num,
						   u8 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u8 port_num);

#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
			struct mlx5_ib_ucontext *context);
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
			  struct mlx5_ib_ucontext *context);
const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
#else
static inline int
mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
		    struct mlx5_ib_ucontext *context) { return -EOPNOTSUPP; }
static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
					struct mlx5_ib_ucontext *context) {}
static inline const struct uverbs_object_tree_def *
mlx5_ib_get_devx_tree(void) { return NULL; }
#endif

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}
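
/*
 * Illustrative sketch (not a definitive caller): a MAD query is typically
 * prepared with init_query_mad() and then issued through mlx5_MAD_IFC():
 *
 *	init_query_mad(in_mad);
 *	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 *	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
 */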

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
	       MLX5_PERM_LOCAL_READ;
}
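
/*
 * For example, convert_access(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ)
 * yields MLX5_PERM_LOCAL_WRITE | MLX5_PERM_REMOTE_READ | MLX5_PERM_LOCAL_READ;
 * local read permission is always granted.
 */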

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Returns a non-zero value if any unsupported CQ create flag
	 * is set; otherwise returns zero.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
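
/*
 * Illustrative sketch (hypothetical caller): reject user-supplied CQ
 * creation flags that the driver does not support:
 *
 *	if (check_cq_create_flags(ucmd.flags))
 *		return ERR_PTR(-EOPNOTSUPP);
 */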

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}

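/*
 * The two helpers below enforce that userspace passes a uidx exactly when
 * CQE version 1 was negotiated for the context: a command buffer that
 * carries (or omits) the uidx field inconsistently with the negotiated
 * CQE version is rejected with -EINVAL.
 */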
static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}

static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
				      struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
}
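
/*
 * Arithmetic sketch: with the uar_4k capability and library support, one
 * system page is carved into MLX5_UARS_IN_PAGE 4K UARs, so e.g. two static
 * system pages expose 2 * MLX5_UARS_IN_PAGE static UARs; without 4K UAR
 * support, each system page holds exactly one UAR.
 */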

unsigned long mlx5_ib_get_xlt_emergency_page(void);
void mlx5_ib_put_xlt_emergency_page(void);

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, int bfregn,
			bool dyn_bfreg);
#endif /* MLX5_IB_H */