/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>

#define mlx5_ib_dbg(dev, format, arg...)				\
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

#define field_avail(type, fld, sz) (offsetof(type, fld) +		\
				    sizeof(((type *)0)->fld) <= (sz))
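/*
 * Illustrative usage (hypothetical snippet, not from the original file):
 * field_avail() reports whether a struct field lies entirely within the
 * first sz bytes, e.g. to check that a user response buffer is large
 * enough before filling an optional field:
 *
 *	if (field_avail(struct mlx5_ib_alloc_ucontext_resp, cqe_version,
 *			udata->outlen))
 *		resp.cqe_version = ...;
 */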
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1,
	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
	MLX5_IB_MMAP_CORE_CLOCK			= 5,
};
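/*
 * Illustrative note (assumption about usage): the mmap command travels in
 * the upper bits of the mmap page offset, so a handler would typically
 * recover it as:
 *
 *	cmd = (vma->vm_pgoff >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
 */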

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_UUAR		= 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_uuar_info	uuari;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
};
#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	unsigned int			prio;
	struct mlx5_flow_rule		*rule;
};
struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	/* Protect the flow steering bypass flow tables when flow rules
	 * are added or deleted: only a single flow steering rule may be
	 * added or removed at a time.
	 */
	struct mutex			lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */

#define MLX5_IB_SEND_UMR_UNREG	IB_SEND_RESERVED_START
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)

#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION	(IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_PD		(IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_ACCESS	IB_SEND_RESERVED_END
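/*
 * Illustrative note: the UMR send flags above occupy the range that
 * enum ib_send_flags reserves for low-level drivers
 * (IB_SEND_RESERVED_START..IB_SEND_RESERVED_END), so they can be carried
 * in ib_send_wr.send_flags without clashing with the generic verbs flags.
 */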

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */

/* Create a UD QP whose source QP number is 1 */
static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
{
	return IB_QP_CREATE_RESERVED_START;
}
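/*
 * Illustrative usage (hypothetical caller, not from the original file):
 *
 *	struct ib_qp_init_attr init_attr = { .qp_type = IB_QPT_UD };
 *
 *	init_attr.create_flags |= mlx5_ib_create_qp_sqpn_qp1();
 *	qp = ib_create_qp(pd, &init_attr);
 */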

struct wr_list {
	u16	opcode;
	u16	next;
};

struct mlx5_ib_wq {
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void		       *qend;
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

/*
 * Connect-IB can trigger up to four concurrent pagefaults
 * per-QP.
 */
enum mlx5_ib_pagefault_context {
	MLX5_IB_PAGEFAULT_RESPONDER_READ,
	MLX5_IB_PAGEFAULT_REQUESTOR_READ,
	MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
	MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
	MLX5_IB_PAGEFAULT_CONTEXTS
};

static inline enum mlx5_ib_pagefault_context
	mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
{
	return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
}
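/*
 * Illustrative note (assumption, inferred from the enum ordering above):
 * this relies on MLX5_PFAULT_REQUESTOR and MLX5_PFAULT_WRITE being the two
 * low-order flag bits, so the masked value falls in 0..3 and indexes
 * mlx5_ib_pagefault_context directly, e.g. REQUESTOR | WRITE maps to
 * MLX5_IB_PAGEFAULT_REQUESTOR_WRITE (3).
 */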

struct mlx5_ib_pfault {
	struct work_struct	work;
	struct mlx5_pagefault	mpfault;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
	int			buf_size;
	u64			buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
	u16			xrcdn;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base	base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tirn;
	u8			state;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base	base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tisn;
	u8			state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
	};
	struct mlx5_buf		buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u8			sq_signal_bits;
	u8			fm_cache;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	u32			flags;
	u8			port;
	u8			state;
	int			wq_sig;
	int			scat_cqe;
	int			max_inline_data;
	struct mlx5_bf	       *bf;
	int			has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			uuarn;

	int			create_type;

	/* Store signature errors */
	bool			signature_en;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/*
	 * A flag that is true for QPs that are in a state that doesn't
	 * allow page faults, and shouldn't schedule any more faults.
	 */
	int			disable_page_faults;
	/*
	 * The disable_page_faults_lock protects a QP's disable_page_faults
	 * field, allowing for a thread to atomically check whether the QP
	 * allows page faults, and if so schedule a page fault.
	 */
	spinlock_t		disable_page_faults_lock;
	struct mlx5_ib_pfault	pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
#endif
};

struct mlx5_ib_cq_buf {
	struct mlx5_buf		buf;
	struct ib_umem		*umem;
	int			cqe_size;
	int			nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO				= IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL		= IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND			= IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV			= IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS		= 1 << 7,
};

struct mlx5_umr_wr {
	struct ib_send_wr		wr;
	union {
		u64			virt_addr;
		u64			offset;
	} target;
	struct ib_pd		       *pd;
	unsigned int			page_shift;
	unsigned int			npages;
	u32				length;
	int				access_flags;
	u32				mkey;
};

static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}
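/*
 * Illustrative usage (hypothetical snippet, not from the original file):
 * in the send path, a UMR work request posted as a plain ib_send_wr is
 * recovered with the wrapper above:
 *
 *	struct mlx5_umr_wr *umrwr = umr_wr(wr);
 *
 *	... use umrwr->mkey, umrwr->npages, etc. ...
 */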

struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem		*umem;
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
	u32			create_flags;
	struct list_head	wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct	notify_work;
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_buf		buf;
	struct mlx5_db		db;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	void			*descs;
	dma_addr_t		desc_map;
	int			ndescs;
	int			max_descs;
	int			desc_size;
	int			access_mode;
	struct mlx5_core_mkey	mmkey;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	int			order;
	int			umred;
	int			npages;
	struct mlx5_ib_dev     *dev;
	struct mlx5_create_mkey_mbox_out out;
	struct mlx5_core_sig_ctx    *sig;
	int			live;
	void			*descs_alloc;
	int			access_flags; /* Needed for rereg MR */
};

struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_core_mkey	mmkey;
};

struct mlx5_ib_umr_context {
	struct ib_cqe		cqe;
	enum ib_wc_status	status;
	struct completion	done;
};

struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	struct dentry	       *dir;
	char			name[4];
	u32			order;
	u32			size;
	u32			cur;
	u32			miss;
	u32			limit;

	struct dentry	       *fsize;
	struct dentry	       *fcur;
	struct dentry	       *fmiss;
	struct dentry	       *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	int			pending;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	int			stopped;
	struct dentry		*root;
	unsigned long		last_add;
};

struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex	mutex;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct mlx5_roce		roce;
	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	bool				ib_active;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	int				fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps	odp_caps;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct	mr_srcu;
#endif
	struct mlx5_ib_flow_db	flow_db;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

struct mlx5_ib_ah {
	struct ib_ah		ibah;
	struct mlx5_av		av;
};

static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
		       int npages, int zap);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
		      struct scatterlist *sg,
		      int sg_nents);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
extern struct workqueue_struct *mlx5_ib_page_fault_wq;

void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
			       struct mlx5_ib_pfault *pfault);
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)		{}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)	{}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void)				{}
static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp)  {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index);

/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
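/*
 * Illustrative usage (hypothetical snippet, not from the original file):
 * preparing a SubnGet(PortInfo) query before passing it to the MAD
 * interface:
 *
 *	init_query_mad(in_mad);
 *	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
 *	in_mad->attr_mod = cpu_to_be32(port);
 */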

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
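/*
 * Illustrative note (arithmetic only): MLX5_MAX_UMR_PAGES is 1 << 16 =
 * 65536 pages; with 4 KiB pages that bounds a single UMR operation at
 * 256 MiB.
 */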

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Returns a non-zero value for unsupported CQ
	 * create flags, otherwise returns zero.
	 */
	return (flags & ~(IB_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
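/*
 * Illustrative usage (hypothetical caller, not from the original file):
 *
 *	if (check_cq_create_flags(attr->flags))
 *		return ERR_PTR(-EOPNOTSUPP);
 */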

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}
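/*
 * Illustrative usage (hypothetical snippet, not from the original file):
 * with CQE version 0 any user-supplied index is ignored and the default
 * UIDX is assigned; with CQE version 1 the index must be valid:
 *
 *	err = verify_assign_uidx(ucontext->cqe_version, ucmd.uidx, &uidx);
 *	if (err)
 *		return err;
 */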
#endif /* MLX5_IB_H */