// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 */

#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/eswitch.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem_odp.h>
#include <linux/etherdevice.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) IB driver");
MODULE_LICENSE("Dual BSD/GPL");
struct mlx5_ib_event_work {
	struct work_struct work;
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_multiport_info *mpi;
	/* ... */
};
	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
static struct workqueue_struct *mlx5_ib_event_wq;
static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
static LIST_HEAD(mlx5_ib_dev_list);
/*
 * This mutex should be held when accessing either of the above lists
 */
static DEFINE_MUTEX(mlx5_ib_multiport_mutex);
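/*
 * Illustrative sketch (not part of the original source): any walk of the two
 * lists above is expected to be wrapped in the multiport mutex, e.g.
 *
 *	mutex_lock(&mlx5_ib_multiport_mutex);
 *	list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list, list)
 *		...;
 *	mutex_unlock(&mlx5_ib_multiport_mutex);
 *
 * The "list" member name used here is assumed for illustration only.
 */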
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
{
	struct mlx5_ib_dev *dev;

	mutex_lock(&mlx5_ib_multiport_mutex);
	dev = mpi->ibdev;
	mutex_unlock(&mlx5_ib_multiport_mutex);
	return dev;
}
static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
	switch (port_type_cap) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u32 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}
static int get_port_state(struct ib_device *ibdev,
			  u32 port_num,
			  enum ib_port_state *state)
{
	struct ib_port_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	ret = ibdev->ops.query_port(ibdev, port_num, &attr);
	if (!ret)
		*state = attr.state;
	return ret;
}
static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
					   struct net_device *ndev,
					   struct net_device *upper,
					   u32 *port_num)
{
	struct net_device *rep_ndev;
	struct mlx5_ib_port *port;
	int i;

	for (i = 0; i < dev->num_ports; i++) {
		port = &dev->port[i];
		/* ... */

		if (upper == ndev && port->rep->vport == MLX5_VPORT_UPLINK) {
			/* ... */
		}

		if (upper && port->rep->vport == MLX5_VPORT_UPLINK)
			continue;

		read_lock(&port->roce.netdev_lock);
		rep_ndev = mlx5_ib_get_rep_netdev(port->rep->esw,
						  port->rep->vport);
		if (rep_ndev == ndev) {
			read_unlock(&port->roce.netdev_lock);
			/* ... */
		}

		read_unlock(&port->roce.netdev_lock);
	}

	/* ... */
}
static int mlx5_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	u32 port_num = roce->native_port_num;
	struct mlx5_core_dev *mdev;
	struct mlx5_ib_dev *ibdev;

	ibdev = roce->dev;
	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	/* ... */

	switch (event) {
	case NETDEV_REGISTER:
		/* Should already be registered during the load */
		/* ... */
		write_lock(&roce->netdev_lock);
		if (ndev->dev.parent == mdev->device)
			roce->netdev = ndev;
		write_unlock(&roce->netdev_lock);
		break;

	case NETDEV_UNREGISTER:
		/* In case of reps, ib device goes away before the netdevs */
		write_lock(&roce->netdev_lock);
		if (roce->netdev == ndev)
			roce->netdev = NULL;
		write_unlock(&roce->netdev_lock);
		break;

	/* ... */
	{
		struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
		struct net_device *upper = NULL;

		if (lag_ndev) {
			upper = netdev_master_upper_dev_get(lag_ndev);
			/* ... */
		}

		/* ... */
		roce = mlx5_get_rep_roce(ibdev, ndev, upper, &port_num);
		/* ... */

		if ((upper == ndev ||
		     ((!upper || ibdev->is_rep) && ndev == roce->netdev)) &&
		    ibdev->ib_active) {
			struct ib_event ibev = { };
			enum ib_port_state port_state;

			if (get_port_state(&ibdev->ib_dev, port_num,
					   &port_state))
				goto done;

			if (roce->last_port_state == port_state)
				goto done;

			roce->last_port_state = port_state;
			ibev.device = &ibdev->ib_dev;
			if (port_state == IB_PORT_DOWN)
				ibev.event = IB_EVENT_PORT_ERR;
			else if (port_state == IB_PORT_ACTIVE)
				ibev.event = IB_EVENT_PORT_ACTIVE;
			/* ... */

			ibev.element.port_num = port_num;
			ib_dispatch_event(&ibev);
		}
		break;
	}

	/* ... */
	}
done:
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	/* ... */
}
static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
					     u32 port_num)
{
	struct mlx5_ib_dev *ibdev = to_mdev(device);
	struct net_device *ndev;
	struct mlx5_core_dev *mdev;

	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	/* ... */

	ndev = mlx5_lag_get_roce_netdev(mdev);
	/* ... */

	/* Ensure ndev does not disappear before we invoke dev_hold() */
	read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
	ndev = ibdev->port[port_num - 1].roce.netdev;
	/* ... */
	read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);

	/* ... */
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return ndev;
}
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
						   u32 ib_port_num,
						   u32 *native_port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  ib_port_num);
	struct mlx5_core_dev *mdev = NULL;
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
	    ll != IB_LINK_LAYER_ETHERNET) {
		/* ... */
		*native_port_num = ib_port_num;
		/* ... */
	}

	/* ... */
	*native_port_num = 1;

	port = &ibdev->port[ib_port_num - 1];
	spin_lock(&port->mp.mpi_lock);
	mpi = ibdev->port[ib_port_num - 1].mp.mpi;
	if (mpi && !mpi->unaffiliate) {
		mdev = mpi->mdev;
		/* If it's the master no need to refcount, it'll exist
		 * as long as the ib_dev exists.
		 */
		/* ... */
	}
	spin_unlock(&port->mp.mpi_lock);

	return mdev;
}
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  port_num);
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return;

	port = &ibdev->port[port_num - 1];

	spin_lock(&port->mp.mpi_lock);
	mpi = ibdev->port[port_num - 1].mp.mpi;
	/* ... */
	if (mpi->unaffiliate)
		complete(&mpi->unref_comp);
	/* ... */
	spin_unlock(&port->mp.mpi_lock);
}
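/*
 * Illustrative sketch (not part of the original source): callers that need
 * the mlx5_core_dev backing an IB port pair the get/put helpers above, e.g.
 *
 *	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
 *	if (!mdev)
 *		return -ENODEV;
 *	... use mdev ...
 *	mlx5_ib_put_native_port_mdev(ibdev, port_num);
 *
 * The -ENODEV return value here is only an example of handling the
 * not-yet-affiliated case.
 */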
static int translate_eth_legacy_proto_oper(u32 eth_proto_oper,
					   u16 *active_speed, u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
	case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	default:
		/* ... */
	}

	return 0;
}
static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
					u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_DDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
		*active_width = IB_WIDTH_2X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
		*active_width = IB_WIDTH_2X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GAUI_1_100GBASE_CR_KR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_NDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_200GAUI_2_200GBASE_CR2_KR2):
		*active_width = IB_WIDTH_2X;
		*active_speed = IB_SPEED_NDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_400GAUI_4_400GBASE_CR4_KR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_NDR;
		break;
	default:
		/* ... */
	}

	return 0;
}
static int translate_eth_proto_oper(u32 eth_proto_oper, u16 *active_speed,
				    u8 *active_width, bool ext)
{
	return ext ?
		translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
					     active_width) :
		translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
						active_width);
}
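/*
 * Note (added for clarity, not part of the original source): the tables above
 * report an Ethernet link to the IB core as a (width, speed) pair whose
 * product approximates the wire rate, e.g. 100GBASE-CR4 becomes 4X lanes at
 * EDR (~25 Gb/s per lane), while 50GAUI_1 becomes a single lane at HDR
 * (~50 Gb/s). translate_eth_proto_oper() simply selects the extended or
 * legacy table based on the "ext" flag derived from the PTYS register.
 */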
466 static int mlx5_query_port_roce(struct ib_device
*device
, u32 port_num
,
467 struct ib_port_attr
*props
)
469 struct mlx5_ib_dev
*dev
= to_mdev(device
);
470 u32 out
[MLX5_ST_SZ_DW(ptys_reg
)] = {0};
471 struct mlx5_core_dev
*mdev
;
472 struct net_device
*ndev
, *upper
;
473 enum ib_mtu ndev_ib_mtu
;
474 bool put_mdev
= true;
480 mdev
= mlx5_ib_get_native_port_mdev(dev
, port_num
, &mdev_port_num
);
482 /* This means the port isn't affiliated yet. Get the
483 * info for the master port instead.
491 /* Possible bad flows are checked before filling out props so in case
492 * of an error it will still be zeroed out.
493 * Use native port in case of reps
496 err
= mlx5_query_port_ptys(mdev
, out
, sizeof(out
), MLX5_PTYS_EN
,
499 err
= mlx5_query_port_ptys(mdev
, out
, sizeof(out
), MLX5_PTYS_EN
,
503 ext
= !!MLX5_GET_ETH_PROTO(ptys_reg
, out
, true, eth_proto_capability
);
504 eth_prot_oper
= MLX5_GET_ETH_PROTO(ptys_reg
, out
, ext
, eth_proto_oper
);
506 props
->active_width
= IB_WIDTH_4X
;
507 props
->active_speed
= IB_SPEED_QDR
;
509 translate_eth_proto_oper(eth_prot_oper
, &props
->active_speed
,
510 &props
->active_width
, ext
);
512 if (!dev
->is_rep
&& dev
->mdev
->roce
.roce_en
) {
515 props
->port_cap_flags
|= IB_PORT_CM_SUP
;
516 props
->ip_gids
= true;
517 props
->gid_tbl_len
= MLX5_CAP_ROCE(dev
->mdev
,
518 roce_address_table_size
);
519 mlx5_query_nic_vport_qkey_viol_cntr(mdev
, &qkey_viol_cntr
);
520 props
->qkey_viol_cntr
= qkey_viol_cntr
;
522 props
->max_mtu
= IB_MTU_4096
;
523 props
->max_msg_sz
= 1 << MLX5_CAP_GEN(dev
->mdev
, log_max_msg
);
524 props
->pkey_tbl_len
= 1;
525 props
->state
= IB_PORT_DOWN
;
526 props
->phys_state
= IB_PORT_PHYS_STATE_DISABLED
;
528 /* If this is a stub query for an unaffiliated port stop here */
532 ndev
= mlx5_ib_get_netdev(device
, port_num
);
536 if (dev
->lag_active
) {
538 upper
= netdev_master_upper_dev_get_rcu(ndev
);
547 if (netif_running(ndev
) && netif_carrier_ok(ndev
)) {
548 props
->state
= IB_PORT_ACTIVE
;
549 props
->phys_state
= IB_PORT_PHYS_STATE_LINK_UP
;
552 ndev_ib_mtu
= iboe_get_mtu(ndev
->mtu
);
556 props
->active_mtu
= min(props
->max_mtu
, ndev_ib_mtu
);
559 mlx5_ib_put_native_port_mdev(dev
, port_num
);
563 static int set_roce_addr(struct mlx5_ib_dev
*dev
, u32 port_num
,
564 unsigned int index
, const union ib_gid
*gid
,
565 const struct ib_gid_attr
*attr
)
567 enum ib_gid_type gid_type
;
568 u16 vlan_id
= 0xffff;
574 gid_type
= attr
->gid_type
;
576 ret
= rdma_read_gid_l2_fields(attr
, &vlan_id
, &mac
[0]);
582 case IB_GID_TYPE_ROCE
:
583 roce_version
= MLX5_ROCE_VERSION_1
;
585 case IB_GID_TYPE_ROCE_UDP_ENCAP
:
586 roce_version
= MLX5_ROCE_VERSION_2
;
587 if (gid
&& ipv6_addr_v4mapped((void *)gid
))
588 roce_l3_type
= MLX5_ROCE_L3_TYPE_IPV4
;
590 roce_l3_type
= MLX5_ROCE_L3_TYPE_IPV6
;
594 mlx5_ib_warn(dev
, "Unexpected GID type %u\n", gid_type
);
597 return mlx5_core_roce_gid_set(dev
->mdev
, index
, roce_version
,
598 roce_l3_type
, gid
->raw
, mac
,
599 vlan_id
< VLAN_CFI_MASK
, vlan_id
,
603 static int mlx5_ib_add_gid(const struct ib_gid_attr
*attr
,
604 __always_unused
void **context
)
606 return set_roce_addr(to_mdev(attr
->device
), attr
->port_num
,
607 attr
->index
, &attr
->gid
, attr
);
610 static int mlx5_ib_del_gid(const struct ib_gid_attr
*attr
,
611 __always_unused
void **context
)
613 return set_roce_addr(to_mdev(attr
->device
), attr
->port_num
,
614 attr
->index
, NULL
, attr
);
617 __be16
mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev
*dev
,
618 const struct ib_gid_attr
*attr
)
620 if (attr
->gid_type
!= IB_GID_TYPE_ROCE_UDP_ENCAP
)
623 return cpu_to_be16(MLX5_CAP_ROCE(dev
->mdev
, r_roce_min_src_udp_port
));
626 static int mlx5_use_mad_ifc(struct mlx5_ib_dev
*dev
)
628 if (MLX5_CAP_GEN(dev
->mdev
, port_type
) == MLX5_CAP_PORT_TYPE_IB
)
629 return !MLX5_CAP_GEN(dev
->mdev
, ib_virt
);
634 MLX5_VPORT_ACCESS_METHOD_MAD
,
635 MLX5_VPORT_ACCESS_METHOD_HCA
,
636 MLX5_VPORT_ACCESS_METHOD_NIC
,
639 static int mlx5_get_vport_access_method(struct ib_device
*ibdev
)
641 if (mlx5_use_mad_ifc(to_mdev(ibdev
)))
642 return MLX5_VPORT_ACCESS_METHOD_MAD
;
644 if (mlx5_ib_port_link_layer(ibdev
, 1) ==
645 IB_LINK_LAYER_ETHERNET
)
646 return MLX5_VPORT_ACCESS_METHOD_NIC
;
648 return MLX5_VPORT_ACCESS_METHOD_HCA
;
651 static void get_atomic_caps(struct mlx5_ib_dev
*dev
,
653 struct ib_device_attr
*props
)
656 u8 atomic_operations
= MLX5_CAP_ATOMIC(dev
->mdev
, atomic_operations
);
657 u8 atomic_req_8B_endianness_mode
=
658 MLX5_CAP_ATOMIC(dev
->mdev
, atomic_req_8B_endianness_mode
);
660 /* Check if HW supports 8 bytes standard atomic operations and capable
661 * of host endianness respond
663 tmp
= MLX5_ATOMIC_OPS_CMP_SWAP
| MLX5_ATOMIC_OPS_FETCH_ADD
;
664 if (((atomic_operations
& tmp
) == tmp
) &&
665 (atomic_size_qp
& MLX5_ATOMIC_SIZE_QP_8BYTES
) &&
666 (atomic_req_8B_endianness_mode
)) {
667 props
->atomic_cap
= IB_ATOMIC_HCA
;
669 props
->atomic_cap
= IB_ATOMIC_NONE
;
673 static void get_atomic_caps_qp(struct mlx5_ib_dev
*dev
,
674 struct ib_device_attr
*props
)
676 u8 atomic_size_qp
= MLX5_CAP_ATOMIC(dev
->mdev
, atomic_size_qp
);
678 get_atomic_caps(dev
, atomic_size_qp
, props
);
681 static int mlx5_query_system_image_guid(struct ib_device
*ibdev
,
682 __be64
*sys_image_guid
)
684 struct mlx5_ib_dev
*dev
= to_mdev(ibdev
);
685 struct mlx5_core_dev
*mdev
= dev
->mdev
;
689 switch (mlx5_get_vport_access_method(ibdev
)) {
690 case MLX5_VPORT_ACCESS_METHOD_MAD
:
691 return mlx5_query_mad_ifc_system_image_guid(ibdev
,
694 case MLX5_VPORT_ACCESS_METHOD_HCA
:
695 err
= mlx5_query_hca_vport_system_image_guid(mdev
, &tmp
);
698 case MLX5_VPORT_ACCESS_METHOD_NIC
:
699 err
= mlx5_query_nic_vport_system_image_guid(mdev
, &tmp
);
707 *sys_image_guid
= cpu_to_be64(tmp
);
713 static int mlx5_query_max_pkeys(struct ib_device
*ibdev
,
716 struct mlx5_ib_dev
*dev
= to_mdev(ibdev
);
717 struct mlx5_core_dev
*mdev
= dev
->mdev
;
719 switch (mlx5_get_vport_access_method(ibdev
)) {
720 case MLX5_VPORT_ACCESS_METHOD_MAD
:
721 return mlx5_query_mad_ifc_max_pkeys(ibdev
, max_pkeys
);
723 case MLX5_VPORT_ACCESS_METHOD_HCA
:
724 case MLX5_VPORT_ACCESS_METHOD_NIC
:
725 *max_pkeys
= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev
,
734 static int mlx5_query_vendor_id(struct ib_device
*ibdev
,
737 struct mlx5_ib_dev
*dev
= to_mdev(ibdev
);
739 switch (mlx5_get_vport_access_method(ibdev
)) {
740 case MLX5_VPORT_ACCESS_METHOD_MAD
:
741 return mlx5_query_mad_ifc_vendor_id(ibdev
, vendor_id
);
743 case MLX5_VPORT_ACCESS_METHOD_HCA
:
744 case MLX5_VPORT_ACCESS_METHOD_NIC
:
745 return mlx5_core_query_vendor_id(dev
->mdev
, vendor_id
);
752 static int mlx5_query_node_guid(struct mlx5_ib_dev
*dev
,
758 switch (mlx5_get_vport_access_method(&dev
->ib_dev
)) {
759 case MLX5_VPORT_ACCESS_METHOD_MAD
:
760 return mlx5_query_mad_ifc_node_guid(dev
, node_guid
);
762 case MLX5_VPORT_ACCESS_METHOD_HCA
:
763 err
= mlx5_query_hca_vport_node_guid(dev
->mdev
, &tmp
);
766 case MLX5_VPORT_ACCESS_METHOD_NIC
:
767 err
= mlx5_query_nic_vport_node_guid(dev
->mdev
, &tmp
);
775 *node_guid
= cpu_to_be64(tmp
);
780 struct mlx5_reg_node_desc
{
781 u8 desc
[IB_DEVICE_NODE_DESC_MAX
];
784 static int mlx5_query_node_desc(struct mlx5_ib_dev
*dev
, char *node_desc
)
786 struct mlx5_reg_node_desc in
;
788 if (mlx5_use_mad_ifc(dev
))
789 return mlx5_query_mad_ifc_node_desc(dev
, node_desc
);
791 memset(&in
, 0, sizeof(in
));
793 return mlx5_core_access_reg(dev
->mdev
, &in
, sizeof(in
), node_desc
,
794 sizeof(struct mlx5_reg_node_desc
),
795 MLX5_REG_NODE_DESC
, 0, 0);
798 static int mlx5_ib_query_device(struct ib_device
*ibdev
,
799 struct ib_device_attr
*props
,
800 struct ib_udata
*uhw
)
802 size_t uhw_outlen
= (uhw
) ? uhw
->outlen
: 0;
803 struct mlx5_ib_dev
*dev
= to_mdev(ibdev
);
804 struct mlx5_core_dev
*mdev
= dev
->mdev
;
809 u64 min_page_size
= 1ull << MLX5_CAP_GEN(mdev
, log_pg_sz
);
810 bool raw_support
= !mlx5_core_mp_enabled(mdev
);
811 struct mlx5_ib_query_device_resp resp
= {};
815 resp_len
= sizeof(resp
.comp_mask
) + sizeof(resp
.response_length
);
816 if (uhw_outlen
&& uhw_outlen
< resp_len
)
819 resp
.response_length
= resp_len
;
821 if (uhw
&& uhw
->inlen
&& !ib_is_udata_cleared(uhw
, 0, uhw
->inlen
))
824 memset(props
, 0, sizeof(*props
));
825 err
= mlx5_query_system_image_guid(ibdev
,
826 &props
->sys_image_guid
);
830 props
->max_pkeys
= dev
->pkey_table_len
;
832 err
= mlx5_query_vendor_id(ibdev
, &props
->vendor_id
);
836 props
->fw_ver
= ((u64
)fw_rev_maj(dev
->mdev
) << 32) |
837 (fw_rev_min(dev
->mdev
) << 16) |
838 fw_rev_sub(dev
->mdev
);
839 props
->device_cap_flags
= IB_DEVICE_CHANGE_PHY_PORT
|
840 IB_DEVICE_PORT_ACTIVE_EVENT
|
841 IB_DEVICE_SYS_IMAGE_GUID
|
842 IB_DEVICE_RC_RNR_NAK_GEN
;
844 if (MLX5_CAP_GEN(mdev
, pkv
))
845 props
->device_cap_flags
|= IB_DEVICE_BAD_PKEY_CNTR
;
846 if (MLX5_CAP_GEN(mdev
, qkv
))
847 props
->device_cap_flags
|= IB_DEVICE_BAD_QKEY_CNTR
;
848 if (MLX5_CAP_GEN(mdev
, apm
))
849 props
->device_cap_flags
|= IB_DEVICE_AUTO_PATH_MIG
;
850 if (MLX5_CAP_GEN(mdev
, xrc
))
851 props
->device_cap_flags
|= IB_DEVICE_XRC
;
852 if (MLX5_CAP_GEN(mdev
, imaicl
)) {
853 props
->device_cap_flags
|= IB_DEVICE_MEM_WINDOW
|
854 IB_DEVICE_MEM_WINDOW_TYPE_2B
;
855 props
->max_mw
= 1 << MLX5_CAP_GEN(mdev
, log_max_mkey
);
856 /* We support 'Gappy' memory registration too */
857 props
->kernel_cap_flags
|= IBK_SG_GAPS_REG
;
859 /* IB_WR_REG_MR always requires changing the entity size with UMR */
860 if (!MLX5_CAP_GEN(dev
->mdev
, umr_modify_entity_size_disabled
))
861 props
->device_cap_flags
|= IB_DEVICE_MEM_MGT_EXTENSIONS
;
862 if (MLX5_CAP_GEN(mdev
, sho
)) {
863 props
->kernel_cap_flags
|= IBK_INTEGRITY_HANDOVER
;
864 /* At this stage no support for signature handover */
865 props
->sig_prot_cap
= IB_PROT_T10DIF_TYPE_1
|
866 IB_PROT_T10DIF_TYPE_2
|
867 IB_PROT_T10DIF_TYPE_3
;
868 props
->sig_guard_cap
= IB_GUARD_T10DIF_CRC
|
869 IB_GUARD_T10DIF_CSUM
;
871 if (MLX5_CAP_GEN(mdev
, block_lb_mc
))
872 props
->kernel_cap_flags
|= IBK_BLOCK_MULTICAST_LOOPBACK
;
874 if (MLX5_CAP_GEN(dev
->mdev
, eth_net_offloads
) && raw_support
) {
875 if (MLX5_CAP_ETH(mdev
, csum_cap
)) {
876 /* Legacy bit to support old userspace libraries */
877 props
->device_cap_flags
|= IB_DEVICE_RAW_IP_CSUM
;
878 props
->raw_packet_caps
|= IB_RAW_PACKET_CAP_IP_CSUM
;
881 if (MLX5_CAP_ETH(dev
->mdev
, vlan_cap
))
882 props
->raw_packet_caps
|=
883 IB_RAW_PACKET_CAP_CVLAN_STRIPPING
;
885 if (offsetofend(typeof(resp
), tso_caps
) <= uhw_outlen
) {
886 max_tso
= MLX5_CAP_ETH(mdev
, max_lso_cap
);
888 resp
.tso_caps
.max_tso
= 1 << max_tso
;
889 resp
.tso_caps
.supported_qpts
|=
890 1 << IB_QPT_RAW_PACKET
;
891 resp
.response_length
+= sizeof(resp
.tso_caps
);
895 if (offsetofend(typeof(resp
), rss_caps
) <= uhw_outlen
) {
896 resp
.rss_caps
.rx_hash_function
=
897 MLX5_RX_HASH_FUNC_TOEPLITZ
;
898 resp
.rss_caps
.rx_hash_fields_mask
=
899 MLX5_RX_HASH_SRC_IPV4
|
900 MLX5_RX_HASH_DST_IPV4
|
901 MLX5_RX_HASH_SRC_IPV6
|
902 MLX5_RX_HASH_DST_IPV6
|
903 MLX5_RX_HASH_SRC_PORT_TCP
|
904 MLX5_RX_HASH_DST_PORT_TCP
|
905 MLX5_RX_HASH_SRC_PORT_UDP
|
906 MLX5_RX_HASH_DST_PORT_UDP
|
908 resp
.response_length
+= sizeof(resp
.rss_caps
);
911 if (offsetofend(typeof(resp
), tso_caps
) <= uhw_outlen
)
912 resp
.response_length
+= sizeof(resp
.tso_caps
);
913 if (offsetofend(typeof(resp
), rss_caps
) <= uhw_outlen
)
914 resp
.response_length
+= sizeof(resp
.rss_caps
);
917 if (MLX5_CAP_GEN(mdev
, ipoib_basic_offloads
)) {
918 props
->device_cap_flags
|= IB_DEVICE_UD_IP_CSUM
;
919 props
->kernel_cap_flags
|= IBK_UD_TSO
;
922 if (MLX5_CAP_GEN(dev
->mdev
, rq_delay_drop
) &&
923 MLX5_CAP_GEN(dev
->mdev
, general_notification_event
) &&
925 props
->raw_packet_caps
|= IB_RAW_PACKET_CAP_DELAY_DROP
;
927 if (MLX5_CAP_GEN(mdev
, ipoib_enhanced_offloads
) &&
928 MLX5_CAP_IPOIB_ENHANCED(mdev
, csum_cap
))
929 props
->device_cap_flags
|= IB_DEVICE_UD_IP_CSUM
;
931 if (MLX5_CAP_GEN(dev
->mdev
, eth_net_offloads
) &&
932 MLX5_CAP_ETH(dev
->mdev
, scatter_fcs
) &&
934 /* Legacy bit to support old userspace libraries */
935 props
->device_cap_flags
|= IB_DEVICE_RAW_SCATTER_FCS
;
936 props
->raw_packet_caps
|= IB_RAW_PACKET_CAP_SCATTER_FCS
;
939 if (MLX5_CAP_DEV_MEM(mdev
, memic
)) {
941 MLX5_CAP_DEV_MEM(mdev
, max_memic_size
);
944 if (mlx5_get_flow_namespace(dev
->mdev
, MLX5_FLOW_NAMESPACE_BYPASS
))
945 props
->device_cap_flags
|= IB_DEVICE_MANAGED_FLOW_STEERING
;
947 if (MLX5_CAP_GEN(mdev
, end_pad
))
948 props
->device_cap_flags
|= IB_DEVICE_PCI_WRITE_END_PADDING
;
950 props
->vendor_part_id
= mdev
->pdev
->device
;
951 props
->hw_ver
= mdev
->pdev
->revision
;
953 props
->max_mr_size
= ~0ull;
954 props
->page_size_cap
= ~(min_page_size
- 1);
955 props
->max_qp
= 1 << MLX5_CAP_GEN(mdev
, log_max_qp
);
956 props
->max_qp_wr
= 1 << MLX5_CAP_GEN(mdev
, log_max_qp_sz
);
957 max_rq_sg
= MLX5_CAP_GEN(mdev
, max_wqe_sz_rq
) /
958 sizeof(struct mlx5_wqe_data_seg
);
959 max_sq_desc
= min_t(int, MLX5_CAP_GEN(mdev
, max_wqe_sz_sq
), 512);
960 max_sq_sg
= (max_sq_desc
- sizeof(struct mlx5_wqe_ctrl_seg
) -
961 sizeof(struct mlx5_wqe_raddr_seg
)) /
962 sizeof(struct mlx5_wqe_data_seg
);
963 props
->max_send_sge
= max_sq_sg
;
964 props
->max_recv_sge
= max_rq_sg
;
965 props
->max_sge_rd
= MLX5_MAX_SGE_RD
;
966 props
->max_cq
= 1 << MLX5_CAP_GEN(mdev
, log_max_cq
);
967 props
->max_cqe
= (1 << MLX5_CAP_GEN(mdev
, log_max_cq_sz
)) - 1;
968 props
->max_mr
= 1 << MLX5_CAP_GEN(mdev
, log_max_mkey
);
969 props
->max_pd
= 1 << MLX5_CAP_GEN(mdev
, log_max_pd
);
970 props
->max_qp_rd_atom
= 1 << MLX5_CAP_GEN(mdev
, log_max_ra_req_qp
);
971 props
->max_qp_init_rd_atom
= 1 << MLX5_CAP_GEN(mdev
, log_max_ra_res_qp
);
972 props
->max_srq
= 1 << MLX5_CAP_GEN(mdev
, log_max_srq
);
973 props
->max_srq_wr
= (1 << MLX5_CAP_GEN(mdev
, log_max_srq_sz
)) - 1;
974 props
->local_ca_ack_delay
= MLX5_CAP_GEN(mdev
, local_ca_ack_delay
);
975 props
->max_res_rd_atom
= props
->max_qp_rd_atom
* props
->max_qp
;
976 props
->max_srq_sge
= max_rq_sg
- 1;
977 props
->max_fast_reg_page_list_len
=
978 1 << MLX5_CAP_GEN(mdev
, log_max_klm_list_size
);
979 props
->max_pi_fast_reg_page_list_len
=
980 props
->max_fast_reg_page_list_len
/ 2;
982 MLX5_CAP_GEN(mdev
, max_sgl_for_optimized_performance
);
983 get_atomic_caps_qp(dev
, props
);
984 props
->masked_atomic_cap
= IB_ATOMIC_NONE
;
985 props
->max_mcast_grp
= 1 << MLX5_CAP_GEN(mdev
, log_max_mcg
);
986 props
->max_mcast_qp_attach
= MLX5_CAP_GEN(mdev
, max_qp_mcg
);
987 props
->max_total_mcast_qp_attach
= props
->max_mcast_qp_attach
*
988 props
->max_mcast_grp
;
989 props
->max_ah
= INT_MAX
;
990 props
->hca_core_clock
= MLX5_CAP_GEN(mdev
, device_frequency_khz
);
991 props
->timestamp_mask
= 0x7FFFFFFFFFFFFFFFULL
;
993 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING
)) {
994 if (dev
->odp_caps
.general_caps
& IB_ODP_SUPPORT
)
995 props
->kernel_cap_flags
|= IBK_ON_DEMAND_PAGING
;
996 props
->odp_caps
= dev
->odp_caps
;
998 /* ODP for kernel QPs is not implemented for receive
1001 props
->odp_caps
.per_transport_caps
.rc_odp_caps
&=
1002 ~(IB_ODP_SUPPORT_READ
|
1003 IB_ODP_SUPPORT_SRQ_RECV
);
1004 props
->odp_caps
.per_transport_caps
.uc_odp_caps
&=
1005 ~(IB_ODP_SUPPORT_READ
|
1006 IB_ODP_SUPPORT_SRQ_RECV
);
1007 props
->odp_caps
.per_transport_caps
.ud_odp_caps
&=
1008 ~(IB_ODP_SUPPORT_READ
|
1009 IB_ODP_SUPPORT_SRQ_RECV
);
1010 props
->odp_caps
.per_transport_caps
.xrc_odp_caps
&=
1011 ~(IB_ODP_SUPPORT_READ
|
1012 IB_ODP_SUPPORT_SRQ_RECV
);
1016 if (mlx5_core_is_vf(mdev
))
1017 props
->kernel_cap_flags
|= IBK_VIRTUAL_FUNCTION
;
1019 if (mlx5_ib_port_link_layer(ibdev
, 1) ==
1020 IB_LINK_LAYER_ETHERNET
&& raw_support
) {
1021 props
->rss_caps
.max_rwq_indirection_tables
=
1022 1 << MLX5_CAP_GEN(dev
->mdev
, log_max_rqt
);
1023 props
->rss_caps
.max_rwq_indirection_table_size
=
1024 1 << MLX5_CAP_GEN(dev
->mdev
, log_max_rqt_size
);
1025 props
->rss_caps
.supported_qpts
= 1 << IB_QPT_RAW_PACKET
;
1026 props
->max_wq_type_rq
=
1027 1 << MLX5_CAP_GEN(dev
->mdev
, log_max_rq
);
1030 if (MLX5_CAP_GEN(mdev
, tag_matching
)) {
1031 props
->tm_caps
.max_num_tags
=
1032 (1 << MLX5_CAP_GEN(mdev
, log_tag_matching_list_sz
)) - 1;
1033 props
->tm_caps
.max_ops
=
1034 1 << MLX5_CAP_GEN(mdev
, log_max_qp_sz
);
1035 props
->tm_caps
.max_sge
= MLX5_TM_MAX_SGE
;
1038 if (MLX5_CAP_GEN(mdev
, tag_matching
) &&
1039 MLX5_CAP_GEN(mdev
, rndv_offload_rc
)) {
1040 props
->tm_caps
.flags
= IB_TM_CAP_RNDV_RC
;
1041 props
->tm_caps
.max_rndv_hdr_size
= MLX5_TM_MAX_RNDV_MSG_SIZE
;
1044 if (MLX5_CAP_GEN(dev
->mdev
, cq_moderation
)) {
1045 props
->cq_caps
.max_cq_moderation_count
=
1047 props
->cq_caps
.max_cq_moderation_period
=
1051 if (offsetofend(typeof(resp
), cqe_comp_caps
) <= uhw_outlen
) {
1052 resp
.response_length
+= sizeof(resp
.cqe_comp_caps
);
1054 if (MLX5_CAP_GEN(dev
->mdev
, cqe_compression
)) {
1055 resp
.cqe_comp_caps
.max_num
=
1056 MLX5_CAP_GEN(dev
->mdev
,
1057 cqe_compression_max_num
);
1059 resp
.cqe_comp_caps
.supported_format
=
1060 MLX5_IB_CQE_RES_FORMAT_HASH
|
1061 MLX5_IB_CQE_RES_FORMAT_CSUM
;
1063 if (MLX5_CAP_GEN(dev
->mdev
, mini_cqe_resp_stride_index
))
1064 resp
.cqe_comp_caps
.supported_format
|=
1065 MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX
;
1069 if (offsetofend(typeof(resp
), packet_pacing_caps
) <= uhw_outlen
&&
1071 if (MLX5_CAP_QOS(mdev
, packet_pacing
) &&
1072 MLX5_CAP_GEN(mdev
, qos
)) {
1073 resp
.packet_pacing_caps
.qp_rate_limit_max
=
1074 MLX5_CAP_QOS(mdev
, packet_pacing_max_rate
);
1075 resp
.packet_pacing_caps
.qp_rate_limit_min
=
1076 MLX5_CAP_QOS(mdev
, packet_pacing_min_rate
);
1077 resp
.packet_pacing_caps
.supported_qpts
|=
1078 1 << IB_QPT_RAW_PACKET
;
1079 if (MLX5_CAP_QOS(mdev
, packet_pacing_burst_bound
) &&
1080 MLX5_CAP_QOS(mdev
, packet_pacing_typical_size
))
1081 resp
.packet_pacing_caps
.cap_flags
|=
1082 MLX5_IB_PP_SUPPORT_BURST
;
1084 resp
.response_length
+= sizeof(resp
.packet_pacing_caps
);
1087 if (offsetofend(typeof(resp
), mlx5_ib_support_multi_pkt_send_wqes
) <=
1089 if (MLX5_CAP_ETH(mdev
, multi_pkt_send_wqe
))
1090 resp
.mlx5_ib_support_multi_pkt_send_wqes
=
1093 if (MLX5_CAP_ETH(mdev
, enhanced_multi_pkt_send_wqe
))
1094 resp
.mlx5_ib_support_multi_pkt_send_wqes
|=
1095 MLX5_IB_SUPPORT_EMPW
;
1097 resp
.response_length
+=
1098 sizeof(resp
.mlx5_ib_support_multi_pkt_send_wqes
);
1101 if (offsetofend(typeof(resp
), flags
) <= uhw_outlen
) {
1102 resp
.response_length
+= sizeof(resp
.flags
);
1104 if (MLX5_CAP_GEN(mdev
, cqe_compression_128
))
1106 MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP
;
1108 if (MLX5_CAP_GEN(mdev
, cqe_128_always
))
1109 resp
.flags
|= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD
;
1110 if (MLX5_CAP_GEN(mdev
, qp_packet_based
))
1112 MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE
;
1114 resp
.flags
|= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT
;
1117 if (offsetofend(typeof(resp
), sw_parsing_caps
) <= uhw_outlen
) {
1118 resp
.response_length
+= sizeof(resp
.sw_parsing_caps
);
1119 if (MLX5_CAP_ETH(mdev
, swp
)) {
1120 resp
.sw_parsing_caps
.sw_parsing_offloads
|=
1123 if (MLX5_CAP_ETH(mdev
, swp_csum
))
1124 resp
.sw_parsing_caps
.sw_parsing_offloads
|=
1125 MLX5_IB_SW_PARSING_CSUM
;
1127 if (MLX5_CAP_ETH(mdev
, swp_lso
))
1128 resp
.sw_parsing_caps
.sw_parsing_offloads
|=
1129 MLX5_IB_SW_PARSING_LSO
;
1131 if (resp
.sw_parsing_caps
.sw_parsing_offloads
)
1132 resp
.sw_parsing_caps
.supported_qpts
=
1133 BIT(IB_QPT_RAW_PACKET
);
1137 if (offsetofend(typeof(resp
), striding_rq_caps
) <= uhw_outlen
&&
1139 resp
.response_length
+= sizeof(resp
.striding_rq_caps
);
1140 if (MLX5_CAP_GEN(mdev
, striding_rq
)) {
1141 resp
.striding_rq_caps
.min_single_stride_log_num_of_bytes
=
1142 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES
;
1143 resp
.striding_rq_caps
.max_single_stride_log_num_of_bytes
=
1144 MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES
;
1145 if (MLX5_CAP_GEN(dev
->mdev
, ext_stride_num_range
))
1146 resp
.striding_rq_caps
1147 .min_single_wqe_log_num_of_strides
=
1148 MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES
;
1150 resp
.striding_rq_caps
1151 .min_single_wqe_log_num_of_strides
=
1152 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES
;
1153 resp
.striding_rq_caps
.max_single_wqe_log_num_of_strides
=
1154 MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES
;
1155 resp
.striding_rq_caps
.supported_qpts
=
1156 BIT(IB_QPT_RAW_PACKET
);
1160 if (offsetofend(typeof(resp
), tunnel_offloads_caps
) <= uhw_outlen
) {
1161 resp
.response_length
+= sizeof(resp
.tunnel_offloads_caps
);
1162 if (MLX5_CAP_ETH(mdev
, tunnel_stateless_vxlan
))
1163 resp
.tunnel_offloads_caps
|=
1164 MLX5_IB_TUNNELED_OFFLOADS_VXLAN
;
1165 if (MLX5_CAP_ETH(mdev
, tunnel_stateless_geneve_rx
))
1166 resp
.tunnel_offloads_caps
|=
1167 MLX5_IB_TUNNELED_OFFLOADS_GENEVE
;
1168 if (MLX5_CAP_ETH(mdev
, tunnel_stateless_gre
))
1169 resp
.tunnel_offloads_caps
|=
1170 MLX5_IB_TUNNELED_OFFLOADS_GRE
;
1171 if (MLX5_CAP_ETH(mdev
, tunnel_stateless_mpls_over_gre
))
1172 resp
.tunnel_offloads_caps
|=
1173 MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE
;
1174 if (MLX5_CAP_ETH(mdev
, tunnel_stateless_mpls_over_udp
))
1175 resp
.tunnel_offloads_caps
|=
1176 MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP
;
1179 if (offsetofend(typeof(resp
), dci_streams_caps
) <= uhw_outlen
) {
1180 resp
.response_length
+= sizeof(resp
.dci_streams_caps
);
1182 resp
.dci_streams_caps
.max_log_num_concurent
=
1183 MLX5_CAP_GEN(mdev
, log_max_dci_stream_channels
);
1185 resp
.dci_streams_caps
.max_log_num_errored
=
1186 MLX5_CAP_GEN(mdev
, log_max_dci_errored_streams
);
1190 err
= ib_copy_to_udata(uhw
, &resp
, resp
.response_length
);
1199 static void translate_active_width(struct ib_device
*ibdev
, u16 active_width
,
1202 struct mlx5_ib_dev
*dev
= to_mdev(ibdev
);
1204 if (active_width
& MLX5_PTYS_WIDTH_1X
)
1205 *ib_width
= IB_WIDTH_1X
;
1206 else if (active_width
& MLX5_PTYS_WIDTH_2X
)
1207 *ib_width
= IB_WIDTH_2X
;
1208 else if (active_width
& MLX5_PTYS_WIDTH_4X
)
1209 *ib_width
= IB_WIDTH_4X
;
1210 else if (active_width
& MLX5_PTYS_WIDTH_8X
)
1211 *ib_width
= IB_WIDTH_8X
;
1212 else if (active_width
& MLX5_PTYS_WIDTH_12X
)
1213 *ib_width
= IB_WIDTH_12X
;
1215 mlx5_ib_dbg(dev
, "Invalid active_width %d, setting width to default value: 4x\n",
1217 *ib_width
= IB_WIDTH_4X
;
static int mlx5_mtu_to_ib_mtu(int mtu)
{
	switch (mtu) {
	/* ... */
	case 1024: return 3;
	case 2048: return 4;
	case 4096: return 5;
	default:
		pr_warn("invalid mtu\n");
		/* ... */
	}
}
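/*
 * Note (added for clarity, not part of the original source): the returned
 * values follow the IB MTU enumeration, where IB_MTU_256 == 1 up to
 * IB_MTU_4096 == 5, i.e. the result is log2(mtu) - 7 for the supported
 * power-of-two MTUs (1024 -> 3, 2048 -> 4, 4096 -> 5 as above).
 */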
1237 enum ib_max_vl_num
{
1239 __IB_MAX_VL_0_1
= 2,
1240 __IB_MAX_VL_0_3
= 3,
1241 __IB_MAX_VL_0_7
= 4,
1242 __IB_MAX_VL_0_14
= 5,
1245 enum mlx5_vl_hw_cap
{
1254 MLX5_VL_HW_0_14
= 15
1257 static int translate_max_vl_num(struct ib_device
*ibdev
, u8 vl_hw_cap
,
1260 switch (vl_hw_cap
) {
1262 *max_vl_num
= __IB_MAX_VL_0
;
1264 case MLX5_VL_HW_0_1
:
1265 *max_vl_num
= __IB_MAX_VL_0_1
;
1267 case MLX5_VL_HW_0_3
:
1268 *max_vl_num
= __IB_MAX_VL_0_3
;
1270 case MLX5_VL_HW_0_7
:
1271 *max_vl_num
= __IB_MAX_VL_0_7
;
1273 case MLX5_VL_HW_0_14
:
1274 *max_vl_num
= __IB_MAX_VL_0_14
;
1284 static int mlx5_query_hca_port(struct ib_device
*ibdev
, u32 port
,
1285 struct ib_port_attr
*props
)
1287 struct mlx5_ib_dev
*dev
= to_mdev(ibdev
);
1288 struct mlx5_core_dev
*mdev
= dev
->mdev
;
1289 struct mlx5_hca_vport_context
*rep
;
1293 u16 ib_link_width_oper
;
1296 rep
= kzalloc(sizeof(*rep
), GFP_KERNEL
);
1302 /* props being zeroed by the caller, avoid zeroing it here */
1304 err
= mlx5_query_hca_vport_context(mdev
, 0, port
, 0, rep
);
1308 props
->lid
= rep
->lid
;
1309 props
->lmc
= rep
->lmc
;
1310 props
->sm_lid
= rep
->sm_lid
;
1311 props
->sm_sl
= rep
->sm_sl
;
1312 props
->state
= rep
->vport_state
;
1313 props
->phys_state
= rep
->port_physical_state
;
1314 props
->port_cap_flags
= rep
->cap_mask1
;
1315 props
->gid_tbl_len
= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev
, gid_table_size
));
1316 props
->max_msg_sz
= 1 << MLX5_CAP_GEN(mdev
, log_max_msg
);
1317 props
->pkey_tbl_len
= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev
, pkey_table_size
));
1318 props
->bad_pkey_cntr
= rep
->pkey_violation_counter
;
1319 props
->qkey_viol_cntr
= rep
->qkey_violation_counter
;
1320 props
->subnet_timeout
= rep
->subnet_timeout
;
1321 props
->init_type_reply
= rep
->init_type_reply
;
1323 if (props
->port_cap_flags
& IB_PORT_CAP_MASK2_SUP
)
1324 props
->port_cap_flags2
= rep
->cap_mask2
;
1326 err
= mlx5_query_ib_port_oper(mdev
, &ib_link_width_oper
,
1327 &props
->active_speed
, port
);
1331 translate_active_width(ibdev
, ib_link_width_oper
, &props
->active_width
);
1333 mlx5_query_port_max_mtu(mdev
, &max_mtu
, port
);
1335 props
->max_mtu
= mlx5_mtu_to_ib_mtu(max_mtu
);
1337 mlx5_query_port_oper_mtu(mdev
, &oper_mtu
, port
);
1339 props
->active_mtu
= mlx5_mtu_to_ib_mtu(oper_mtu
);
1341 err
= mlx5_query_port_vl_hw_cap(mdev
, &vl_hw_cap
, port
);
1345 err
= translate_max_vl_num(ibdev
, vl_hw_cap
,
1346 &props
->max_vl_num
);
1352 int mlx5_ib_query_port(struct ib_device
*ibdev
, u32 port
,
1353 struct ib_port_attr
*props
)
1358 switch (mlx5_get_vport_access_method(ibdev
)) {
1359 case MLX5_VPORT_ACCESS_METHOD_MAD
:
1360 ret
= mlx5_query_mad_ifc_port(ibdev
, port
, props
);
1363 case MLX5_VPORT_ACCESS_METHOD_HCA
:
1364 ret
= mlx5_query_hca_port(ibdev
, port
, props
);
1367 case MLX5_VPORT_ACCESS_METHOD_NIC
:
1368 ret
= mlx5_query_port_roce(ibdev
, port
, props
);
1375 if (!ret
&& props
) {
1376 struct mlx5_ib_dev
*dev
= to_mdev(ibdev
);
1377 struct mlx5_core_dev
*mdev
;
1378 bool put_mdev
= true;
1380 mdev
= mlx5_ib_get_native_port_mdev(dev
, port
, NULL
);
1382 /* If the port isn't affiliated yet query the master.
1383 * The master and slave will have the same values.
1389 count
= mlx5_core_reserved_gids_count(mdev
);
1391 mlx5_ib_put_native_port_mdev(dev
, port
);
1392 props
->gid_tbl_len
-= count
;
1397 static int mlx5_ib_rep_query_port(struct ib_device
*ibdev
, u32 port
,
1398 struct ib_port_attr
*props
)
1400 return mlx5_query_port_roce(ibdev
, port
, props
);
1403 static int mlx5_ib_rep_query_pkey(struct ib_device
*ibdev
, u32 port
, u16 index
,
1406 /* Default special Pkey for representor device port as per the
1407 * IB specification 1.3 section 10.9.1.2.
1413 static int mlx5_ib_query_gid(struct ib_device
*ibdev
, u32 port
, int index
,
1416 struct mlx5_ib_dev
*dev
= to_mdev(ibdev
);
1417 struct mlx5_core_dev
*mdev
= dev
->mdev
;
1419 switch (mlx5_get_vport_access_method(ibdev
)) {
1420 case MLX5_VPORT_ACCESS_METHOD_MAD
:
1421 return mlx5_query_mad_ifc_gids(ibdev
, port
, index
, gid
);
1423 case MLX5_VPORT_ACCESS_METHOD_HCA
:
1424 return mlx5_query_hca_vport_gid(mdev
, 0, port
, 0, index
, gid
);
1432 static int mlx5_query_hca_nic_pkey(struct ib_device
*ibdev
, u32 port
,
1433 u16 index
, u16
*pkey
)
1435 struct mlx5_ib_dev
*dev
= to_mdev(ibdev
);
1436 struct mlx5_core_dev
*mdev
;
1437 bool put_mdev
= true;
1441 mdev
= mlx5_ib_get_native_port_mdev(dev
, port
, &mdev_port_num
);
1443 /* The port isn't affiliated yet, get the PKey from the master
1444 * port. For RoCE the PKey tables will be the same.
1451 err
= mlx5_query_hca_vport_pkey(mdev
, 0, mdev_port_num
, 0,
1454 mlx5_ib_put_native_port_mdev(dev
, port
);
1459 static int mlx5_ib_query_pkey(struct ib_device
*ibdev
, u32 port
, u16 index
,
1462 switch (mlx5_get_vport_access_method(ibdev
)) {
1463 case MLX5_VPORT_ACCESS_METHOD_MAD
:
1464 return mlx5_query_mad_ifc_pkey(ibdev
, port
, index
, pkey
);
1466 case MLX5_VPORT_ACCESS_METHOD_HCA
:
1467 case MLX5_VPORT_ACCESS_METHOD_NIC
:
1468 return mlx5_query_hca_nic_pkey(ibdev
, port
, index
, pkey
);
1474 static int mlx5_ib_modify_device(struct ib_device
*ibdev
, int mask
,
1475 struct ib_device_modify
*props
)
1477 struct mlx5_ib_dev
*dev
= to_mdev(ibdev
);
1478 struct mlx5_reg_node_desc in
;
1479 struct mlx5_reg_node_desc out
;
1482 if (mask
& ~IB_DEVICE_MODIFY_NODE_DESC
)
1485 if (!(mask
& IB_DEVICE_MODIFY_NODE_DESC
))
1489 * If possible, pass node desc to FW, so it can generate
1490 * a 144 trap. If cmd fails, just ignore.
1492 memcpy(&in
, props
->node_desc
, IB_DEVICE_NODE_DESC_MAX
);
1493 err
= mlx5_core_access_reg(dev
->mdev
, &in
, sizeof(in
), &out
,
1494 sizeof(out
), MLX5_REG_NODE_DESC
, 0, 1);
1498 memcpy(ibdev
->node_desc
, props
->node_desc
, IB_DEVICE_NODE_DESC_MAX
);
1503 static int set_port_caps_atomic(struct mlx5_ib_dev
*dev
, u32 port_num
, u32 mask
,
1506 struct mlx5_hca_vport_context ctx
= {};
1507 struct mlx5_core_dev
*mdev
;
1511 mdev
= mlx5_ib_get_native_port_mdev(dev
, port_num
, &mdev_port_num
);
1515 err
= mlx5_query_hca_vport_context(mdev
, 0, mdev_port_num
, 0, &ctx
);
1519 if (~ctx
.cap_mask1_perm
& mask
) {
1520 mlx5_ib_warn(dev
, "trying to change bitmask 0x%X but change supported 0x%X\n",
1521 mask
, ctx
.cap_mask1_perm
);
1526 ctx
.cap_mask1
= value
;
1527 ctx
.cap_mask1_perm
= mask
;
1528 err
= mlx5_core_modify_hca_vport_context(mdev
, 0, mdev_port_num
,
1532 mlx5_ib_put_native_port_mdev(dev
, port_num
);
1537 static int mlx5_ib_modify_port(struct ib_device
*ibdev
, u32 port
, int mask
,
1538 struct ib_port_modify
*props
)
1540 struct mlx5_ib_dev
*dev
= to_mdev(ibdev
);
1541 struct ib_port_attr attr
;
1546 bool is_ib
= (mlx5_ib_port_link_layer(ibdev
, port
) ==
1547 IB_LINK_LAYER_INFINIBAND
);
1549 /* CM layer calls ib_modify_port() regardless of the link layer. For
1550 * Ethernet ports, qkey violation and Port capabilities are meaningless.
1555 if (MLX5_CAP_GEN(dev
->mdev
, ib_virt
) && is_ib
) {
1556 change_mask
= props
->clr_port_cap_mask
| props
->set_port_cap_mask
;
1557 value
= ~props
->clr_port_cap_mask
| props
->set_port_cap_mask
;
1558 return set_port_caps_atomic(dev
, port
, change_mask
, value
);
1561 mutex_lock(&dev
->cap_mask_mutex
);
1563 err
= ib_query_port(ibdev
, port
, &attr
);
1567 tmp
= (attr
.port_cap_flags
| props
->set_port_cap_mask
) &
1568 ~props
->clr_port_cap_mask
;
1570 err
= mlx5_set_port_caps(dev
->mdev
, port
, tmp
);
1573 mutex_unlock(&dev
->cap_mask_mutex
);
1577 static void print_lib_caps(struct mlx5_ib_dev
*dev
, u64 caps
)
1579 mlx5_ib_dbg(dev
, "MLX5_LIB_CAP_4K_UAR = %s\n",
1580 caps
& MLX5_LIB_CAP_4K_UAR
? "y" : "n");
1583 static u16
calc_dynamic_bfregs(int uars_per_sys_page
)
1585 /* Large page with non 4k uar support might limit the dynamic size */
1586 if (uars_per_sys_page
== 1 && PAGE_SIZE
> 4096)
1587 return MLX5_MIN_DYN_BFREGS
;
1589 return MLX5_MAX_DYN_BFREGS
;
1592 static int calc_total_bfregs(struct mlx5_ib_dev
*dev
, bool lib_uar_4k
,
1593 struct mlx5_ib_alloc_ucontext_req_v2
*req
,
1594 struct mlx5_bfreg_info
*bfregi
)
1596 int uars_per_sys_page
;
1597 int bfregs_per_sys_page
;
1598 int ref_bfregs
= req
->total_num_bfregs
;
1600 if (req
->total_num_bfregs
== 0)
1603 BUILD_BUG_ON(MLX5_MAX_BFREGS
% MLX5_NON_FP_BFREGS_IN_PAGE
);
1604 BUILD_BUG_ON(MLX5_MAX_BFREGS
< MLX5_NON_FP_BFREGS_IN_PAGE
);
1606 if (req
->total_num_bfregs
> MLX5_MAX_BFREGS
)
1609 uars_per_sys_page
= get_uars_per_sys_page(dev
, lib_uar_4k
);
1610 bfregs_per_sys_page
= uars_per_sys_page
* MLX5_NON_FP_BFREGS_PER_UAR
;
1611 /* This holds the required static allocation asked by the user */
1612 req
->total_num_bfregs
= ALIGN(req
->total_num_bfregs
, bfregs_per_sys_page
);
1613 if (req
->num_low_latency_bfregs
> req
->total_num_bfregs
- 1)
1616 bfregi
->num_static_sys_pages
= req
->total_num_bfregs
/ bfregs_per_sys_page
;
1617 bfregi
->num_dyn_bfregs
= ALIGN(calc_dynamic_bfregs(uars_per_sys_page
), bfregs_per_sys_page
);
1618 bfregi
->total_num_bfregs
= req
->total_num_bfregs
+ bfregi
->num_dyn_bfregs
;
1619 bfregi
->num_sys_pages
= bfregi
->total_num_bfregs
/ bfregs_per_sys_page
;
1621 mlx5_ib_dbg(dev
, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
1622 MLX5_CAP_GEN(dev
->mdev
, uar_4k
) ? "yes" : "no",
1623 lib_uar_4k
? "yes" : "no", ref_bfregs
,
1624 req
->total_num_bfregs
, bfregi
->total_num_bfregs
,
1625 bfregi
->num_sys_pages
);
1630 static int allocate_uars(struct mlx5_ib_dev
*dev
, struct mlx5_ib_ucontext
*context
)
1632 struct mlx5_bfreg_info
*bfregi
;
1636 bfregi
= &context
->bfregi
;
1637 for (i
= 0; i
< bfregi
->num_static_sys_pages
; i
++) {
1638 err
= mlx5_cmd_uar_alloc(dev
->mdev
, &bfregi
->sys_pages
[i
],
1643 mlx5_ib_dbg(dev
, "allocated uar %d\n", bfregi
->sys_pages
[i
]);
1646 for (i
= bfregi
->num_static_sys_pages
; i
< bfregi
->num_sys_pages
; i
++)
1647 bfregi
->sys_pages
[i
] = MLX5_IB_INVALID_UAR_INDEX
;
1652 for (--i
; i
>= 0; i
--)
1653 if (mlx5_cmd_uar_dealloc(dev
->mdev
, bfregi
->sys_pages
[i
],
1655 mlx5_ib_warn(dev
, "failed to free uar %d\n", i
);
1660 static void deallocate_uars(struct mlx5_ib_dev
*dev
,
1661 struct mlx5_ib_ucontext
*context
)
1663 struct mlx5_bfreg_info
*bfregi
;
1666 bfregi
= &context
->bfregi
;
1667 for (i
= 0; i
< bfregi
->num_sys_pages
; i
++)
1668 if (i
< bfregi
->num_static_sys_pages
||
1669 bfregi
->sys_pages
[i
] != MLX5_IB_INVALID_UAR_INDEX
)
1670 mlx5_cmd_uar_dealloc(dev
->mdev
, bfregi
->sys_pages
[i
],
1674 int mlx5_ib_enable_lb(struct mlx5_ib_dev
*dev
, bool td
, bool qp
)
1678 mutex_lock(&dev
->lb
.mutex
);
1684 if (dev
->lb
.user_td
== 2 ||
1686 if (!dev
->lb
.enabled
) {
1687 err
= mlx5_nic_vport_update_local_lb(dev
->mdev
, true);
1688 dev
->lb
.enabled
= true;
1692 mutex_unlock(&dev
->lb
.mutex
);
1697 void mlx5_ib_disable_lb(struct mlx5_ib_dev
*dev
, bool td
, bool qp
)
1699 mutex_lock(&dev
->lb
.mutex
);
1705 if (dev
->lb
.user_td
== 1 &&
1707 if (dev
->lb
.enabled
) {
1708 mlx5_nic_vport_update_local_lb(dev
->mdev
, false);
1709 dev
->lb
.enabled
= false;
1713 mutex_unlock(&dev
->lb
.mutex
);
1716 static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev
*dev
, u32
*tdn
,
1721 if (!MLX5_CAP_GEN(dev
->mdev
, log_max_transport_domain
))
1724 err
= mlx5_cmd_alloc_transport_domain(dev
->mdev
, tdn
, uid
);
1728 if ((MLX5_CAP_GEN(dev
->mdev
, port_type
) != MLX5_CAP_PORT_TYPE_ETH
) ||
1729 (!MLX5_CAP_GEN(dev
->mdev
, disable_local_lb_uc
) &&
1730 !MLX5_CAP_GEN(dev
->mdev
, disable_local_lb_mc
)))
1733 return mlx5_ib_enable_lb(dev
, true, false);
1736 static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev
*dev
, u32 tdn
,
1739 if (!MLX5_CAP_GEN(dev
->mdev
, log_max_transport_domain
))
1742 mlx5_cmd_dealloc_transport_domain(dev
->mdev
, tdn
, uid
);
1744 if ((MLX5_CAP_GEN(dev
->mdev
, port_type
) != MLX5_CAP_PORT_TYPE_ETH
) ||
1745 (!MLX5_CAP_GEN(dev
->mdev
, disable_local_lb_uc
) &&
1746 !MLX5_CAP_GEN(dev
->mdev
, disable_local_lb_mc
)))
1749 mlx5_ib_disable_lb(dev
, true, false);
1752 static int set_ucontext_resp(struct ib_ucontext
*uctx
,
1753 struct mlx5_ib_alloc_ucontext_resp
*resp
)
1755 struct ib_device
*ibdev
= uctx
->device
;
1756 struct mlx5_ib_dev
*dev
= to_mdev(ibdev
);
1757 struct mlx5_ib_ucontext
*context
= to_mucontext(uctx
);
1758 struct mlx5_bfreg_info
*bfregi
= &context
->bfregi
;
1761 if (MLX5_CAP_GEN(dev
->mdev
, dump_fill_mkey
)) {
1762 err
= mlx5_cmd_dump_fill_mkey(dev
->mdev
,
1763 &resp
->dump_fill_mkey
);
1767 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY
;
1770 resp
->qp_tab_size
= 1 << MLX5_CAP_GEN(dev
->mdev
, log_max_qp
);
1771 if (dev
->wc_support
)
1772 resp
->bf_reg_size
= 1 << MLX5_CAP_GEN(dev
->mdev
,
1774 resp
->cache_line_size
= cache_line_size();
1775 resp
->max_sq_desc_sz
= MLX5_CAP_GEN(dev
->mdev
, max_wqe_sz_sq
);
1776 resp
->max_rq_desc_sz
= MLX5_CAP_GEN(dev
->mdev
, max_wqe_sz_rq
);
1777 resp
->max_send_wqebb
= 1 << MLX5_CAP_GEN(dev
->mdev
, log_max_qp_sz
);
1778 resp
->max_recv_wr
= 1 << MLX5_CAP_GEN(dev
->mdev
, log_max_qp_sz
);
1779 resp
->max_srq_recv_wr
= 1 << MLX5_CAP_GEN(dev
->mdev
, log_max_srq_sz
);
1780 resp
->cqe_version
= context
->cqe_version
;
1781 resp
->log_uar_size
= MLX5_CAP_GEN(dev
->mdev
, uar_4k
) ?
1782 MLX5_ADAPTER_PAGE_SHIFT
: PAGE_SHIFT
;
1783 resp
->num_uars_per_page
= MLX5_CAP_GEN(dev
->mdev
, uar_4k
) ?
1784 MLX5_CAP_GEN(dev
->mdev
,
1785 num_of_uars_per_page
) : 1;
1786 resp
->tot_bfregs
= bfregi
->lib_uar_dyn
? 0 :
1787 bfregi
->total_num_bfregs
- bfregi
->num_dyn_bfregs
;
1788 resp
->num_ports
= dev
->num_ports
;
1789 resp
->cmds_supp_uhw
|= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE
|
1790 MLX5_USER_CMDS_SUPP_UHW_CREATE_AH
;
1792 if (mlx5_ib_port_link_layer(ibdev
, 1) == IB_LINK_LAYER_ETHERNET
) {
1793 mlx5_query_min_inline(dev
->mdev
, &resp
->eth_min_inline
);
1794 resp
->eth_min_inline
++;
1797 if (dev
->mdev
->clock_info
)
1798 resp
->clock_info_versions
= BIT(MLX5_IB_CLOCK_INFO_V1
);
1801 * We don't want to expose information from the PCI bar that is located
1802 * after 4096 bytes, so if the arch only supports larger pages, let's
1803 * pretend we don't support reading the HCA's core clock. This is also
1804 * forced by mmap function.
1806 if (PAGE_SIZE
<= 4096) {
1808 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET
;
1809 resp
->hca_core_clock_offset
=
1810 offsetof(struct mlx5_init_seg
,
1811 internal_timer_h
) % PAGE_SIZE
;
1814 if (MLX5_CAP_GEN(dev
->mdev
, ece_support
))
1815 resp
->comp_mask
|= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE
;
1817 if (rt_supported(MLX5_CAP_GEN(dev
->mdev
, sq_ts_format
)) &&
1818 rt_supported(MLX5_CAP_GEN(dev
->mdev
, rq_ts_format
)) &&
1819 rt_supported(MLX5_CAP_ROCE(dev
->mdev
, qp_ts_format
)))
1821 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS
;
1823 resp
->num_dyn_bfregs
= bfregi
->num_dyn_bfregs
;
1825 if (MLX5_CAP_GEN(dev
->mdev
, drain_sigerr
))
1826 resp
->comp_mask
|= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS
;
1829 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_MKEY_UPDATE_TAG
;
1834 static int mlx5_ib_alloc_ucontext(struct ib_ucontext
*uctx
,
1835 struct ib_udata
*udata
)
1837 struct ib_device
*ibdev
= uctx
->device
;
1838 struct mlx5_ib_dev
*dev
= to_mdev(ibdev
);
1839 struct mlx5_ib_alloc_ucontext_req_v2 req
= {};
1840 struct mlx5_ib_alloc_ucontext_resp resp
= {};
1841 struct mlx5_ib_ucontext
*context
= to_mucontext(uctx
);
1842 struct mlx5_bfreg_info
*bfregi
;
1845 size_t min_req_v2
= offsetof(struct mlx5_ib_alloc_ucontext_req_v2
,
1850 if (!dev
->ib_active
)
1853 if (udata
->inlen
== sizeof(struct mlx5_ib_alloc_ucontext_req
))
1855 else if (udata
->inlen
>= min_req_v2
)
1860 err
= ib_copy_from_udata(&req
, udata
, min(udata
->inlen
, sizeof(req
)));
1864 if (req
.flags
& ~MLX5_IB_ALLOC_UCTX_DEVX
)
1867 if (req
.comp_mask
|| req
.reserved0
|| req
.reserved1
|| req
.reserved2
)
1870 req
.total_num_bfregs
= ALIGN(req
.total_num_bfregs
,
1871 MLX5_NON_FP_BFREGS_PER_UAR
);
1872 if (req
.num_low_latency_bfregs
> req
.total_num_bfregs
- 1)
1875 if (req
.flags
& MLX5_IB_ALLOC_UCTX_DEVX
) {
1876 err
= mlx5_ib_devx_create(dev
, true);
1879 context
->devx_uid
= err
;
1882 lib_uar_4k
= req
.lib_caps
& MLX5_LIB_CAP_4K_UAR
;
1883 lib_uar_dyn
= req
.lib_caps
& MLX5_LIB_CAP_DYN_UAR
;
1884 bfregi
= &context
->bfregi
;
1887 bfregi
->lib_uar_dyn
= lib_uar_dyn
;
1891 /* updates req->total_num_bfregs */
1892 err
= calc_total_bfregs(dev
, lib_uar_4k
, &req
, bfregi
);
1896 mutex_init(&bfregi
->lock
);
1897 bfregi
->lib_uar_4k
= lib_uar_4k
;
1898 bfregi
->count
= kcalloc(bfregi
->total_num_bfregs
, sizeof(*bfregi
->count
),
1900 if (!bfregi
->count
) {
1905 bfregi
->sys_pages
= kcalloc(bfregi
->num_sys_pages
,
1906 sizeof(*bfregi
->sys_pages
),
1908 if (!bfregi
->sys_pages
) {
1913 err
= allocate_uars(dev
, context
);
1918 err
= mlx5_ib_alloc_transport_domain(dev
, &context
->tdn
,
1923 INIT_LIST_HEAD(&context
->db_page_list
);
1924 mutex_init(&context
->db_page_mutex
);
1926 context
->cqe_version
= min_t(__u8
,
1927 (__u8
)MLX5_CAP_GEN(dev
->mdev
, cqe_version
),
1928 req
.max_cqe_version
);
1930 err
= set_ucontext_resp(uctx
, &resp
);
1934 resp
.response_length
= min(udata
->outlen
, sizeof(resp
));
1935 err
= ib_copy_to_udata(udata
, &resp
, resp
.response_length
);
1940 bfregi
->num_low_latency_bfregs
= req
.num_low_latency_bfregs
;
1941 context
->lib_caps
= req
.lib_caps
;
1942 print_lib_caps(dev
, context
->lib_caps
);
1944 if (mlx5_ib_lag_should_assign_affinity(dev
)) {
1945 u32 port
= mlx5_core_native_port_num(dev
->mdev
) - 1;
1947 atomic_set(&context
->tx_port_affinity
,
1949 1, &dev
->port
[port
].roce
.tx_port_affinity
));
1955 mlx5_ib_dealloc_transport_domain(dev
, context
->tdn
, context
->devx_uid
);
1958 deallocate_uars(dev
, context
);
1961 kfree(bfregi
->sys_pages
);
1964 kfree(bfregi
->count
);
1967 if (req
.flags
& MLX5_IB_ALLOC_UCTX_DEVX
)
1968 mlx5_ib_devx_destroy(dev
, context
->devx_uid
);
1974 static int mlx5_ib_query_ucontext(struct ib_ucontext
*ibcontext
,
1975 struct uverbs_attr_bundle
*attrs
)
1977 struct mlx5_ib_alloc_ucontext_resp uctx_resp
= {};
1980 ret
= set_ucontext_resp(ibcontext
, &uctx_resp
);
1984 uctx_resp
.response_length
=
1986 uverbs_attr_get_len(attrs
,
1987 MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX
),
1990 ret
= uverbs_copy_to_struct_or_zero(attrs
,
1991 MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX
,
1997 static void mlx5_ib_dealloc_ucontext(struct ib_ucontext
*ibcontext
)
1999 struct mlx5_ib_ucontext
*context
= to_mucontext(ibcontext
);
2000 struct mlx5_ib_dev
*dev
= to_mdev(ibcontext
->device
);
2001 struct mlx5_bfreg_info
*bfregi
;
2003 bfregi
= &context
->bfregi
;
2004 mlx5_ib_dealloc_transport_domain(dev
, context
->tdn
, context
->devx_uid
);
2006 deallocate_uars(dev
, context
);
2007 kfree(bfregi
->sys_pages
);
2008 kfree(bfregi
->count
);
2010 if (context
->devx_uid
)
2011 mlx5_ib_devx_destroy(dev
, context
->devx_uid
);
2014 static phys_addr_t
uar_index2pfn(struct mlx5_ib_dev
*dev
,
2017 int fw_uars_per_page
;
2019 fw_uars_per_page
= MLX5_CAP_GEN(dev
->mdev
, uar_4k
) ? MLX5_UARS_IN_PAGE
: 1;
2021 return (dev
->mdev
->bar_addr
>> PAGE_SHIFT
) + uar_idx
/ fw_uars_per_page
;
2024 static u64
uar_index2paddress(struct mlx5_ib_dev
*dev
,
2027 unsigned int fw_uars_per_page
;
2029 fw_uars_per_page
= MLX5_CAP_GEN(dev
->mdev
, uar_4k
) ?
2030 MLX5_UARS_IN_PAGE
: 1;
2032 return (dev
->mdev
->bar_addr
+ (uar_idx
/ fw_uars_per_page
) * PAGE_SIZE
);
2035 static int get_command(unsigned long offset
)
2037 return (offset
>> MLX5_IB_MMAP_CMD_SHIFT
) & MLX5_IB_MMAP_CMD_MASK
;
2040 static int get_arg(unsigned long offset
)
2042 return offset
& ((1 << MLX5_IB_MMAP_CMD_SHIFT
) - 1);
2045 static int get_index(unsigned long offset
)
2047 return get_arg(offset
);
2050 /* Index resides in an extra byte to enable larger values than 255 */
2051 static int get_extended_index(unsigned long offset
)
2053 return get_arg(offset
) | ((offset
>> 16) & 0xff) << 8;
2057 static void mlx5_ib_disassociate_ucontext(struct ib_ucontext
*ibcontext
)
2061 static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd
)
2064 case MLX5_IB_MMAP_WC_PAGE
:
2066 case MLX5_IB_MMAP_REGULAR_PAGE
:
2067 return "best effort WC";
2068 case MLX5_IB_MMAP_NC_PAGE
:
2070 case MLX5_IB_MMAP_DEVICE_MEM
:
2071 return "Device Memory";
2077 static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev
*dev
,
2078 struct vm_area_struct
*vma
,
2079 struct mlx5_ib_ucontext
*context
)
2081 if ((vma
->vm_end
- vma
->vm_start
!= PAGE_SIZE
) ||
2082 !(vma
->vm_flags
& VM_SHARED
))
2085 if (get_index(vma
->vm_pgoff
) != MLX5_IB_CLOCK_INFO_V1
)
2088 if (vma
->vm_flags
& (VM_WRITE
| VM_EXEC
))
2090 vm_flags_clear(vma
, VM_MAYWRITE
);
2092 if (!dev
->mdev
->clock_info
)
2095 return vm_insert_page(vma
, vma
->vm_start
,
2096 virt_to_page(dev
->mdev
->clock_info
));
2099 static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry
*entry
)
2101 struct mlx5_user_mmap_entry
*mentry
= to_mmmap(entry
);
2102 struct mlx5_ib_dev
*dev
= to_mdev(entry
->ucontext
->device
);
2103 struct mlx5_var_table
*var_table
= &dev
->var_table
;
2104 struct mlx5_ib_ucontext
*context
= to_mucontext(entry
->ucontext
);
2106 switch (mentry
->mmap_flag
) {
2107 case MLX5_IB_MMAP_TYPE_MEMIC
:
2108 case MLX5_IB_MMAP_TYPE_MEMIC_OP
:
2109 mlx5_ib_dm_mmap_free(dev
, mentry
);
2111 case MLX5_IB_MMAP_TYPE_VAR
:
2112 mutex_lock(&var_table
->bitmap_lock
);
2113 clear_bit(mentry
->page_idx
, var_table
->bitmap
);
2114 mutex_unlock(&var_table
->bitmap_lock
);
2117 case MLX5_IB_MMAP_TYPE_UAR_WC
:
2118 case MLX5_IB_MMAP_TYPE_UAR_NC
:
2119 mlx5_cmd_uar_dealloc(dev
->mdev
, mentry
->page_idx
,
2128 static int uar_mmap(struct mlx5_ib_dev
*dev
, enum mlx5_ib_mmap_cmd cmd
,
2129 struct vm_area_struct
*vma
,
2130 struct mlx5_ib_ucontext
*context
)
2132 struct mlx5_bfreg_info
*bfregi
= &context
->bfregi
;
2137 u32 bfreg_dyn_idx
= 0;
2139 int dyn_uar
= (cmd
== MLX5_IB_MMAP_ALLOC_WC
);
2140 int max_valid_idx
= dyn_uar
? bfregi
->num_sys_pages
:
2141 bfregi
->num_static_sys_pages
;
2143 if (bfregi
->lib_uar_dyn
)
2146 if (vma
->vm_end
- vma
->vm_start
!= PAGE_SIZE
)
2150 idx
= get_extended_index(vma
->vm_pgoff
) + bfregi
->num_static_sys_pages
;
2152 idx
= get_index(vma
->vm_pgoff
);
2154 if (idx
>= max_valid_idx
) {
2155 mlx5_ib_warn(dev
, "invalid uar index %lu, max=%d\n",
2156 idx
, max_valid_idx
);
2161 case MLX5_IB_MMAP_WC_PAGE
:
2162 case MLX5_IB_MMAP_ALLOC_WC
:
2163 case MLX5_IB_MMAP_REGULAR_PAGE
:
2164 /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
2165 prot
= pgprot_writecombine(vma
->vm_page_prot
);
2167 case MLX5_IB_MMAP_NC_PAGE
:
2168 prot
= pgprot_noncached(vma
->vm_page_prot
);
2177 uars_per_page
= get_uars_per_sys_page(dev
, bfregi
->lib_uar_4k
);
2178 bfreg_dyn_idx
= idx
* (uars_per_page
* MLX5_NON_FP_BFREGS_PER_UAR
);
2179 if (bfreg_dyn_idx
>= bfregi
->total_num_bfregs
) {
2180 mlx5_ib_warn(dev
, "invalid bfreg_dyn_idx %u, max=%u\n",
2181 bfreg_dyn_idx
, bfregi
->total_num_bfregs
);
2185 mutex_lock(&bfregi
->lock
);
2186 /* Fail if uar already allocated, first bfreg index of each
2187 * page holds its count.
2189 if (bfregi
->count
[bfreg_dyn_idx
]) {
2190 mlx5_ib_warn(dev
, "wrong offset, idx %lu is busy, bfregn=%u\n", idx
, bfreg_dyn_idx
);
2191 mutex_unlock(&bfregi
->lock
);
2195 bfregi
->count
[bfreg_dyn_idx
]++;
2196 mutex_unlock(&bfregi
->lock
);
2198 err
= mlx5_cmd_uar_alloc(dev
->mdev
, &uar_index
,
2201 mlx5_ib_warn(dev
, "UAR alloc failed\n");
2205 uar_index
= bfregi
->sys_pages
[idx
];
2208 pfn
= uar_index2pfn(dev
, uar_index
);
2209 mlx5_ib_dbg(dev
, "uar idx 0x%lx, pfn %pa\n", idx
, &pfn
);
2211 err
= rdma_user_mmap_io(&context
->ibucontext
, vma
, pfn
, PAGE_SIZE
,
2215 "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
2216 err
, mmap_cmd2str(cmd
));
2221 bfregi
->sys_pages
[idx
] = uar_index
;
2228 mlx5_cmd_uar_dealloc(dev
->mdev
, idx
, context
->devx_uid
);
2231 mlx5_ib_free_bfreg(dev
, bfregi
, bfreg_dyn_idx
);
static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
{
	unsigned long idx;
	u8 command;

	command = get_command(vma->vm_pgoff);
	idx = get_extended_index(vma->vm_pgoff);

	return (command << 16 | idx);
}

static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
			       struct vm_area_struct *vma,
			       struct ib_ucontext *ucontext)
{
	struct mlx5_user_mmap_entry *mentry;
	struct rdma_user_mmap_entry *entry;
	unsigned long pgoff;
	pgprot_t prot;
	phys_addr_t pfn;
	int ret;

	pgoff = mlx5_vma_to_pgoff(vma);
	entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff);
	if (!entry)
		return -EINVAL;

	mentry = to_mmmap(entry);
	pfn = (mentry->address >> PAGE_SHIFT);
	if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR ||
	    mentry->mmap_flag == MLX5_IB_MMAP_TYPE_UAR_NC)
		prot = pgprot_noncached(vma->vm_page_prot);
	else
		prot = pgprot_writecombine(vma->vm_page_prot);
	ret = rdma_user_mmap_io(ucontext, vma, pfn,
				entry->npages * PAGE_SIZE,
				prot, entry);
	rdma_user_mmap_entry_put(&mentry->rdma_entry);
	return ret;
}

static u64 mlx5_entry_to_mmap_offset(struct mlx5_user_mmap_entry *entry)
{
	u64 cmd = (entry->rdma_entry.start_pgoff >> 16) & 0xFFFF;
	u64 index = entry->rdma_entry.start_pgoff & 0xFFFF;

	return (((index >> 8) << 16) | (cmd << MLX5_IB_MMAP_CMD_SHIFT) |
		(index & 0xFF)) << PAGE_SHIFT;
}

static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	unsigned long command;
	phys_addr_t pfn;

	command = get_command(vma->vm_pgoff);
	switch (command) {
	case MLX5_IB_MMAP_WC_PAGE:
	case MLX5_IB_MMAP_ALLOC_WC:
		if (!dev->wc_support)
			return -EPERM;
		fallthrough;
	case MLX5_IB_MMAP_NC_PAGE:
	case MLX5_IB_MMAP_REGULAR_PAGE:
		return uar_mmap(dev, command, vma, context);

	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
		return -EOPNOTSUPP;

	case MLX5_IB_MMAP_CORE_CLOCK:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		if (vma->vm_flags & VM_WRITE)
			return -EPERM;
		vm_flags_clear(vma, VM_MAYWRITE);

		/* Don't expose to user-space information it shouldn't have */
		if (PAGE_SIZE > 4096)
			return -EOPNOTSUPP;

		pfn = (dev->mdev->iseg_base +
		       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
			PAGE_SHIFT;
		return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
					 PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot),
					 NULL);
	case MLX5_IB_MMAP_CLOCK_INFO:
		return mlx5_ib_mmap_clock_info_page(dev, vma, context);

	default:
		return mlx5_ib_mmap_offset(dev, vma, ibcontext);
	}
}

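/*
 * PD allocation goes straight to firmware via the ALLOC_PD command; when a
 * udata is present the returned pdn is reported back to user space through
 * the driver-specific response structure.
 */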
static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mlx5_ib_pd *pd = to_mpd(ibpd);
	struct ib_device *ibdev = ibpd->device;
	struct mlx5_ib_alloc_pd_resp resp;
	int err;
	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
	u16 uid;
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	uid = context ? context->devx_uid : 0;
	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
	MLX5_SET(alloc_pd_in, in, uid, uid);
	err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out);
	if (err)
		return err;

	pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
	pd->uid = uid;
	if (udata) {
		resp.pdn = pd->pdn;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
			return -EFAULT;
		}
	}

	return 0;
}

static int mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
	struct mlx5_ib_pd *mpd = to_mpd(pd);

	return mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
}

static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *mqp = to_mqp(ibqp);
	int err;
	u16 uid;

	uid = ibqp->pd ?
		to_mpd(ibqp->pd)->uid : 0;

	if (mqp->flags & IB_QP_CREATE_SOURCE_QPN) {
		mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
		return -EOPNOTSUPP;
	}

	err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
	if (err)
		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}

static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	int err;
	u16 uid;

	uid = ibqp->pd ?
		to_mpd(ibqp->pd)->uid : 0;
	err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
	if (err)
		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}

static int init_node_data(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
	if (err)
		return err;

	dev->mdev->rev_id = dev->mdev->pdev->revision;

	return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
}

static ssize_t fw_pages_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);

	return sysfs_emit(buf, "%d\n", dev->mdev->priv.fw_pages);
}
static DEVICE_ATTR_RO(fw_pages);

static ssize_t reg_pages_show(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);

	return sysfs_emit(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
}
static DEVICE_ATTR_RO(reg_pages);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);

	return sysfs_emit(buf, "MT%d\n", dev->mdev->pdev->device);
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t hw_rev_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);

	return sysfs_emit(buf, "%x\n", dev->mdev->rev_id);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t board_id_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);

	return sysfs_emit(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
			  dev->mdev->board_id);
}
static DEVICE_ATTR_RO(board_id);

static struct attribute *mlx5_class_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	&dev_attr_fw_pages.attr,
	&dev_attr_reg_pages.attr,
	NULL,
};

static const struct attribute_group mlx5_attr_group = {
	.attrs = mlx5_class_attributes,
};

static void pkey_change_handler(struct work_struct *work)
{
	struct mlx5_ib_port_resources *ports =
		container_of(work, struct mlx5_ib_port_resources,
			     pkey_change_work);

	if (!ports->gsi)
		/*
		 * We got this event before device was fully configured
		 * and MAD registration code wasn't called/finished yet.
		 */
		return;

	mlx5_ib_gsi_pkey_change(ports->gsi);
}

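/*
 * On a fatal device error no further completion interrupts will arrive, so
 * walk every QP on the device and invoke the completion handler of any CQ
 * that still has outstanding work, letting consumers observe the
 * flush-in-error state.
 */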
static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
{
	struct mlx5_ib_qp *mqp;
	struct mlx5_ib_cq *send_mcq, *recv_mcq;
	struct mlx5_core_cq *mcq;
	struct list_head cq_armed_list;
	unsigned long flags_qp;
	unsigned long flags_cq;
	unsigned long flags;

	INIT_LIST_HEAD(&cq_armed_list);

	/* Go over qp list reside on that ibdev, sync with create/destroy qp.*/
	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
		if (mqp->sq.tail != mqp->sq.head) {
			send_mcq = to_mcq(mqp->ibqp.send_cq);
			spin_lock_irqsave(&send_mcq->lock, flags_cq);
			if (send_mcq->mcq.comp &&
			    mqp->ibqp.send_cq->comp_handler) {
				if (!send_mcq->mcq.reset_notify_added) {
					send_mcq->mcq.reset_notify_added = 1;
					list_add_tail(&send_mcq->mcq.reset_notify,
						      &cq_armed_list);
				}
			}
			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
		}
		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
		/* no handling is needed for SRQ */
		if (!mqp->ibqp.srq) {
			if (mqp->rq.tail != mqp->rq.head) {
				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
				if (recv_mcq->mcq.comp &&
				    mqp->ibqp.recv_cq->comp_handler) {
					if (!recv_mcq->mcq.reset_notify_added) {
						recv_mcq->mcq.reset_notify_added = 1;
						list_add_tail(&recv_mcq->mcq.reset_notify,
							      &cq_armed_list);
					}
				}
				spin_unlock_irqrestore(&recv_mcq->lock,
						       flags_cq);
			}
		}
		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
	}
	/*At that point all inflight post send were put to be executed as of we
	 * lock/unlock above locks Now need to arm all involved CQs.
	 */
	list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
		mcq->comp(mcq, NULL);
	}
	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
}

static void delay_drop_handler(struct work_struct *work)
{
	int err;
	struct mlx5_ib_delay_drop *delay_drop =
		container_of(work, struct mlx5_ib_delay_drop,
			     delay_drop_work);

	atomic_inc(&delay_drop->events_cnt);

	mutex_lock(&delay_drop->lock);
	err = mlx5_core_set_delay_drop(delay_drop->dev, delay_drop->timeout);
	if (err) {
		mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
			     delay_drop->timeout);
		delay_drop->activate = false;
	}
	mutex_unlock(&delay_drop->lock);
}

static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
				 struct ib_event *ibev)
{
	u32 port = (eqe->data.port.port >> 4) & 0xf;

	switch (eqe->sub_type) {
	case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
		    IB_LINK_LAYER_ETHERNET)
			schedule_work(&ibdev->delay_drop.delay_drop_work);
		break;
	default: /* do nothing */
		return;
	}
}

static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
			      struct ib_event *ibev)
{
	u32 port = (eqe->data.port.port >> 4) & 0xf;

	ibev->element.port_num = port;

	switch (eqe->sub_type) {
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		/* In RoCE, port up/down events are handled in
		 * mlx5_netdev_event().
		 */
		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
		    IB_LINK_LAYER_ETHERNET)
			return -EINVAL;

		ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
				IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
		break;

	case MLX5_PORT_CHANGE_SUBTYPE_LID:
		ibev->event = IB_EVENT_LID_CHANGE;
		break;

	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
		ibev->event = IB_EVENT_PKEY_CHANGE;
		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
		break;

	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
		ibev->event = IB_EVENT_GID_CHANGE;
		break;

	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
		ibev->event = IB_EVENT_CLIENT_REREGISTER;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

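/*
 * Device events arrive in atomic context from the mlx5 core notifier and are
 * bounced to mlx5_ib_event_wq; this worker translates them into IB events
 * and dispatches them to registered clients.
 */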
static void mlx5_ib_handle_event(struct work_struct *_work)
{
	struct mlx5_ib_event_work *work =
		container_of(_work, struct mlx5_ib_event_work, work);
	struct mlx5_ib_dev *ibdev;
	struct ib_event ibev;
	bool fatal = false;

	if (work->is_slave) {
		ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
		if (!ibdev)
			goto out;
	} else {
		ibdev = work->dev;
	}

	switch (work->event) {
	case MLX5_DEV_EVENT_SYS_ERROR:
		ibev.event = IB_EVENT_DEVICE_FATAL;
		mlx5_ib_handle_internal_error(ibdev);
		ibev.element.port_num = (u8)(unsigned long)work->param;
		fatal = true;
		break;
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		if (handle_port_change(ibdev, work->param, &ibev))
			goto out;
		break;
	case MLX5_EVENT_TYPE_GENERAL_EVENT:
		handle_general_event(ibdev, work->param, &ibev);
		fallthrough;
	default:
		goto out;
	}

	ibev.device = &ibdev->ib_dev;

	if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
		mlx5_ib_warn(ibdev, "warning: event on port %d\n", ibev.element.port_num);
		goto out;
	}

	if (ibdev->ib_active)
		ib_dispatch_event(&ibev);

	if (fatal)
		ibdev->ib_active = false;
out:
	kfree(work);
}

static int mlx5_ib_event(struct notifier_block *nb,
			 unsigned long event, void *param)
{
	struct mlx5_ib_event_work *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, mlx5_ib_handle_event);
	work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
	work->is_slave = false;
	work->param = param;
	work->event = event;

	queue_work(mlx5_ib_event_wq, &work->work);

	return NOTIFY_OK;
}

static int mlx5_ib_event_slave_port(struct notifier_block *nb,
				    unsigned long event, void *param)
{
	struct mlx5_ib_event_work *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, mlx5_ib_handle_event);
	work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events);
	work->is_slave = true;
	work->param = param;
	work->event = event;
	queue_work(mlx5_ib_event_wq, &work->work);

	return NOTIFY_OK;
}

static int set_has_smi_cap(struct mlx5_ib_dev *dev)
{
	struct mlx5_hca_vport_context vport_ctx;
	int err;
	int port;

	if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
		return 0;

	for (port = 1; port <= dev->num_ports; port++) {
		if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) {
			dev->port_caps[port - 1].has_smi = true;
			continue;
		}
		err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
						   &vport_ctx);
		if (err) {
			mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
				    port, err);
			return err;
		}
		dev->port_caps[port - 1].has_smi = vport_ctx.has_smi;
	}

	return 0;
}

static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
	unsigned int port;

	rdma_for_each_port (&dev->ib_dev, port)
		mlx5_query_ext_port_caps(dev, port);
}

static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
{
	switch (umr_fence_cap) {
	case MLX5_CAP_UMR_FENCE_NONE:
		return MLX5_FENCE_MODE_NONE;
	case MLX5_CAP_UMR_FENCE_SMALL:
		return MLX5_FENCE_MODE_INITIATOR_SMALL;
	default:
		return MLX5_FENCE_MODE_STRONG_ORDERING;
	}
}

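/*
 * Allocate the shared device resources (PD, CQ, the two XRC domains and the
 * pair of SRQs) used internally by the driver; XRC support is required,
 * hence the early capability check.
 */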
static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	struct ib_srq_init_attr attr;
	struct ib_device *ibdev;
	struct ib_cq_init_attr cq_attr = {.cqe = 1};
	int port;
	int ret;

	ibdev = &dev->ib_dev;

	if (!MLX5_CAP_GEN(dev->mdev, xrc))
		return -EOPNOTSUPP;

	devr->p0 = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(devr->p0))
		return PTR_ERR(devr->p0);

	devr->c0 = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
	if (IS_ERR(devr->c0)) {
		ret = PTR_ERR(devr->c0);
		goto err_dealloc_pd;
	}

	ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
	if (ret)
		goto err_destroy_cq;

	ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0);
	if (ret)
		goto err_dealloc_xrcd0;

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_XRC;
	attr.ext.cq = devr->c0;

	devr->s0 = ib_create_srq(devr->p0, &attr);
	if (IS_ERR(devr->s0)) {
		ret = PTR_ERR(devr->s0);
		goto err_dealloc_xrcd1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_BASIC;

	devr->s1 = ib_create_srq(devr->p0, &attr);
	if (IS_ERR(devr->s1)) {
		ret = PTR_ERR(devr->s1);
		goto err_destroy_srq0;
	}

	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
		INIT_WORK(&devr->ports[port].pkey_change_work,
			  pkey_change_handler);

	return 0;

err_destroy_srq0:
	ib_destroy_srq(devr->s0);
err_dealloc_xrcd1:
	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
err_dealloc_xrcd0:
	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
err_destroy_cq:
	ib_destroy_cq(devr->c0);
err_dealloc_pd:
	ib_dealloc_pd(devr->p0);
	return ret;
}

static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	int port;

	/*
	 * Make sure no change P_Key work items are still executing.
	 *
	 * At this stage, the mlx5_ib_event should be unregistered
	 * and it ensures that no new works are added.
	 */
	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
		cancel_work_sync(&devr->ports[port].pkey_change_work);

	ib_destroy_srq(devr->s1);
	ib_destroy_srq(devr->s0);
	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
	ib_destroy_cq(devr->c0);
	ib_dealloc_pd(devr->p0);
}

static u32 get_core_cap_flags(struct ib_device *ibdev,
			      struct mlx5_hca_vport_context *rep)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
	u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
	u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
	bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
	u32 ret = 0;

	if (rep->grh_required)
		ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;

	if (ll == IB_LINK_LAYER_INFINIBAND)
		return ret | RDMA_CORE_PORT_IBA_IB;

	if (raw_support)
		ret |= RDMA_CORE_PORT_RAW_PACKET;

	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
		return ret;

	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
		return ret;

	if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
		ret |= RDMA_CORE_PORT_IBA_ROCE;

	if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
		ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return ret;
}

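/*
 * The immutable port data is computed once at registration time: table sizes
 * come from a regular port query, while the core capability flags depend on
 * the link layer and, for RoCE, on the RoCE versions and L3 types the device
 * reports.
 */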
static int mlx5_port_immutable(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
	struct mlx5_hca_vport_context rep = {0};
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	if (ll == IB_LINK_LAYER_INFINIBAND) {
		err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
						   &rep);
		if (err)
			return err;
	}

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static int mlx5_port_rep_immutable(struct ib_device *ibdev, u32 port_num,
				   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;

	return 0;
}

static void get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct mlx5_ib_dev *dev =
		container_of(ibdev, struct mlx5_ib_dev, ib_dev);
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
		 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
		 fw_rev_sub(dev->mdev));
}

static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
								 MLX5_FLOW_NAMESPACE_LAG);
	struct mlx5_flow_table *ft;
	int err;

	if (!ns || !mlx5_lag_is_active(mdev))
		return 0;

	err = mlx5_cmd_create_vport_lag(mdev);
	if (err)
		return err;

	ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_destroy_vport_lag;
	}

	dev->flow_db->lag_demux_ft = ft;
	dev->lag_ports = mlx5_lag_get_num_ports(mdev);
	dev->lag_active = true;
	return 0;

err_destroy_vport_lag:
	mlx5_cmd_destroy_vport_lag(mdev);
	return err;
}

static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;

	if (dev->lag_active) {
		dev->lag_active = false;

		mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
		dev->flow_db->lag_demux_ft = NULL;

		mlx5_cmd_destroy_vport_lag(mdev);
	}
}

static void mlx5_netdev_notifier_register(struct mlx5_roce *roce,
					  struct net_device *netdev)
{
	int err;

	if (roce->tracking_netdev)
		return;
	roce->tracking_netdev = netdev;
	roce->nb.notifier_call = mlx5_netdev_event;
	err = register_netdevice_notifier_dev_net(netdev, &roce->nb, &roce->nn);
	WARN_ON(err);
}

static void mlx5_netdev_notifier_unregister(struct mlx5_roce *roce)
{
	if (!roce->tracking_netdev)
		return;
	unregister_netdevice_notifier_dev_net(roce->tracking_netdev, &roce->nb,
					      &roce->nn);
	roce->tracking_netdev = NULL;
}

static int mlx5e_mdev_notifier_event(struct notifier_block *nb,
				     unsigned long event, void *data)
{
	struct mlx5_roce *roce = container_of(nb, struct mlx5_roce, mdev_nb);
	struct net_device *netdev = data;

	switch (event) {
	case MLX5_DRIVER_EVENT_UPLINK_NETDEV:
		if (netdev)
			mlx5_netdev_notifier_register(roce, netdev);
		else
			mlx5_netdev_notifier_unregister(roce);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static void mlx5_mdev_netdev_track(struct mlx5_ib_dev *dev, u32 port_num)
{
	struct mlx5_roce *roce = &dev->port[port_num].roce;

	roce->mdev_nb.notifier_call = mlx5e_mdev_notifier_event;
	mlx5_blocking_notifier_register(dev->mdev, &roce->mdev_nb);
	mlx5_core_uplink_netdev_event_replay(dev->mdev);
}

static void mlx5_mdev_netdev_untrack(struct mlx5_ib_dev *dev, u32 port_num)
{
	struct mlx5_roce *roce = &dev->port[port_num].roce;

	mlx5_blocking_notifier_unregister(dev->mdev, &roce->mdev_nb);
	mlx5_netdev_notifier_unregister(roce);
}

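/*
 * RoCE bring-up: enable RoCE on the NIC vport (unless this is a representor
 * or the raw Ethernet profile) and create the LAG demux flow table when the
 * ports are bonded.
 */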
static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
{
	int err;

	if (!dev->is_rep && dev->profile != &raw_eth_profile) {
		err = mlx5_nic_vport_enable_roce(dev->mdev);
		if (err)
			return err;
	}

	err = mlx5_eth_lag_init(dev);
	if (err)
		goto err_disable_roce;

	return 0;

err_disable_roce:
	if (!dev->is_rep && dev->profile != &raw_eth_profile)
		mlx5_nic_vport_disable_roce(dev->mdev);

	return err;
}

static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
{
	mlx5_eth_lag_cleanup(dev);
	if (!dev->is_rep && dev->profile != &raw_eth_profile)
		mlx5_nic_vport_disable_roce(dev->mdev);
}

static int mlx5_ib_rn_get_params(struct ib_device *device, u32 port_num,
				 enum rdma_netdev_t type,
				 struct rdma_netdev_alloc_params *params)
{
	if (type != RDMA_NETDEV_IPOIB)
		return -EOPNOTSUPP;

	return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
}

static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
				       size_t count, loff_t *pos)
{
	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
	char lbuf[20];
	int len;

	len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
	return simple_read_from_buffer(buf, count, pos, lbuf, len);
}

static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
					size_t count, loff_t *pos)
{
	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
	u32 timeout;
	u32 var;

	if (kstrtouint_from_user(buf, count, 0, &var))
		return -EFAULT;

	timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
			1000);
	if (timeout != var)
		mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
			    timeout);

	delay_drop->timeout = timeout;

	return count;
}

static const struct file_operations fops_delay_drop_timeout = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= delay_drop_timeout_write,
	.read	= delay_drop_timeout_read,
};

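/*
 * Multi-port HCA handling: a slave mlx5 core device is affiliated with the
 * master IB device so both physical ports are exposed through a single
 * ib_device.  Unbinding waits for every user of the slave mdev to drop its
 * reference before the affiliation is torn down.
 */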
static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
				      struct mlx5_ib_multiport_info *mpi)
{
	u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
	struct mlx5_ib_port *port = &ibdev->port[port_num];
	int comps;
	int err;
	int i;

	lockdep_assert_held(&mlx5_ib_multiport_mutex);

	mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);

	spin_lock(&port->mp.mpi_lock);
	if (!mpi->ibdev) {
		spin_unlock(&port->mp.mpi_lock);
		return;
	}

	mpi->ibdev = NULL;

	spin_unlock(&port->mp.mpi_lock);
	if (mpi->mdev_events.notifier_call)
		mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
	mpi->mdev_events.notifier_call = NULL;
	mlx5_mdev_netdev_untrack(ibdev, port_num);
	spin_lock(&port->mp.mpi_lock);

	comps = mpi->mdev_refcnt;
	if (comps) {
		mpi->unaffiliate = true;
		init_completion(&mpi->unref_comp);
		spin_unlock(&port->mp.mpi_lock);

		for (i = 0; i < comps; i++)
			wait_for_completion(&mpi->unref_comp);

		spin_lock(&port->mp.mpi_lock);
		mpi->unaffiliate = false;
	}

	port->mp.mpi = NULL;

	spin_unlock(&port->mp.mpi_lock);

	err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);

	mlx5_ib_dbg(ibdev, "unaffiliated port %u\n", port_num + 1);
	/* Log an error, still needed to cleanup the pointers and add
	 * it back to the list.
	 */
	if (err)
		mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
			    port_num + 1);

	ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
}

static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
				    struct mlx5_ib_multiport_info *mpi)
{
	u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
	int err;

	lockdep_assert_held(&mlx5_ib_multiport_mutex);

	spin_lock(&ibdev->port[port_num].mp.mpi_lock);
	if (ibdev->port[port_num].mp.mpi) {
		mlx5_ib_dbg(ibdev, "port %u already affiliated.\n",
			    port_num + 1);
		spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
		return false;
	}

	ibdev->port[port_num].mp.mpi = mpi;
	mpi->ibdev = ibdev;
	mpi->mdev_events.notifier_call = NULL;
	spin_unlock(&ibdev->port[port_num].mp.mpi_lock);

	err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
	if (err)
		goto unbind;

	mlx5_mdev_netdev_track(ibdev, port_num);

	mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
	mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);

	mlx5_ib_init_cong_debugfs(ibdev, port_num);

	return true;

unbind:
	mlx5_ib_unbind_slave_port(ibdev, mpi);
	return false;
}

static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
{
	u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
							  port_num + 1);
	struct mlx5_ib_multiport_info *mpi;
	int err;
	int i;

	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return 0;

	err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
						     &dev->sys_image_guid);
	if (err)
		return err;

	err = mlx5_nic_vport_enable_roce(dev->mdev);
	if (err)
		return err;

	mutex_lock(&mlx5_ib_multiport_mutex);
	for (i = 0; i < dev->num_ports; i++) {
		bool bound = false;

		/* build a stub multiport info struct for the native port. */
		if (i == port_num) {
			mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
			if (!mpi) {
				mutex_unlock(&mlx5_ib_multiport_mutex);
				mlx5_nic_vport_disable_roce(dev->mdev);
				return -ENOMEM;
			}

			mpi->is_master = true;
			mpi->mdev = dev->mdev;
			mpi->sys_image_guid = dev->sys_image_guid;
			dev->port[i].mp.mpi = mpi;
			mpi->ibdev = dev;
			mpi = NULL;
			continue;
		}

		list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
				    list) {
			if (dev->sys_image_guid == mpi->sys_image_guid &&
			    (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
				bound = mlx5_ib_bind_slave_port(dev, mpi);
			}

			if (bound) {
				dev_dbg(mpi->mdev->device,
					"removing port from unaffiliated list.\n");
				mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
				list_del(&mpi->list);
				break;
			}
		}
		if (!bound)
			mlx5_ib_dbg(dev, "no free port found for port %d\n",
				    i + 1);
	}

	list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
	mutex_unlock(&mlx5_ib_multiport_mutex);
	return err;
}

static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
{
	u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
							  port_num + 1);
	int i;

	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return;

	mutex_lock(&mlx5_ib_multiport_mutex);
	for (i = 0; i < dev->num_ports; i++) {
		if (dev->port[i].mp.mpi) {
			/* Destroy the native port stub */
			if (i == port_num) {
				kfree(dev->port[i].mp.mpi);
				dev->port[i].mp.mpi = NULL;
			} else {
				mlx5_ib_dbg(dev, "unbinding port_num: %u\n",
					    i + 1);
				list_add_tail(&dev->port[i].mp.mpi->list,
					      &mlx5_ib_unaffiliated_port_list);
				mlx5_ib_unbind_slave_port(dev,
							  dev->port[i].mp.mpi);
			}
		}
	}

	mlx5_ib_dbg(dev, "removing from devlist\n");
	list_del(&dev->ib_dev_list);
	mutex_unlock(&mlx5_ib_multiport_mutex);

	mlx5_nic_vport_disable_roce(dev->mdev);
}

static int mmap_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why,
			    struct uverbs_attr_bundle *attrs)
{
	struct mlx5_user_mmap_entry *obj = uobject->object;

	rdma_user_mmap_entry_remove(&obj->rdma_entry);
	return 0;
}

static int mlx5_rdma_user_mmap_entry_insert(struct mlx5_ib_ucontext *c,
					    struct mlx5_user_mmap_entry *entry,
					    size_t length)
{
	return rdma_user_mmap_entry_insert_range(
		&c->ibucontext, &entry->rdma_entry, length,
		(MLX5_IB_MMAP_OFFSET_START << 16),
		((MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1));
}

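/*
 * VAR and UAR pages handed out through the ioctl interface are tracked as
 * rdma user mmap entries: the alloc methods below reserve the hardware
 * resource, insert an entry into the per-context mmap table and return the
 * offset the application must pass to mmap().
 */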
static struct mlx5_user_mmap_entry *
alloc_var_entry(struct mlx5_ib_ucontext *c)
{
	struct mlx5_user_mmap_entry *entry;
	struct mlx5_var_table *var_table;
	u32 page_idx;
	int err;

	var_table = &to_mdev(c->ibucontext.device)->var_table;
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&var_table->bitmap_lock);
	page_idx = find_first_zero_bit(var_table->bitmap,
				       var_table->num_var_hw_entries);
	if (page_idx >= var_table->num_var_hw_entries) {
		err = -ENOSPC;
		mutex_unlock(&var_table->bitmap_lock);
		goto end;
	}

	set_bit(page_idx, var_table->bitmap);
	mutex_unlock(&var_table->bitmap_lock);

	entry->address = var_table->hw_start_addr +
				(page_idx * var_table->stride_size);
	entry->page_idx = page_idx;
	entry->mmap_flag = MLX5_IB_MMAP_TYPE_VAR;

	err = mlx5_rdma_user_mmap_entry_insert(c, entry,
					       var_table->stride_size);
	if (err)
		goto err_insert;

	return entry;

err_insert:
	mutex_lock(&var_table->bitmap_lock);
	clear_bit(page_idx, var_table->bitmap);
	mutex_unlock(&var_table->bitmap_lock);
end:
	kfree(entry);
	return ERR_PTR(err);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_VAR_OBJ_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
	struct mlx5_ib_ucontext *c;
	struct mlx5_user_mmap_entry *entry;
	u64 mmap_offset;
	u32 length;
	int err;

	c = to_mucontext(ib_uverbs_get_ucontext(attrs));
	if (IS_ERR(c))
		return PTR_ERR(c);

	entry = alloc_var_entry(c);
	if (IS_ERR(entry))
		return PTR_ERR(entry);

	mmap_offset = mlx5_entry_to_mmap_offset(entry);
	length = entry->rdma_entry.npages * PAGE_SIZE;
	uobj->object = entry;
	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
			     &mmap_offset, sizeof(mmap_offset));
	if (err)
		return err;

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
			     &entry->page_idx, sizeof(entry->page_idx));
	if (err)
		return err;

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
			     &length, sizeof(length));
	return err;
}

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_VAR_OBJ_ALLOC,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE,
			MLX5_IB_OBJECT_VAR,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
			    UVERBS_ATTR_TYPE(u64),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_VAR_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_VAR,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_VAR,
			    UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_ALLOC),
			    &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_DESTROY));

static bool var_is_supported(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	return (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
		MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q);
}

static struct mlx5_user_mmap_entry *
alloc_uar_entry(struct mlx5_ib_ucontext *c,
		enum mlx5_ib_uapi_uar_alloc_type alloc_type)
{
	struct mlx5_user_mmap_entry *entry;
	struct mlx5_ib_dev *dev;
	u32 uar_index;
	int err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	dev = to_mdev(c->ibucontext.device);
	err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, c->devx_uid);
	if (err)
		goto end;

	entry->page_idx = uar_index;
	entry->address = uar_index2paddress(dev, uar_index);
	if (alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
		entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_WC;
	else
		entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_NC;

	err = mlx5_rdma_user_mmap_entry_insert(c, entry, PAGE_SIZE);
	if (err)
		goto err_insert;

	return entry;

err_insert:
	mlx5_cmd_uar_dealloc(dev->mdev, uar_index, c->devx_uid);
end:
	kfree(entry);
	return ERR_PTR(err);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_UAR_OBJ_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
	enum mlx5_ib_uapi_uar_alloc_type alloc_type;
	struct mlx5_ib_ucontext *c;
	struct mlx5_user_mmap_entry *entry;
	u64 mmap_offset;
	u32 length;
	int err;

	c = to_mucontext(ib_uverbs_get_ucontext(attrs));
	if (IS_ERR(c))
		return PTR_ERR(c);

	err = uverbs_get_const(&alloc_type, attrs,
			       MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE);
	if (err)
		return err;

	if (alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF &&
	    alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC)
		return -EOPNOTSUPP;

	if (!to_mdev(c->ibucontext.device)->wc_support &&
	    alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
		return -EOPNOTSUPP;

	entry = alloc_uar_entry(c, alloc_type);
	if (IS_ERR(entry))
		return PTR_ERR(entry);

	mmap_offset = mlx5_entry_to_mmap_offset(entry);
	length = entry->rdma_entry.npages * PAGE_SIZE;
	uobj->object = entry;
	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
			     &mmap_offset, sizeof(mmap_offset));
	if (err)
		return err;

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
			     &entry->page_idx, sizeof(entry->page_idx));
	if (err)
		return err;

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
			     &length, sizeof(length));
	return err;
}

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_UAR_OBJ_ALLOC,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE,
			MLX5_IB_OBJECT_UAR,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE,
			     enum mlx5_ib_uapi_uar_alloc_type,
			     UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
			    UVERBS_ATTR_TYPE(u64),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_UAR_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_UAR,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
			    UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_ALLOC),
			    &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));

ADD_UVERBS_ATTRIBUTES_SIMPLE(
	mlx5_ib_query_context,
	UVERBS_OBJECT_DEVICE,
	UVERBS_METHOD_QUERY_CONTEXT,
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX,
		UVERBS_ATTR_STRUCT(struct mlx5_ib_alloc_ucontext_resp,
				   dump_fill_mkey),
		UA_MANDATORY));

static const struct uapi_definition mlx5_ib_defs[] = {
	UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
	UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
	UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
	UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
	UAPI_DEF_CHAIN(mlx5_ib_dm_defs),

	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
				      UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_UAR),
	{}
};

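/*
 * From here on the driver is organised as a list of init/cleanup stages:
 * each stage below implements one step of device bring-up, and the profile
 * tables at the end of the file decide which stages run for a given device
 * flavour.
 */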
static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_ib_cleanup_multiport_master(dev);
	WARN_ON(!xa_empty(&dev->odp_mkeys));
	mutex_destroy(&dev->cap_mask_mutex);
	WARN_ON(!xa_empty(&dev->sig_mrs));
	WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
}

static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	int err;
	int i;

	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
	dev->ib_dev.phys_port_cnt = dev->num_ports;
	dev->ib_dev.dev.parent = mdev->device;
	dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;

	for (i = 0; i < dev->num_ports; i++) {
		spin_lock_init(&dev->port[i].mp.mpi_lock);
		rwlock_init(&dev->port[i].roce.netdev_lock);
		dev->port[i].roce.dev = dev;
		dev->port[i].roce.native_port_num = i + 1;
		dev->port[i].roce.last_port_state = IB_PORT_DOWN;
	}

	err = mlx5_ib_init_multiport_master(dev);
	if (err)
		return err;

	err = set_has_smi_cap(dev);
	if (err)
		goto err_mp;

	err = mlx5_query_max_pkeys(&dev->ib_dev, &dev->pkey_table_len);
	if (err)
		goto err_mp;

	if (mlx5_use_mad_ifc(dev))
		get_ext_port_caps(dev);

	dev->ib_dev.num_comp_vectors	= mlx5_comp_vectors_count(mdev);

	mutex_init(&dev->cap_mask_mutex);
	INIT_LIST_HEAD(&dev->qp_list);
	spin_lock_init(&dev->reset_flow_resource_lock);
	xa_init(&dev->odp_mkeys);
	xa_init(&dev->sig_mrs);
	atomic_set(&dev->mkey_var, 0);

	spin_lock_init(&dev->dm.lock);
	dev->dm.dev = mdev;
	return 0;

err_mp:
	mlx5_ib_cleanup_multiport_master(dev);
	return err;
}

static int mlx5_ib_enable_driver(struct ib_device *dev)
{
	struct mlx5_ib_dev *mdev = to_mdev(dev);
	int ret;

	ret = mlx5_ib_test_wc(mdev);
	mlx5_ib_dbg(mdev, "Write-Combining %s",
		    mdev->wc_support ? "supported" : "not supported");

	return ret;
}

static const struct ib_device_ops mlx5_ib_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_MLX5,
	.uverbs_abi_ver	= MLX5_IB_UVERBS_ABI_VERSION,

	.add_gid = mlx5_ib_add_gid,
	.alloc_mr = mlx5_ib_alloc_mr,
	.alloc_mr_integrity = mlx5_ib_alloc_mr_integrity,
	.alloc_pd = mlx5_ib_alloc_pd,
	.alloc_ucontext = mlx5_ib_alloc_ucontext,
	.attach_mcast = mlx5_ib_mcg_attach,
	.check_mr_status = mlx5_ib_check_mr_status,
	.create_ah = mlx5_ib_create_ah,
	.create_cq = mlx5_ib_create_cq,
	.create_qp = mlx5_ib_create_qp,
	.create_srq = mlx5_ib_create_srq,
	.create_user_ah = mlx5_ib_create_ah,
	.dealloc_pd = mlx5_ib_dealloc_pd,
	.dealloc_ucontext = mlx5_ib_dealloc_ucontext,
	.del_gid = mlx5_ib_del_gid,
	.dereg_mr = mlx5_ib_dereg_mr,
	.destroy_ah = mlx5_ib_destroy_ah,
	.destroy_cq = mlx5_ib_destroy_cq,
	.destroy_qp = mlx5_ib_destroy_qp,
	.destroy_srq = mlx5_ib_destroy_srq,
	.detach_mcast = mlx5_ib_mcg_detach,
	.disassociate_ucontext = mlx5_ib_disassociate_ucontext,
	.drain_rq = mlx5_ib_drain_rq,
	.drain_sq = mlx5_ib_drain_sq,
	.device_group = &mlx5_attr_group,
	.enable_driver = mlx5_ib_enable_driver,
	.get_dev_fw_str = get_dev_fw_str,
	.get_dma_mr = mlx5_ib_get_dma_mr,
	.get_link_layer = mlx5_ib_port_link_layer,
	.map_mr_sg = mlx5_ib_map_mr_sg,
	.map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
	.mmap = mlx5_ib_mmap,
	.mmap_free = mlx5_ib_mmap_free,
	.modify_cq = mlx5_ib_modify_cq,
	.modify_device = mlx5_ib_modify_device,
	.modify_port = mlx5_ib_modify_port,
	.modify_qp = mlx5_ib_modify_qp,
	.modify_srq = mlx5_ib_modify_srq,
	.poll_cq = mlx5_ib_poll_cq,
	.post_recv = mlx5_ib_post_recv_nodrain,
	.post_send = mlx5_ib_post_send_nodrain,
	.post_srq_recv = mlx5_ib_post_srq_recv,
	.process_mad = mlx5_ib_process_mad,
	.query_ah = mlx5_ib_query_ah,
	.query_device = mlx5_ib_query_device,
	.query_gid = mlx5_ib_query_gid,
	.query_pkey = mlx5_ib_query_pkey,
	.query_qp = mlx5_ib_query_qp,
	.query_srq = mlx5_ib_query_srq,
	.query_ucontext = mlx5_ib_query_ucontext,
	.reg_user_mr = mlx5_ib_reg_user_mr,
	.reg_user_mr_dmabuf = mlx5_ib_reg_user_mr_dmabuf,
	.req_notify_cq = mlx5_ib_arm_cq,
	.rereg_user_mr = mlx5_ib_rereg_user_mr,
	.resize_cq = mlx5_ib_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
	INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
};

static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
	.rdma_netdev_get_params = mlx5_ib_rn_get_params,
};

static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
	.get_vf_config = mlx5_ib_get_vf_config,
	.get_vf_guid = mlx5_ib_get_vf_guid,
	.get_vf_stats = mlx5_ib_get_vf_stats,
	.set_vf_guid = mlx5_ib_set_vf_guid,
	.set_vf_link_state = mlx5_ib_set_vf_link_state,
};

static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
	.alloc_mw = mlx5_ib_alloc_mw,
	.dealloc_mw = mlx5_ib_dealloc_mw,

	INIT_RDMA_OBJ_SIZE(ib_mw, mlx5_ib_mw, ibmw),
};

static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
	.alloc_xrcd = mlx5_ib_alloc_xrcd,
	.dealloc_xrcd = mlx5_ib_dealloc_xrcd,

	INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx5_ib_xrcd, ibxrcd),
};

static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_var_table *var_table = &dev->var_table;
	u8 log_doorbell_bar_size;
	u8 log_doorbell_stride;
	u64 bar_size;

	log_doorbell_bar_size = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
							    log_doorbell_bar_size);
	log_doorbell_stride = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
							  log_doorbell_stride);
	var_table->hw_start_addr = dev->mdev->bar_addr +
				MLX5_CAP64_DEV_VDPA_EMULATION(mdev,
					doorbell_bar_offset);
	bar_size = (1ULL << log_doorbell_bar_size) * 4096;
	var_table->stride_size = 1ULL << log_doorbell_stride;
	var_table->num_var_hw_entries = div_u64(bar_size,
						var_table->stride_size);
	mutex_init(&var_table->bitmap_lock);
	var_table->bitmap = bitmap_zalloc(var_table->num_var_hw_entries,
					  GFP_KERNEL);
	return (var_table->bitmap) ? 0 : -ENOMEM;
}

static void mlx5_ib_stage_caps_cleanup(struct mlx5_ib_dev *dev)
{
	bitmap_free(dev->var_table.bitmap);
}

static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	int err;

	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
	    IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
		ib_set_device_ops(&dev->ib_dev,
				  &mlx5_ib_dev_ipoib_enhanced_ops);

	if (mlx5_core_is_pf(mdev))
		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);

	dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));

	if (MLX5_CAP_GEN(mdev, imaicl))
		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);

	if (MLX5_CAP_GEN(mdev, xrc))
		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);

	if (MLX5_CAP_DEV_MEM(mdev, memic) ||
	    MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
	    MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);

	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);

	if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
		dev->ib_dev.driver_def = mlx5_ib_defs;

	err = init_node_data(dev);
	if (err)
		return err;

	if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	    (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
	     MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
		mutex_init(&dev->lb.mutex);

	if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
			MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
		err = mlx5_ib_init_var_table(dev);
		if (err)
			return err;
	}

	dev->ib_dev.use_cq_dim = true;

	return 0;
}

static const struct ib_device_ops mlx5_ib_dev_port_ops = {
	.get_port_immutable = mlx5_port_immutable,
	.query_port = mlx5_ib_query_port,
};

static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
{
	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
	return 0;
}

static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
	.get_port_immutable = mlx5_port_rep_immutable,
	.query_port = mlx5_ib_rep_query_port,
	.query_pkey = mlx5_ib_rep_query_pkey,
};

static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
{
	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
	return 0;
}

static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
	.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
	.create_wq = mlx5_ib_create_wq,
	.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
	.destroy_wq = mlx5_ib_destroy_wq,
	.get_netdev = mlx5_ib_get_netdev,
	.modify_wq = mlx5_ib_modify_wq,

	INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx5_ib_rwq_ind_table,
			   ib_rwq_ind_tbl),
};

static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	enum rdma_link_layer ll;
	int port_type_cap;
	u32 port_num = 0;
	int err;

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);

		port_num = mlx5_core_native_port_num(dev->mdev) - 1;

		/* Register only for native ports */
		mlx5_mdev_netdev_track(dev, port_num);

		err = mlx5_enable_eth(dev);
		if (err)
			goto cleanup;
	}

	return 0;
cleanup:
	mlx5_mdev_netdev_untrack(dev, port_num);
	return err;
}

static void mlx5_ib_roce_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	enum rdma_link_layer ll;
	int port_type_cap;
	u32 port_num;

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		mlx5_disable_eth(dev);

		port_num = mlx5_core_native_port_num(dev->mdev) - 1;
		mlx5_mdev_netdev_untrack(dev, port_num);
	}
}

static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
{
	mlx5_ib_init_cong_debugfs(dev,
				  mlx5_core_native_port_num(dev->mdev) - 1);
	return 0;
}

static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_ib_cleanup_cong_debugfs(dev,
				     mlx5_core_native_port_num(dev->mdev) - 1);
}

static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
{
	dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
	return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
}

static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
}

static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
	if (err)
		return err;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
	if (err)
		mlx5_free_bfreg(dev->mdev, &dev->bfreg);

	return err;
}

static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
}

static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
{
	const char *name;

	if (!mlx5_lag_is_active(dev->mdev))
		name = "mlx5_%d";
	else
		name = "mlx5_bond_%d";
	return ib_register_device(&dev->ib_dev, name, &dev->mdev->pdev->dev);
}

static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_mkey_cache_cleanup(dev);
	if (err)
		mlx5_ib_warn(dev, "mr cache cleanup failed\n");

	mlx5r_umr_resource_cleanup(dev);
}

static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
{
	ib_unregister_device(&dev->ib_dev);
}

static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
{
	int ret;

	ret = mlx5r_umr_resource_init(dev);
	if (ret)
		return ret;

	ret = mlx5_mkey_cache_init(dev);
	if (ret) {
		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
		mlx5r_umr_resource_cleanup(dev);
	}
	return ret;
}

static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
{
	struct dentry *root;

	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
		return 0;

	mutex_init(&dev->delay_drop.lock);
	dev->delay_drop.dev = dev;
	dev->delay_drop.activate = false;
	dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
	INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
	atomic_set(&dev->delay_drop.rqs_cnt, 0);
	atomic_set(&dev->delay_drop.events_cnt, 0);

	if (!mlx5_debugfs_root)
		return 0;

	root = debugfs_create_dir("delay_drop", mlx5_debugfs_get_dev_root(dev->mdev));
	dev->delay_drop.dir_debugfs = root;

	debugfs_create_atomic_t("num_timeout_events", 0400, root,
				&dev->delay_drop.events_cnt);
	debugfs_create_atomic_t("num_rqs", 0400, root,
				&dev->delay_drop.rqs_cnt);
	debugfs_create_file("timeout", 0600, root, &dev->delay_drop,
			    &fops_delay_drop_timeout);
	return 0;
}

static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
{
	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
		return;

	cancel_work_sync(&dev->delay_drop.delay_drop_work);
	if (!dev->delay_drop.dir_debugfs)
		return;

	debugfs_remove_recursive(dev->delay_drop.dir_debugfs);
	dev->delay_drop.dir_debugfs = NULL;
}

static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
{
	dev->mdev_events.notifier_call = mlx5_ib_event;
	mlx5_notifier_register(dev->mdev, &dev->mdev_events);
	return 0;
}

static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
}

void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage)
{
	dev->ib_active = false;

	/* Number of stages to cleanup */
	while (stage) {
		stage--;
		if (profile->stage[stage].cleanup)
			profile->stage[stage].cleanup(dev);
	}

	kfree(dev->port);
	ib_dealloc_device(&dev->ib_dev);
}

int __mlx5_ib_add(struct mlx5_ib_dev *dev,
		  const struct mlx5_ib_profile *profile)
{
	int err;
	int i;

	dev->profile = profile;

	for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
		if (profile->stage[i].init) {
			err = profile->stage[i].init(dev);
			if (err)
				goto err_out;
		}
	}

	dev->ib_active = true;
	return 0;

err_out:
	/* Clean up stages which were initialized */
	while (i) {
		i--;
		if (profile->stage[i].cleanup)
			profile->stage[i].cleanup(dev);
	}
	return err;
}

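/*
 * The PF profile runs every stage; the raw Ethernet profile skips the stages
 * that only make sense with full RoCE/IB support (for example ODP and
 * delay-drop).
 */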
static const struct mlx5_ib_profile pf_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_INIT,
		     mlx5_ib_stage_init_init,
		     mlx5_ib_stage_init_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_FS,
		     mlx5_ib_fs_init,
		     mlx5_ib_fs_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
		     mlx5_ib_stage_caps_init,
		     mlx5_ib_stage_caps_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
		     mlx5_ib_stage_non_default_cb,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
		     mlx5_ib_roce_init,
		     mlx5_ib_roce_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_QP,
		     mlx5_init_qp_table,
		     mlx5_cleanup_qp_table),
	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
		     mlx5_init_srq_table,
		     mlx5_cleanup_srq_table),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
		     mlx5_ib_dev_res_init,
		     mlx5_ib_dev_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
		     mlx5_ib_stage_dev_notifier_init,
		     mlx5_ib_stage_dev_notifier_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_ODP,
		     mlx5_ib_odp_init_one,
		     mlx5_ib_odp_cleanup_one),
	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
		     mlx5_ib_counters_init,
		     mlx5_ib_counters_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
		     mlx5_ib_stage_cong_debugfs_init,
		     mlx5_ib_stage_cong_debugfs_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_UAR,
		     mlx5_ib_stage_uar_init,
		     mlx5_ib_stage_uar_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
		     mlx5_ib_stage_bfrag_init,
		     mlx5_ib_stage_bfrag_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
		     NULL,
		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
		     mlx5_ib_devx_init,
		     mlx5_ib_devx_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
		     mlx5_ib_stage_delay_drop_init,
		     mlx5_ib_stage_delay_drop_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
		     mlx5_ib_restrack_init,
		     NULL),
};

const struct mlx5_ib_profile raw_eth_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_INIT,
		     mlx5_ib_stage_init_init,
		     mlx5_ib_stage_init_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_FS,
		     mlx5_ib_fs_init,
		     mlx5_ib_fs_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
		     mlx5_ib_stage_caps_init,
		     mlx5_ib_stage_caps_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
		     mlx5_ib_stage_raw_eth_non_default_cb,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
		     mlx5_ib_roce_init,
		     mlx5_ib_roce_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_QP,
		     mlx5_init_qp_table,
		     mlx5_cleanup_qp_table),
	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
		     mlx5_init_srq_table,
		     mlx5_cleanup_srq_table),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
		     mlx5_ib_dev_res_init,
		     mlx5_ib_dev_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
		     mlx5_ib_stage_dev_notifier_init,
		     mlx5_ib_stage_dev_notifier_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
		     mlx5_ib_counters_init,
		     mlx5_ib_counters_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
		     mlx5_ib_stage_cong_debugfs_init,
		     mlx5_ib_stage_cong_debugfs_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_UAR,
		     mlx5_ib_stage_uar_init,
		     mlx5_ib_stage_uar_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
		     mlx5_ib_stage_bfrag_init,
		     mlx5_ib_stage_bfrag_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
		     NULL,
		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
		     mlx5_ib_devx_init,
		     mlx5_ib_devx_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
		     mlx5_ib_restrack_init,
		     NULL),
};

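/*
 * The driver binds to two auxiliary devices exported by mlx5 core: ".rdma"
 * instantiates a full IB device, while ".multiport" only carries the second
 * port of a dual-port RoCE HCA and is affiliated with an existing IB device
 * when the system image GUIDs match.
 */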
static int mlx5r_mp_probe(struct auxiliary_device *adev,
			  const struct auxiliary_device_id *id)
{
	struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
	struct mlx5_core_dev *mdev = idev->mdev;
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_dev *dev;
	bool bound = false;
	int err;

	mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
	if (!mpi)
		return -ENOMEM;

	mpi->mdev = mdev;
	err = mlx5_query_nic_vport_system_image_guid(mdev,
						     &mpi->sys_image_guid);
	if (err) {
		kfree(mpi);
		return err;
	}

	mutex_lock(&mlx5_ib_multiport_mutex);
	list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
		if (dev->sys_image_guid == mpi->sys_image_guid)
			bound = mlx5_ib_bind_slave_port(dev, mpi);

		if (bound) {
			rdma_roce_rescan_device(&dev->ib_dev);
			mpi->ibdev->ib_active = true;
			break;
		}
	}

	if (!bound) {
		list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
		dev_dbg(mdev->device,
			"no suitable IB device found to bind to, added to unaffiliated list.\n");
	}
	mutex_unlock(&mlx5_ib_multiport_mutex);

	auxiliary_set_drvdata(adev, mpi);
	return 0;
}

static void mlx5r_mp_remove(struct auxiliary_device *adev)
{
	struct mlx5_ib_multiport_info *mpi;

	mpi = auxiliary_get_drvdata(adev);
	mutex_lock(&mlx5_ib_multiport_mutex);
	if (mpi->ibdev)
		mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
	else
		list_del(&mpi->list);
	mutex_unlock(&mlx5_ib_multiport_mutex);
	kfree(mpi);
}

static int mlx5r_probe(struct auxiliary_device *adev,
		       const struct auxiliary_device_id *id)
{
	struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
	struct mlx5_core_dev *mdev = idev->mdev;
	const struct mlx5_ib_profile *profile;
	int port_type_cap, num_ports, ret;
	enum rdma_link_layer ll;
	struct mlx5_ib_dev *dev;

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
			MLX5_CAP_GEN(mdev, num_vhca_ports));
	dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
	if (!dev)
		return -ENOMEM;
	dev->port = kcalloc(num_ports, sizeof(*dev->port),
			    GFP_KERNEL);
	if (!dev->port) {
		ib_dealloc_device(&dev->ib_dev);
		return -ENOMEM;
	}

	dev->mdev = mdev;
	dev->num_ports = num_ports;

	if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev))
		profile = &raw_eth_profile;
	else
		profile = &pf_profile;

	ret = __mlx5_ib_add(dev, profile);
	if (ret) {
		kfree(dev->port);
		ib_dealloc_device(&dev->ib_dev);
		return ret;
	}

	auxiliary_set_drvdata(adev, dev);
	return 0;
}

static void mlx5r_remove(struct auxiliary_device *adev)
{
	struct mlx5_ib_dev *dev;

	dev = auxiliary_get_drvdata(adev);
	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
}

static const struct auxiliary_device_id mlx5r_mp_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".multiport", },
	{},
};

static const struct auxiliary_device_id mlx5r_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".rdma", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, mlx5r_mp_id_table);
MODULE_DEVICE_TABLE(auxiliary, mlx5r_id_table);

static struct auxiliary_driver mlx5r_mp_driver = {
	.name = "multiport",
	.probe = mlx5r_mp_probe,
	.remove = mlx5r_mp_remove,
	.id_table = mlx5r_mp_id_table,
};

static struct auxiliary_driver mlx5r_driver = {
	.name = "rdma",
	.probe = mlx5r_probe,
	.remove = mlx5r_remove,
	.id_table = mlx5r_id_table,
};

static int __init mlx5_ib_init(void)
{
	int ret;

	xlt_emergency_page = (void *)__get_free_page(GFP_KERNEL);
	if (!xlt_emergency_page)
		return -ENOMEM;

	mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
	if (!mlx5_ib_event_wq) {
		free_page((unsigned long)xlt_emergency_page);
		return -ENOMEM;
	}

	ret = mlx5r_rep_init();
	if (ret)
		goto rep_err;
	ret = auxiliary_driver_register(&mlx5r_mp_driver);
	if (ret)
		goto mp_err;
	ret = auxiliary_driver_register(&mlx5r_driver);
	if (ret)
		goto drv_err;

	return 0;

drv_err:
	auxiliary_driver_unregister(&mlx5r_mp_driver);
mp_err:
	mlx5r_rep_cleanup();
rep_err:
	destroy_workqueue(mlx5_ib_event_wq);
	free_page((unsigned long)xlt_emergency_page);
	return ret;
}

static void __exit mlx5_ib_cleanup(void)
{
	auxiliary_driver_unregister(&mlx5r_driver);
	auxiliary_driver_unregister(&mlx5r_mp_driver);
	mlx5r_rep_cleanup();

	destroy_workqueue(mlx5_ib_event_wq);
	free_page((unsigned long)xlt_emergency_page);
}

module_init(mlx5_ib_init);
module_exit(mlx5_ib_cleanup);