/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include <net/addrconf.h>
#include <net/devlink.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <net/bonding.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"4.0-0"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");
static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION "\n";
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
						    u8 port_num);

static struct workqueue_struct *wq;
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;

		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(eth_num_ports &&
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}
static int num_ib_ports(struct mlx4_dev *dev)
{
	int ib_ports = 0;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_ports++;

	return ib_ports;
}
static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct net_device *dev;

	rcu_read_lock();
	dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);

	if (dev) {
		if (mlx4_is_bonded(ibdev->dev)) {
			struct net_device *upper = NULL;

			upper = netdev_master_upper_dev_get_rcu(dev);
			if (upper) {
				struct net_device *active;

				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
				if (active)
					dev = active;
			}
		}
	}
	if (dev)
		dev_hold(dev);

	rcu_read_unlock();
	return dev;
}
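/*
 * Push the whole per-port GID table to firmware in one SET_PORT command.
 * When the two physical ports are bonded, the same table is also written
 * to port 2 so that fail-over keeps the GIDs usable.
 */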
static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
				  struct mlx4_ib_dev *ibdev,
				  u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	union ib_gid *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_GID_TABLE << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
				     struct mlx4_ib_dev *ibdev,
				     u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	struct {
		union ib_gid	gid;
		__be32		rsrvd1[2];
		__be16		rsrvd2;
		u8		type;
		u8		version;
		__be32		rsrvd3;
	} *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
		if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			gid_tbl[i].version = 2;
			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
				gid_tbl[i].type = 1;
		}
	}

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
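/*
 * Dispatch on device capability: devices that advertise
 * MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 take the RoCE-v1/v2-aware table format,
 * everything else falls back to the legacy v1-only layout above.
 */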
static int mlx4_ib_update_gids(struct gid_entry *gids,
			       struct mlx4_ib_dev *ibdev,
			       u8 port_num)
{
	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);

	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}
static void free_gid_entry(struct gid_entry *entry)
{
	memset(&entry->gid, 0, sizeof(entry->gid));
	kfree(entry->ctx);
	entry->ctx = NULL;
}
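/*
 * The driver keeps a software shadow of the HW GID table per port in
 * iboe->gids. add_gid() either bumps the refcount of a matching cached
 * entry (same GID, GID type and VLAN) or claims a free slot, and then
 * pushes the whole table to HW outside the spinlock.
 */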
static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table   *port_gid_table;
	int free = -1, found = -1;
	int ret = 0;
	int hw_update = 0;
	int i;
	struct gid_entry *gids = NULL;
	u16 vlan_id = 0xffff;
	u8 mac[ETH_ALEN];

	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
		return -EINVAL;

	if (attr->port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
	if (ret)
		return ret;
	port_gid_table = &iboe->gids[attr->port_num - 1];
	spin_lock_bh(&iboe->lock);
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		if (!memcmp(&port_gid_table->gids[i].gid,
			    &attr->gid, sizeof(attr->gid)) &&
		    port_gid_table->gids[i].gid_type == attr->gid_type &&
		    port_gid_table->gids[i].vlan_id == vlan_id) {
			found = i;
			break;
		}
		if (free < 0 && rdma_is_zero_gid(&port_gid_table->gids[i].gid))
			free = i; /* HW has space */
	}

	if (found < 0) {
		if (free < 0) {
			ret = -ENOSPC;
		} else {
			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
			if (!port_gid_table->gids[free].ctx) {
				ret = -ENOMEM;
			} else {
				*context = port_gid_table->gids[free].ctx;
				memcpy(&port_gid_table->gids[free].gid,
				       &attr->gid, sizeof(attr->gid));
				port_gid_table->gids[free].gid_type = attr->gid_type;
				port_gid_table->gids[free].vlan_id = vlan_id;
				port_gid_table->gids[free].ctx->real_index = free;
				port_gid_table->gids[free].ctx->refcount = 1;
				hw_update = 1;
			}
		}
	} else {
		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;

		*context = ctx;
		ctx->refcount++;
	}
	if (!ret && hw_update) {
		gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
				     GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
			*context = NULL;
			free_gid_entry(&port_gid_table->gids[free]);
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
				gids[i].gid_type = port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
		if (ret) {
			spin_lock_bh(&iboe->lock);
			*context = NULL;
			free_gid_entry(&port_gid_table->gids[free]);
			spin_unlock_bh(&iboe->lock);
		}
		kfree(gids);
	}

	return ret;
}
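/*
 * del_gid() drops the reference taken by mlx4_ib_add_gid() and only
 * clears the slot and re-writes the HW table once the last user of the
 * cached entry is gone.
 */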
static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct gid_cache_context *ctx = *context;
	struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table   *port_gid_table;
	int ret = 0;
	int hw_update = 0;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
		return -EINVAL;

	if (attr->port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	port_gid_table = &iboe->gids[attr->port_num - 1];
	spin_lock_bh(&iboe->lock);
	if (ctx) {
		ctx->refcount--;
		if (!ctx->refcount) {
			unsigned int real_index = ctx->real_index;

			free_gid_entry(&port_gid_table->gids[real_index]);
			hw_update = 1;
		}
	}
	if (!ret && hw_update) {
		int i;

		gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
				     GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid,
				       &port_gid_table->gids[i].gid,
				       sizeof(union ib_gid));
				gids[i].gid_type =
				    port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
		kfree(gids);
	}
	return ret;
}
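/*
 * Translate a core GID-cache index into the slot the HW table really
 * uses; the two can differ because the shadow table reuses freed slots.
 */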
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    const struct ib_gid_attr *attr)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct gid_cache_context *ctx = NULL;
	struct mlx4_port_gid_table   *port_gid_table;
	int real_index = -EINVAL;
	int i;
	unsigned long flags;
	u8 port_num = attr->port_num;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (mlx4_is_bonded(ibdev->dev))
		port_num = 1;

	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
		return attr->index;

	spin_lock_irqsave(&iboe->lock, flags);
	port_gid_table = &iboe->gids[port_num - 1];

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		if (!memcmp(&port_gid_table->gids[i].gid,
			    &attr->gid, sizeof(attr->gid)) &&
		    attr->gid_type == port_gid_table->gids[i].gid_type) {
			ctx = port_gid_table->gids[i].ctx;
			break;
		}
	if (ctx)
		real_index = ctx->real_index;
	spin_unlock_irqrestore(&iboe->lock, flags);
	return real_index;
}
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err;
	int have_ib_ports;
	struct mlx4_uverbs_ex_query_device cmd;
	struct mlx4_uverbs_ex_query_device_resp resp = {};
	struct mlx4_clock_params clock_params;

	if (uhw->inlen) {
		if (uhw->inlen < sizeof(cmd))
			return -EINVAL;

		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
		if (err)
			return err;

		if (cmd.comp_mask)
			return -EINVAL;

		if (cmd.reserved)
			return -EINVAL;
	}

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);
	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	err = -ENOMEM;
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	have_ib_ports = num_ib_ports(dev->dev);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN		|
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz &&
	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
		else
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
	}
	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id	   = dev->dev->persist->pdev->device;
	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data +	4, 8);

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = dev->dev->caps.page_size_cap;
	props->max_qp		   = dev->dev->quotas.qp;
	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_send_sge =
		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
	props->max_recv_sge =
		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
	props->max_sge_rd	   = MLX4_MAX_SGE_RD;
	props->max_cq		   = dev->dev->quotas.cq;
	props->max_cqe		   = dev->dev->caps.max_cqes;
	props->max_mr		   = dev->dev->quotas.mpt;
	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq		   = dev->dev->quotas.srq;
	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap   = props->atomic_cap;
	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
	props->max_ah = INT_MAX;

	if (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
	    mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET) {
		if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
			props->rss_caps.max_rwq_indirection_tables =
				props->max_qp;
			props->rss_caps.max_rwq_indirection_table_size =
				dev->dev->caps.max_rss_tbl_sz;
			props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
			props->max_wq_type_rq = props->max_qp;
		}

		if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
			props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
	}

	props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
	props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;

	if (!mlx4_is_slave(dev->dev))
		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);

	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
		resp.response_length += sizeof(resp.hca_core_clock_offset);
		if (!err && !mlx4_is_slave(dev->dev)) {
			resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
		}
	}

	if (uhw->outlen >= resp.response_length +
	    sizeof(resp.max_inl_recv_sz)) {
		resp.response_length += sizeof(resp.max_inl_recv_sz);
		resp.max_inl_recv_sz  = dev->dev->caps.max_rq_sg *
			sizeof(struct mlx4_wqe_data_seg);
	}

	if (offsetofend(typeof(resp), rss_caps) <= uhw->outlen) {
		if (props->rss_caps.supported_qpts) {
			resp.rss_caps.rx_hash_function =
				MLX4_IB_RX_HASH_FUNC_TOEPLITZ;

			resp.rss_caps.rx_hash_fields_mask =
				MLX4_IB_RX_HASH_SRC_IPV4 |
				MLX4_IB_RX_HASH_DST_IPV4 |
				MLX4_IB_RX_HASH_SRC_IPV6 |
				MLX4_IB_RX_HASH_DST_IPV6 |
				MLX4_IB_RX_HASH_SRC_PORT_TCP |
				MLX4_IB_RX_HASH_DST_PORT_TCP |
				MLX4_IB_RX_HASH_SRC_PORT_UDP |
				MLX4_IB_RX_HASH_DST_PORT_UDP;

			if (dev->dev->caps.tunnel_offload_mode ==
			    MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
				resp.rss_caps.rx_hash_fields_mask |=
					MLX4_IB_RX_HASH_INNER;
		}
		resp.response_length = offsetof(typeof(resp), rss_caps) +
				       sizeof(resp.rss_caps);
	}

	if (offsetofend(typeof(resp), tso_caps) <= uhw->outlen) {
		if (dev->dev->caps.max_gso_sz &&
		    ((mlx4_ib_port_link_layer(ibdev, 1) ==
		    IB_LINK_LAYER_ETHERNET) ||
		    (mlx4_ib_port_link_layer(ibdev, 2) ==
		    IB_LINK_LAYER_ETHERNET))) {
			resp.tso_caps.max_tso = dev->dev->caps.max_gso_sz;
			resp.tso_caps.supported_qpts |=
				1 << IB_QPT_RAW_PACKET;
		}
		resp.response_length = offsetof(typeof(resp), tso_caps) +
				       sizeof(resp.tso_caps);
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
		if (err)
			goto out;
	}
out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
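/*
 * Query an IB port by sending a PortInfo MAD (and, for FDR-10 detection,
 * an ExtendedPortInfo MAD) to the firmware and decoding the raw attribute
 * bytes into struct ib_port_attr.
 */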
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If reported active speed is QDR, check if is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ?
		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
}
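/*
 * For Ethernet (RoCE) ports most attributes are synthesized: width and
 * speed come from QUERY_PORT, while state and MTU are derived from the
 * paired netdevice (or its bond master when the ports are bonded).
 */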
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{

	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int is_bonded = mlx4_is_bonded(mdev->dev);

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ||
				   (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
				   IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed	=  (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
					   IB_SPEED_FDR : IB_SPEED_QDR;
	props->port_cap_flags	= IB_PORT_CM_SUP;
	props->ip_gids = true;
	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= 1;
	props->max_mtu		= IB_MTU_4096;
	props->max_vl_num	= 2;
	props->state		= IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
	props->active_mtu	= IB_MTU_256;
	spin_lock_bh(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (ndev && is_bonded) {
		rcu_read_lock(); /* required to get upper dev */
		ndev = netdev_master_upper_dev_get_rcu(ndev);
		rcu_read_unlock();
	}
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state		= (netif_running(ndev) && netif_carrier_ok(ndev)) ?
					IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
out_unlock:
	spin_unlock_bh(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	/* props being zeroed by the caller, avoid zeroing it here */

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
				eth_link_query_port(ibdev, port, props);

	return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}
int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int clear = 0;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(dev->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
		if (index) {
			/* For any index > 0, return the null guid */
			err = 0;
			clear = 1;
			goto out;
		}
	}

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
			   NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	if (clear)
		memset(gid->raw + 8, 0, 8);
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	if (rdma_protocol_ib(ibdev, port))
		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
	return -ENODEV;
}
static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
{
	union sl2vl_tbl_to_u64 sl2vl64;
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;
	int jj;

	if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
		*sl2vl_tbl = 0;
		return 0;
	}

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_SL_TO_VL_TABLE;
	in_mad->attr_mod = 0;

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	for (jj = 0; jj < 8; jj++)
		sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
	*sl2vl_tbl = sl2vl64.sl64;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
{
	u64 sl2vl;
	int i;
	int err;

	for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
		if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			continue;
		err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
		if (err) {
			pr_err("Unable to get default sl to vl mapping for port %d. Using all zeroes (%d)\n",
			       i, err);
			sl2vl = 0;
		}
		atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
	}
}
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	if (mlx4_is_slave(to_mdev(ibdev)->dev))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}
static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			    u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}
static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
	 * of whether port link layer is ETH or IB. For ETH ports, qkey
	 * violations and port capabilities are not meaningful.
	 */
	if (is_eth)
		return 0;

	mutex_lock(&mdev->cap_mask_mutex);

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_ib_SET_PORT(mdev, port,
			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
			       cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}
static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
				  struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context = to_mucontext(uctx);
	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return -EAGAIN;

	if (ibdev->ops.uverbs_abi_ver ==
	    MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
		resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
		resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
	} else {
		resp.dev_caps	      = dev->dev->caps.userspace_caps;
		resp.qp_tab_size      = dev->dev->caps.num_qps;
		resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		resp.cqe_size	      = dev->dev->caps.cqe_size;
	}

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err)
		return err;

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	INIT_LIST_HEAD(&context->wqn_ranges_list);
	mutex_init(&context->wqn_ranges_mutex);

	if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
	else
		err = ib_copy_to_udata(udata, &resp, sizeof(resp));

	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		return -EFAULT;
	}

	return err;
}
static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
}

static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	switch (vma->vm_pgoff) {
	case 0:
		return rdma_user_mmap_io(context, vma,
					 to_mucontext(context)->uar.pfn,
					 PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot),
					 NULL);

	case 1:
		if (dev->dev->caps.bf_reg_size == 0)
			return -EINVAL;
		return rdma_user_mmap_io(
			context, vma,
			to_mucontext(context)->uar.pfn +
				dev->dev->caps.num_uars,
			PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
			NULL);

	case 3: {
		struct mlx4_clock_params params;
		int ret;

		ret = mlx4_get_internal_clock_params(dev->dev, &params);
		if (ret)
			return ret;

		return rdma_user_mmap_io(
			context, vma,
			(pci_resource_start(dev->dev->persist->pdev,
					    params.bar) +
			 params.offset) >>
				PAGE_SHIFT,
			PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
			NULL);
	}

	default:
		return -EINVAL;
	}
}
static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd = to_mpd(ibpd);
	struct ib_device *ibdev = ibpd->device;
	int err;

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err)
		return err;

	if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
		mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
		return -EFAULT;
	}
	return 0;
}

static void mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
}
static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	struct ib_cq_init_attr cq_attr = {};
	int err;

	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	cq_attr.cqe = 1;
	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return &xrcd->ibxrcd;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
	kfree(xrcd);
	return ERR_PTR(err);
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(xrcd);
	return 0;
}
static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}
static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
					  struct mlx4_ib_counters *ctr_table)
{
	struct counter_index *counter, *tmp_count;

	mutex_lock(&ctr_table->mutex);
	list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
				 list) {
		if (counter->allocated)
			mlx4_counter_free(ibdev->dev, counter->index);
		list_del(&counter->list);
		kfree(counter);
	}
	mutex_unlock(&ctr_table->mutex);
}
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock_bh(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		ret = 1;
	spin_unlock_bh(&mdev->iboe.lock);

	return ret;
}
struct mlx4_ib_steering {
	struct list_head list;
	struct mlx4_flow_reg_id reg_id;
	union ib_gid gid;
};

#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD	dst_ip
#define LAST_TCP_UDP_FIELD src_port

/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
	memchr_inv((void *)&filter.field  +\
		   sizeof(filter.field), 0,\
		   sizeof(filter) -\
		   offsetof(typeof(filter), field) -\
		   sizeof(filter.field))
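/*
 * FIELDS_NOT_SUPPORTED() evaluates to non-NULL ("unsupported") when any
 * mask byte *after* the last field this driver can match is non-zero,
 * e.g. FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD) scans
 * the bytes of the IPv4 mask that follow dst_ip.
 */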
static int parse_flow_attr(struct mlx4_dev *dev,
			   u32 qp_num,
			   union ib_flow_spec *ib_spec,
			   struct _rule_hw *mlx4_spec)
{
	enum mlx4_net_trans_rule_id type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
		       ETH_ALEN);
		memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
		       ETH_ALEN);
		mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
		mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
		break;
	case IB_FLOW_SPEC_IB:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_IB;
		mlx4_spec->ib.l3_qpn =
			cpu_to_be32(qp_num);
		mlx4_spec->ib.qpn_mask =
			cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
		break;

	case IB_FLOW_SPEC_IPV4:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_IPV4;
		mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
		mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
		mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
		mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
		break;

	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
			return -ENOTSUPP;

		type = ib_spec->type == IB_FLOW_SPEC_TCP ?
					MLX4_NET_TRANS_RULE_ID_TCP :
					MLX4_NET_TRANS_RULE_ID_UDP;
		mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
		mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
		mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
		mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
		break;

	default:
		return -EINVAL;
	}
	if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
	    mlx4_hw_rule_sz(dev, type) < 0)
		return -EINVAL;
	mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
	mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
	return mlx4_hw_rule_sz(dev, type);
}
struct default_rules {
	__u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u8  link_layer;
};
static const struct default_rules default_table[] = {
	{
		.mandatory_fields = {IB_FLOW_SPEC_IPV4},
		.mandatory_not_fields = {IB_FLOW_SPEC_ETH},
		.rules_create_list = {IB_FLOW_SPEC_IB},
		.link_layer = IB_LINK_LAYER_INFINIBAND
	}
};
static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
					 struct ib_flow_attr *flow_attr)
{
	int i, j, k;
	void *ib_flow;
	const struct default_rules *pdefault_rules = default_table;
	u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);

	for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
		__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
		memset(&field_types, 0, sizeof(field_types));

		if (link_layer != pdefault_rules->link_layer)
			continue;

		ib_flow = flow_attr + 1;
		/* we assume the specs are sorted */
		for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
		     j < flow_attr->num_of_specs; k++) {
			union ib_flow_spec *current_flow =
				(union ib_flow_spec *)ib_flow;

			/* same layer but different type */
			if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
			     (pdefault_rules->mandatory_fields[k] &
			      IB_FLOW_SPEC_LAYER_MASK)) &&
			    (current_flow->type !=
			     pdefault_rules->mandatory_fields[k]))
				goto out;

			/* same layer, try match next one */
			if (current_flow->type ==
			    pdefault_rules->mandatory_fields[k]) {
				j++;
				ib_flow +=
					((union ib_flow_spec *)ib_flow)->size;
			}
		}

		ib_flow = flow_attr + 1;
		for (j = 0; j < flow_attr->num_of_specs;
		     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
			for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
				/* same layer and same type */
				if (((union ib_flow_spec *)ib_flow)->type ==
				    pdefault_rules->mandatory_not_fields[k])
					goto out;

		return i;
	}
out:
	return -1;
}
static int __mlx4_ib_create_default_rules(
		struct mlx4_ib_dev *mdev,
		struct ib_qp *qp,
		const struct default_rules *pdefault_rules,
		struct _rule_hw *mlx4_spec) {
	int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
		union ib_flow_spec ib_spec = {};
		int ret;

		switch (pdefault_rules->rules_create_list[i]) {
		case 0:
			/* no rule */
			continue;
		case IB_FLOW_SPEC_IB:
			ib_spec.type = IB_FLOW_SPEC_IB;
			ib_spec.size = sizeof(struct ib_flow_spec_ib);

			break;
		default:
			/* invalid rule */
			return -EINVAL;
		}
		/* We must put empty rule, qpn is being ignored */
		ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
				      mlx4_spec);
		if (ret < 0) {
			pr_info("invalid parsing\n");
			return -EINVAL;
		}

		mlx4_spec = (void *)mlx4_spec + ret;
		size += ret;
	}
	return size;
}
static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
				 int domain,
				 enum mlx4_net_trans_promisc_mode flow_type,
				 u64 *reg_id)
{
	int ret, i;
	int size = 0;
	void *ib_flow;
	struct mlx4_ib_dev *mdev = to_mdev(qp->device);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	int default_flow;

	static const u16 __mlx4_domain[] = {
		[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
		[IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
		[IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
		[IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
	};

	if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
		pr_err("Invalid priority value %d\n", flow_attr->priority);
		return -EINVAL;
	}

	if (domain >= IB_FLOW_DOMAIN_NUM) {
		pr_err("Invalid domain value %d\n", domain);
		return -EINVAL;
	}

	if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	ctrl = mailbox->buf;

	ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
				 flow_attr->priority);
	ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
	ctrl->port = flow_attr->port;
	ctrl->qpn = cpu_to_be32(qp->qp_num);

	ib_flow = flow_attr + 1;
	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
	/* Add default flows */
	default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
	if (default_flow >= 0) {
		ret = __mlx4_ib_create_default_rules(
				mdev, qp, default_table + default_flow,
				mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		size += ret;
	}
	for (i = 0; i < flow_attr->num_of_specs; i++) {
		ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
				      mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		ib_flow += ((union ib_flow_spec *) ib_flow)->size;
		size += ret;
	}

	if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
	    flow_attr->num_of_specs == 1) {
		struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
		enum ib_flow_spec_type header_spec =
			((union ib_flow_spec *)(flow_attr + 1))->type;

		if (header_spec == IB_FLOW_SPEC_ETH)
			mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
	}

	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (ret == -ENOMEM)
		pr_err("mcg table is full. Fail to register network rule.\n");
	else if (ret == -ENXIO)
		pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
	else if (ret)
		pr_err("Invalid argument. Fail to register network rule.\n");

	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return ret;
}
static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
{
	int err;

	err = mlx4_cmd(dev, reg_id, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (err)
		pr_err("Fail to detach network rule. registration id = 0x%llx\n",
		       reg_id);
	return err;
}
static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
				    u64 *reg_id)
{
	void *ib_flow;
	union ib_flow_spec *ib_spec;
	struct mlx4_dev	*dev = to_mdev(qp->device)->dev;
	int err = 0;

	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	ib_flow = flow_attr + 1;
	ib_spec = (union ib_flow_spec *)ib_flow;

	if (ib_spec->type !=  IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
				    flow_attr->port, qp->qp_num,
				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
				    reg_id);
	return err;
}
static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
				      struct ib_flow_attr *flow_attr,
				      enum mlx4_net_trans_promisc_mode *type)
{
	int err = 0;

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
	    (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
	    (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
		return -EOPNOTSUPP;
	}

	if (flow_attr->num_of_specs == 0) {
		type[0] = MLX4_FS_MC_SNIFFER;
		type[1] = MLX4_FS_UC_SNIFFER;
	} else {
		union ib_flow_spec *ib_spec;

		ib_spec = (union ib_flow_spec *)(flow_attr + 1);
		if (ib_spec->type !=  IB_FLOW_SPEC_ETH)
			return -EINVAL;

		/* if all is zero than MC and UC */
		if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
			type[0] = MLX4_FS_MC_SNIFFER;
			type[1] = MLX4_FS_UC_SNIFFER;
		} else {
			u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
					    ib_spec->eth.mask.dst_mac[1],
					    ib_spec->eth.mask.dst_mac[2],
					    ib_spec->eth.mask.dst_mac[3],
					    ib_spec->eth.mask.dst_mac[4],
					    ib_spec->eth.mask.dst_mac[5]};

			/* Above xor was only on MC bit, non empty mask is valid
			 * only if this bit is set and rest are zero.
			 */
			if (!is_zero_ether_addr(&mac[0]))
				return -EINVAL;

			if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
				type[0] = MLX4_FS_MC_SNIFFER;
			else
				type[0] = MLX4_FS_UC_SNIFFER;
		}
	}

	return err;
}
*mlx4_ib_create_flow(struct ib_qp
*qp
,
1713 struct ib_flow_attr
*flow_attr
,
1714 int domain
, struct ib_udata
*udata
)
1716 int err
= 0, i
= 0, j
= 0;
1717 struct mlx4_ib_flow
*mflow
;
1718 enum mlx4_net_trans_promisc_mode type
[2];
1719 struct mlx4_dev
*dev
= (to_mdev(qp
->device
))->dev
;
1720 int is_bonded
= mlx4_is_bonded(dev
);
1722 if (flow_attr
->port
< 1 || flow_attr
->port
> qp
->device
->phys_port_cnt
)
1723 return ERR_PTR(-EINVAL
);
1725 if (flow_attr
->flags
& ~IB_FLOW_ATTR_FLAGS_DONT_TRAP
)
1726 return ERR_PTR(-EOPNOTSUPP
);
1728 if ((flow_attr
->flags
& IB_FLOW_ATTR_FLAGS_DONT_TRAP
) &&
1729 (flow_attr
->type
!= IB_FLOW_ATTR_NORMAL
))
1730 return ERR_PTR(-EOPNOTSUPP
);
1733 udata
->inlen
&& !ib_is_udata_cleared(udata
, 0, udata
->inlen
))
1734 return ERR_PTR(-EOPNOTSUPP
);
1736 memset(type
, 0, sizeof(type
));
1738 mflow
= kzalloc(sizeof(*mflow
), GFP_KERNEL
);
1744 switch (flow_attr
->type
) {
1745 case IB_FLOW_ATTR_NORMAL
:
1746 /* If dont trap flag (continue match) is set, under specific
1747 * condition traffic be replicated to given qp,
1748 * without stealing it
1750 if (unlikely(flow_attr
->flags
& IB_FLOW_ATTR_FLAGS_DONT_TRAP
)) {
1751 err
= mlx4_ib_add_dont_trap_rule(dev
,
1757 type
[0] = MLX4_FS_REGULAR
;
1761 case IB_FLOW_ATTR_ALL_DEFAULT
:
1762 type
[0] = MLX4_FS_ALL_DEFAULT
;
1765 case IB_FLOW_ATTR_MC_DEFAULT
:
1766 type
[0] = MLX4_FS_MC_DEFAULT
;
1769 case IB_FLOW_ATTR_SNIFFER
:
1770 type
[0] = MLX4_FS_MIRROR_RX_PORT
;
1771 type
[1] = MLX4_FS_MIRROR_SX_PORT
;
1779 while (i
< ARRAY_SIZE(type
) && type
[i
]) {
1780 err
= __mlx4_ib_create_flow(qp
, flow_attr
, domain
, type
[i
],
1781 &mflow
->reg_id
[i
].id
);
1783 goto err_create_flow
;
1785 /* Application always sees one port so the mirror rule
1786 * must be on port #2
1788 flow_attr
->port
= 2;
1789 err
= __mlx4_ib_create_flow(qp
, flow_attr
,
1791 &mflow
->reg_id
[j
].mirror
);
1792 flow_attr
->port
= 1;
1794 goto err_create_flow
;
1801 if (i
< ARRAY_SIZE(type
) && flow_attr
->type
== IB_FLOW_ATTR_NORMAL
) {
1802 err
= mlx4_ib_tunnel_steer_add(qp
, flow_attr
,
1803 &mflow
->reg_id
[i
].id
);
1805 goto err_create_flow
;
1808 flow_attr
->port
= 2;
1809 err
= mlx4_ib_tunnel_steer_add(qp
, flow_attr
,
1810 &mflow
->reg_id
[j
].mirror
);
1811 flow_attr
->port
= 1;
1813 goto err_create_flow
;
1816 /* function to create mirror rule */
1820 return &mflow
->ibflow
;
1824 (void)__mlx4_ib_destroy_flow(to_mdev(qp
->device
)->dev
,
1825 mflow
->reg_id
[i
].id
);
1830 (void)__mlx4_ib_destroy_flow(to_mdev(qp
->device
)->dev
,
1831 mflow
->reg_id
[j
].mirror
);
1836 return ERR_PTR(err
);
static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
{
	int err, ret = 0;
	int i = 0;
	struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
	struct mlx4_ib_flow *mflow = to_mflow(flow_id);

	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
		if (err)
			ret = err;
		if (mflow->reg_id[i].mirror) {
			err = __mlx4_ib_destroy_flow(mdev->dev,
						     mflow->reg_id[i].mirror);
			if (err)
				ret = err;
		}
		i++;
	}

	kfree(mflow);
	return ret;
}
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_dev	*dev = mdev->dev;
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_steering *ib_steering = NULL;
	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
	struct mlx4_flow_reg_id	reg_id;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
		if (!ib_steering)
			return -ENOMEM;
	}

	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
				    !!(mqp->flags &
				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    prot, &reg_id.id);
	if (err) {
		pr_err("multicast attach op failed, err %d\n", err);
		goto err_malloc;
	}

	reg_id.mirror = 0;
	if (mlx4_is_bonded(dev)) {
		err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
					    (mqp->port == 1) ? 2 : 1,
					    !!(mqp->flags &
					    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
					    prot, &reg_id.mirror);
		if (err)
			goto err_add;
	}

	err = add_gid_entry(ibqp, gid);
	if (err)
		goto err_add;

	if (ib_steering) {
		memcpy(ib_steering->gid.raw, gid->raw, 16);
		ib_steering->reg_id = reg_id;
		mutex_lock(&mqp->mutex);
		list_add(&ib_steering->list, &mqp->steering_rules);
		mutex_unlock(&mqp->mutex);
	}
	return 0;

err_add:
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
			      prot, reg_id.id);
	if (reg_id.mirror)
		mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				      prot, reg_id.mirror);
err_malloc:
	kfree(ib_steering);

	return err;
}
static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_ib_gid_entry *tmp;
	struct mlx4_ib_gid_entry *ret = NULL;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}

	return ret;
}
static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct net_device *ndev;
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_flow_reg_id reg_id = {0, 0};
	enum mlx4_protocol prot =  MLX4_PROT_IB_IPV6;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		struct mlx4_ib_steering *ib_steering;

		mutex_lock(&mqp->mutex);
		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
				list_del(&ib_steering->list);
				break;
			}
		}
		mutex_unlock(&mqp->mutex);
		if (&ib_steering->list == &mqp->steering_rules) {
			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
			return -EINVAL;
		}
		reg_id = ib_steering->reg_id;
		kfree(ib_steering);
	}

	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				    prot, reg_id.id);
	if (err)
		return err;

	if (mlx4_is_bonded(dev)) {
		err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
					    prot, reg_id.mirror);
		if (err)
			return err;
	}

	mutex_lock(&mqp->mutex);
	ge = find_gid_entry(mqp, gid->raw);
	if (ge) {
		spin_lock_bh(&mdev->iboe.lock);
		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
		if (ndev)
			dev_hold(ndev);
		spin_unlock_bh(&mdev->iboe.lock);
		if (ndev)
			dev_put(ndev);
		list_del(&ge->list);
		kfree(ge);
	} else
		pr_warn("could not find mgid entry\n");

	mutex_unlock(&mqp->mutex);

	return 0;
}
static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
	if (mlx4_is_master(dev->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mlx4_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);

	return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t hw_rev_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct mlx4_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);

	return sprintf(buf, "%x\n", dev->dev->rev_id);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t board_id_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mlx4_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);

	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}
static DEVICE_ATTR_RO(board_id);

static struct attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	NULL
};

static const struct attribute_group mlx4_attr_group = {
	.attrs = mlx4_class_attributes,
};
{
2087 #define DIAG_COUNTER(_name, _offset) \
2088 { .name = #_name, .offset = _offset }
2090 static const struct diag_counter diag_basic
[] = {
2091 DIAG_COUNTER(rq_num_lle
, 0x00),
2092 DIAG_COUNTER(sq_num_lle
, 0x04),
2093 DIAG_COUNTER(rq_num_lqpoe
, 0x08),
2094 DIAG_COUNTER(sq_num_lqpoe
, 0x0C),
2095 DIAG_COUNTER(rq_num_lpe
, 0x18),
2096 DIAG_COUNTER(sq_num_lpe
, 0x1C),
2097 DIAG_COUNTER(rq_num_wrfe
, 0x20),
2098 DIAG_COUNTER(sq_num_wrfe
, 0x24),
2099 DIAG_COUNTER(sq_num_mwbe
, 0x2C),
2100 DIAG_COUNTER(sq_num_bre
, 0x34),
2101 DIAG_COUNTER(sq_num_rire
, 0x44),
2102 DIAG_COUNTER(rq_num_rire
, 0x48),
2103 DIAG_COUNTER(sq_num_rae
, 0x4C),
2104 DIAG_COUNTER(rq_num_rae
, 0x50),
2105 DIAG_COUNTER(sq_num_roe
, 0x54),
2106 DIAG_COUNTER(sq_num_tree
, 0x5C),
2107 DIAG_COUNTER(sq_num_rree
, 0x64),
2108 DIAG_COUNTER(rq_num_rnr
, 0x68),
2109 DIAG_COUNTER(sq_num_rnr
, 0x6C),
2110 DIAG_COUNTER(rq_num_oos
, 0x100),
2111 DIAG_COUNTER(sq_num_oos
, 0x104),
2114 static const struct diag_counter diag_ext
[] = {
2115 DIAG_COUNTER(rq_num_dup
, 0x130),
2116 DIAG_COUNTER(sq_num_to
, 0x134),
2119 static const struct diag_counter diag_device_only
[] = {
2120 DIAG_COUNTER(num_cqovf
, 0x1A0),
2121 DIAG_COUNTER(rq_num_udsdprd
, 0x118),
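/*
 * diag_basic[] is always exported; diag_ext[] additionally requires the
 * per-port diagnostic capability, and diag_device_only[] only appears in
 * the device-wide (port 0) counter set built below.
 */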
static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
						    u8 port_num)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_diag_counters *diag = dev->diag_counters;

	if (!diag[!!port_num].name)
		return NULL;

	return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
					  diag[!!port_num].num_counters,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
				struct rdma_hw_stats *stats,
				u8 port, int index)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_diag_counters *diag = dev->diag_counters;
	u32 hw_value[ARRAY_SIZE(diag_device_only) +
		ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
	int ret;
	int i;

	ret = mlx4_query_diag_counters(dev->dev,
				       MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
				       diag[!!port].offset, hw_value,
				       diag[!!port].num_counters, port);

	if (ret)
		return ret;

	for (i = 0; i < diag[!!port].num_counters; i++)
		stats->value[i] = hw_value[i];

	return diag[!!port].num_counters;
}
static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
					 const char ***name,
					 u32 **offset,
					 u32 *num,
					 bool port)
{
	u32 num_counters;

	num_counters = ARRAY_SIZE(diag_basic);

	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
		num_counters += ARRAY_SIZE(diag_ext);

	if (!port)
		num_counters += ARRAY_SIZE(diag_device_only);

	*name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
	if (!*name)
		return -ENOMEM;

	*offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
	if (!*offset)
		goto err;

	*num = num_counters;

	return 0;

err:
	kfree(*name);
	return -ENOMEM;
}
static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
				       const char **name,
				       u32 *offset,
				       bool port)
{
	int i;
	int j;

	for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
		name[i] = diag_basic[i].name;
		offset[i] = diag_basic[i].offset;
	}

	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
		for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
			name[j] = diag_ext[i].name;
			offset[j] = diag_ext[i].offset;
		}
	}

	if (!port) {
		for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
			name[j] = diag_device_only[i].name;
			offset[j] = diag_device_only[i].offset;
		}
	}
}
static const struct ib_device_ops mlx4_ib_hw_stats_ops = {
	.alloc_hw_stats = mlx4_ib_alloc_hw_stats,
	.get_hw_stats = mlx4_ib_get_hw_stats,
};
static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
{
	struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
	int i;
	int ret;
	bool per_port = !!(ibdev->dev->caps.flags2 &
			   MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);

	if (mlx4_is_slave(ibdev->dev))
		return 0;

	for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
		/* i == 1 means we are building port counters */
		if (i && !per_port)
			continue;

		ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
						    &diag[i].offset,
						    &diag[i].num_counters, i);
		if (ret)
			goto err_alloc;

		mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
					   diag[i].offset, i);
	}

	ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops);

	return 0;

err_alloc:
	if (i) {
		kfree(diag[i - 1].name);
		kfree(diag[i - 1].offset);
	}

	return ret;
}
static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
{
	int i;

	for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
		kfree(ibdev->diag_counters[i].offset);
		kfree(ibdev->diag_counters[i].name);
	}
}
#define MLX4_IB_INVALID_MAC	((u64)-1)
static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
			       struct net_device *dev,
			       int port)
{
	u64 new_smac = 0;
	u64 release_mac = MLX4_IB_INVALID_MAC;
	struct mlx4_ib_qp *qp;

	read_lock(&dev_base_lock);
	new_smac = mlx4_mac_to_u64(dev->dev_addr);
	read_unlock(&dev_base_lock);

	atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);

	/* no need for update QP1 and mac registration in non-SRIOV */
	if (!mlx4_is_mfunc(ibdev->dev))
		return;

	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
	qp = ibdev->qp1_proxy[port - 1];
	if (qp) {
		int new_smac_index;
		u64 old_smac;
		struct mlx4_update_qp_params update_params;

		mutex_lock(&qp->mutex);
		old_smac = qp->pri.smac;
		if (new_smac == old_smac)
			goto unlock;

		new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);

		if (new_smac_index < 0)
			goto unlock;

		update_params.smac_index = new_smac_index;
		if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
				   &update_params)) {
			release_mac = new_smac;
			goto unlock;
		}
		/* if old port was zero, no mac was yet registered for this QP */
		if (qp->pri.smac_port)
			release_mac = old_smac;
		qp->pri.smac = new_smac;
		qp->pri.smac_port = port;
		qp->pri.smac_index = new_smac_index;
	}

unlock:
	if (release_mac != MLX4_IB_INVALID_MAC)
		mlx4_unregister_mac(ibdev->dev, port, release_mac);
	if (qp)
		mutex_unlock(&qp->mutex);
	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
}
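/*
 * Summary of the flow above: on a source-MAC change the new MAC is
 * registered with the device before the proxy QP1 is switched over via
 * MLX4_UPDATE_QP_SMAC. Whichever MAC ends up unused (the new one if the
 * update fails, otherwise the old one) is unregistered only after the
 * QP no longer references it, so the QP never points at a freed MAC
 * table entry.
 */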
static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
				 struct net_device *dev,
				 unsigned long event)
{
	struct mlx4_ib_iboe *iboe;
	int update_qps_port = -1;
	int port;

	ASSERT_RTNL();

	iboe = &ibdev->iboe;

	spin_lock_bh(&iboe->lock);
	mlx4_foreach_ib_transport_port(port, ibdev->dev) {

		iboe->netdevs[port - 1] =
			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);

		if (dev == iboe->netdevs[port - 1] &&
		    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
		     event == NETDEV_UP || event == NETDEV_CHANGE))
			update_qps_port = port;

		if (dev == iboe->netdevs[port - 1] &&
		    (event == NETDEV_UP || event == NETDEV_DOWN)) {
			enum ib_port_state port_state;
			struct ib_event ibev = { };

			if (ib_get_cached_port_state(&ibdev->ib_dev, port,
						     &port_state))
				continue;

			if (event == NETDEV_UP &&
			    (port_state != IB_PORT_ACTIVE ||
			     iboe->last_port_state[port - 1] != IB_PORT_DOWN))
				continue;
			if (event == NETDEV_DOWN &&
			    (port_state != IB_PORT_DOWN ||
			     iboe->last_port_state[port - 1] != IB_PORT_ACTIVE))
				continue;
			iboe->last_port_state[port - 1] = port_state;

			ibev.device = &ibdev->ib_dev;
			ibev.element.port_num = port;
			ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE :
							  IB_EVENT_PORT_ERR;
			ib_dispatch_event(&ibev);
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (update_qps_port > 0)
		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
}
static int mlx4_ib_netdev_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlx4_ib_dev *ibdev;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
	mlx4_ib_scan_netdevs(ibdev, dev, event);

	return NOTIFY_DONE;
}
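/*
 * mlx4_ib_netdev_event() is the entry point of the netdevice notifier
 * registered in mlx4_ib_add(): any netdev change in init_net is
 * funneled into mlx4_ib_scan_netdevs(), which refreshes the cached
 * per-port netdevs, dispatches IB_EVENT_PORT_ACTIVE/PORT_ERR on
 * UP/DOWN transitions, and triggers the QP1 SMAC update above when the
 * address may have changed.
 */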
static void init_pkeys(struct mlx4_ib_dev *ibdev)
{
	int port;
	int slave;
	int i;

	if (mlx4_is_master(ibdev->dev)) {
		for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
		     ++slave) {
			for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
				for (i = 0;
				     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
				     ++i) {
					ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
					/* master has the identity virt2phys pkey mapping */
						(slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
							ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
					mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
							     ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
				}
			}
		}
		/* initialize pkey cache */
		for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
			for (i = 0;
			     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
			     ++i)
				ibdev->pkeys.phys_pkey_cache[port-1][i] =
					(i) ? 0 : 0xFFFF;
		}
	}
}
static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i, j, eq = 0, total_eqs = 0;

	ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
				  sizeof(ibdev->eq_table[0]), GFP_KERNEL);
	if (!ibdev->eq_table)
		return;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
		     j++, total_eqs++) {
			if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
				continue;
			ibdev->eq_table[eq] = total_eqs;
			if (!mlx4_assign_eq(dev, i,
					    &ibdev->eq_table[eq]))
				eq++;
			else
				ibdev->eq_table[eq] = -1;
		}
	}

	for (i = eq; i < dev->caps.num_comp_vectors;
	     ibdev->eq_table[i++] = -1)
		;

	/* Advertise the new number of EQs to clients */
	ibdev->ib_dev.num_comp_vectors = eq;
}
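/*
 * The loop above spreads completion vectors across ports: each EQ that
 * mlx4_get_eqs_per_port() reports for a port is claimed with
 * mlx4_assign_eq() unless it is already shared with a lower-numbered
 * port, and slots that cannot be assigned are marked -1. The possibly
 * shrunken count is what ib_dev.num_comp_vectors then advertises to
 * ULPs.
 */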
static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i;
	int total_eqs = ibdev->ib_dev.num_comp_vectors;

	/* no eqs were allocated */
	if (!ibdev->eq_table)
		return;

	/* Reset the advertised EQ number */
	ibdev->ib_dev.num_comp_vectors = 0;

	for (i = 0; i < total_eqs; i++)
		mlx4_release_eq(dev, ibdev->eq_table[i]);

	kfree(ibdev->eq_table);
	ibdev->eq_table = NULL;
}
static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	int err;

	if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	} else {
		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
		immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
		if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
		    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
			immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	}

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}
static void get_fw_ver_str(struct ib_device *device, char *str)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev);
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
		 (int) (dev->dev->caps.fw_ver >> 32),
		 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->dev->caps.fw_ver & 0xffff);
}
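/*
 * caps.fw_ver packs the firmware version into a single u64: major in
 * bits 63:32, minor in bits 31:16, sub-minor in bits 15:0. Worked
 * example: fw_ver == 0x00000002002A0800 prints as "2.42.2048", since
 *	fw_ver >> 32            = 2
 *	(fw_ver >> 16) & 0xffff = 0x2A  = 42
 *	fw_ver & 0xffff         = 0x800 = 2048
 */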
static const struct ib_device_ops mlx4_ib_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_MLX4,
	.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION,

	.add_gid = mlx4_ib_add_gid,
	.alloc_mr = mlx4_ib_alloc_mr,
	.alloc_pd = mlx4_ib_alloc_pd,
	.alloc_ucontext = mlx4_ib_alloc_ucontext,
	.attach_mcast = mlx4_ib_mcg_attach,
	.create_ah = mlx4_ib_create_ah,
	.create_cq = mlx4_ib_create_cq,
	.create_qp = mlx4_ib_create_qp,
	.create_srq = mlx4_ib_create_srq,
	.dealloc_pd = mlx4_ib_dealloc_pd,
	.dealloc_ucontext = mlx4_ib_dealloc_ucontext,
	.del_gid = mlx4_ib_del_gid,
	.dereg_mr = mlx4_ib_dereg_mr,
	.destroy_ah = mlx4_ib_destroy_ah,
	.destroy_cq = mlx4_ib_destroy_cq,
	.destroy_qp = mlx4_ib_destroy_qp,
	.destroy_srq = mlx4_ib_destroy_srq,
	.detach_mcast = mlx4_ib_mcg_detach,
	.disassociate_ucontext = mlx4_ib_disassociate_ucontext,
	.drain_rq = mlx4_ib_drain_rq,
	.drain_sq = mlx4_ib_drain_sq,
	.get_dev_fw_str = get_fw_ver_str,
	.get_dma_mr = mlx4_ib_get_dma_mr,
	.get_link_layer = mlx4_ib_port_link_layer,
	.get_netdev = mlx4_ib_get_netdev,
	.get_port_immutable = mlx4_port_immutable,
	.map_mr_sg = mlx4_ib_map_mr_sg,
	.mmap = mlx4_ib_mmap,
	.modify_cq = mlx4_ib_modify_cq,
	.modify_device = mlx4_ib_modify_device,
	.modify_port = mlx4_ib_modify_port,
	.modify_qp = mlx4_ib_modify_qp,
	.modify_srq = mlx4_ib_modify_srq,
	.poll_cq = mlx4_ib_poll_cq,
	.post_recv = mlx4_ib_post_recv,
	.post_send = mlx4_ib_post_send,
	.post_srq_recv = mlx4_ib_post_srq_recv,
	.process_mad = mlx4_ib_process_mad,
	.query_ah = mlx4_ib_query_ah,
	.query_device = mlx4_ib_query_device,
	.query_gid = mlx4_ib_query_gid,
	.query_pkey = mlx4_ib_query_pkey,
	.query_port = mlx4_ib_query_port,
	.query_qp = mlx4_ib_query_qp,
	.query_srq = mlx4_ib_query_srq,
	.reg_user_mr = mlx4_ib_reg_user_mr,
	.req_notify_cq = mlx4_ib_arm_cq,
	.rereg_user_mr = mlx4_ib_rereg_user_mr,
	.resize_cq = mlx4_ib_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
};
static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
	.create_rwq_ind_table = mlx4_ib_create_rwq_ind_table,
	.create_wq = mlx4_ib_create_wq,
	.destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,
	.destroy_wq = mlx4_ib_destroy_wq,
	.modify_wq = mlx4_ib_modify_wq,
};

static const struct ib_device_ops mlx4_ib_dev_fmr_ops = {
	.alloc_fmr = mlx4_ib_fmr_alloc,
	.dealloc_fmr = mlx4_ib_fmr_dealloc,
	.map_phys_fmr = mlx4_ib_map_phys_fmr,
	.unmap_fmr = mlx4_ib_unmap_fmr,
};

static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
	.alloc_mw = mlx4_ib_alloc_mw,
	.dealloc_mw = mlx4_ib_dealloc_mw,
};

static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {
	.alloc_xrcd = mlx4_ib_alloc_xrcd,
	.dealloc_xrcd = mlx4_ib_dealloc_xrcd,
};

static const struct ib_device_ops mlx4_ib_dev_fs_ops = {
	.create_flow = mlx4_ib_create_flow,
	.destroy_flow = mlx4_ib_destroy_flow,
};
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	struct mlx4_ib_dev *ibdev;
	int num_ports = 0;
	int i, j;
	int err;
	struct mlx4_ib_iboe *iboe;
	int ib_num_ports = 0;
	int num_req_counters;
	int allocated;
	u32 counter_index;
	struct counter_index *new_counter_index = NULL;

	pr_info_once("%s", mlx4_ib_version);

	num_ports = 0;
	mlx4_foreach_ib_transport_port(i, dev)
		num_ports++;

	/* No point in registering a device with no ports... */
	if (num_ports == 0)
		return NULL;

	ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev);
	if (!ibdev) {
		dev_err(&dev->persist->pdev->dev,
			"Device struct alloc failed\n");
		return NULL;
	}

	iboe = &ibdev->iboe;

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
				 PAGE_SIZE);
	if (!ibdev->uar_map)
		goto err_uar;
	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	ibdev->dev = dev;
	ibdev->bond_next_port	= 0;

	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
	ibdev->num_ports		= num_ports;
	ibdev->ib_dev.phys_port_cnt	= mlx4_is_bonded(dev) ?
						1 : ibdev->num_ports;
	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
	ibdev->ib_dev.dev.parent	= &dev->persist->pdev->dev;

	ibdev->ib_dev.uverbs_cmd_mask	=
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);

	ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
	ibdev->ib_dev.uverbs_ex_cmd_mask |=
		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ) |
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);

	if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
	    ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
	    IB_LINK_LAYER_ETHERNET) ||
	    (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
	    IB_LINK_LAYER_ETHERNET))) {
		ibdev->ib_dev.uverbs_ex_cmd_mask |=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ)	  |
			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ)	  |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ)	  |
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
	}

	if (!mlx4_is_slave(ibdev->dev))
		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fmr_ops);

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops);
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
	}

	if (check_flow_steering_support(dev)) {
		ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
		ibdev->ib_dev.uverbs_ex_cmd_mask |=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
	}

	if (!dev->caps.userspace_caps)
		ibdev->ib_dev.ops.uverbs_abi_ver =
			MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;

	mlx4_ib_alloc_eqs(dev, ibdev);

	spin_lock_init(&iboe->lock);

	if (init_node_data(ibdev))
		goto err_map;
	mlx4_init_sl2vl_tbl(ibdev);

	for (i = 0; i < ibdev->num_ports; ++i) {
		mutex_init(&ibdev->counters_table[i].mutex);
		INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
		iboe->last_port_state[i] = IB_PORT_DOWN;
	}

	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
	for (i = 0; i < num_req_counters; ++i) {
		mutex_init(&ibdev->qp1_proxy_lock[i]);
		allocated = 0;
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
						IB_LINK_LAYER_ETHERNET) {
			err = mlx4_counter_alloc(ibdev->dev, &counter_index,
						 MLX4_RES_USAGE_DRIVER);
			/* if failed to allocate a new counter, use default */
			if (err)
				counter_index =
					mlx4_get_default_counter_index(dev,
								       i + 1);
			else
				allocated = 1;
		} else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
			counter_index = mlx4_get_default_counter_index(dev,
								       i + 1);
		}
		new_counter_index = kmalloc(sizeof(*new_counter_index),
					    GFP_KERNEL);
		if (!new_counter_index) {
			if (allocated)
				mlx4_counter_free(ibdev->dev, counter_index);
			goto err_counter;
		}
		new_counter_index->index = counter_index;
		new_counter_index->allocated = allocated;
		list_add_tail(&new_counter_index->list,
			      &ibdev->counters_table[i].counters_list);
		ibdev->counters_table[i].default_counter = counter_index;
		pr_info("counter index %d for port %d allocated %d\n",
			counter_index, i + 1, allocated);
	}
	if (mlx4_is_bonded(dev))
		for (i = 1; i < ibdev->num_ports ; ++i) {
			new_counter_index =
					kmalloc(sizeof(struct counter_index),
						GFP_KERNEL);
			if (!new_counter_index)
				goto err_counter;
			new_counter_index->index = counter_index;
			new_counter_index->allocated = 0;
			list_add_tail(&new_counter_index->list,
				      &ibdev->counters_table[i].counters_list);
			ibdev->counters_table[i].default_counter =
								counter_index;
		}

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_num_ports++;

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);
	INIT_LIST_HEAD(&ibdev->qp_list);
	spin_lock_init(&ibdev->reset_flow_resource_lock);

	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    ib_num_ports) {
		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
					    MLX4_IB_UC_STEER_QPN_ALIGN,
					    &ibdev->steer_qpn_base, 0,
					    MLX4_RES_USAGE_DRIVER);
		if (err)
			goto err_counter;

		ibdev->ib_uc_qpns_bitmap =
			kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count),
				      sizeof(long),
				      GFP_KERNEL);
		if (!ibdev->ib_uc_qpns_bitmap)
			goto err_steer_qp_release;

		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
			bitmap_zero(ibdev->ib_uc_qpns_bitmap,
				    ibdev->steer_qpn_count);
			err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
					dev, ibdev->steer_qpn_base,
					ibdev->steer_qpn_base +
					ibdev->steer_qpn_count - 1);
			if (err)
				goto err_steer_free_bitmap;
		} else {
			bitmap_fill(ibdev->ib_uc_qpns_bitmap,
				    ibdev->steer_qpn_count);
		}
	}

	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);

	if (mlx4_ib_alloc_diag_counters(ibdev))
		goto err_steer_free_bitmap;

	rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
	if (ib_register_device(&ibdev->ib_dev, "mlx4_%d"))
		goto err_diag_counters;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (mlx4_ib_init_sriov(ibdev))
		goto err_mad;

	if (!iboe->nb.notifier_call) {
		iboe->nb.notifier_call = mlx4_ib_netdev_event;
		err = register_netdevice_notifier(&iboe->nb);
		if (err) {
			iboe->nb.notifier_call = NULL;
			goto err_notif;
		}
	}
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
		err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
		if (err)
			goto err_notif;
	}

	ibdev->ib_active = true;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
					 &ibdev->ib_dev);

	if (mlx4_is_mfunc(ibdev->dev))
		init_pkeys(ibdev);

	/* create paravirt contexts for any VFs which are active */
	if (mlx4_is_master(ibdev->dev)) {
		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
			if (j == mlx4_master_func_num(ibdev->dev))
				continue;
			if (mlx4_is_slave_active(ibdev->dev, j))
				do_slave_init(ibdev, j, 1);
		}
	}
	return ibdev;

err_notif:
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	flush_workqueue(wq);

	mlx4_ib_close_sriov(ibdev);

err_mad:
	mlx4_ib_mad_cleanup(ibdev);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_diag_counters:
	mlx4_ib_diag_cleanup(ibdev);

err_steer_free_bitmap:
	kfree(ibdev->ib_uc_qpns_bitmap);

err_steer_qp_release:
	mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
			      ibdev->steer_qpn_count);
err_counter:
	for (i = 0; i < ibdev->num_ports; ++i)
		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);

err_map:
	mlx4_ib_free_eqs(dev, ibdev);
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}
int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
{
	int offset;

	WARN_ON(!dev->ib_uc_qpns_bitmap);

	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
					 dev->steer_qpn_count,
					 get_count_order(count));
	if (offset < 0)
		return offset;

	*qpn = dev->steer_qpn_base + offset;
	return 0;
}
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
{
	if (!qpn ||
	    dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
		return;

	if (WARN(qpn < dev->steer_qpn_base, "qpn = %u, steer_qpn_base = %u\n",
		 qpn, dev->steer_qpn_base))
		/* not supposed to be here */
		return;

	bitmap_release_region(dev->ib_uc_qpns_bitmap,
			      qpn - dev->steer_qpn_base,
			      get_count_order(count));
}
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
			 int is_attach)
{
	int err;
	size_t flow_size;
	struct ib_flow_attr *flow = NULL;
	struct ib_flow_spec_ib *ib_spec;

	if (is_attach) {
		flow_size = sizeof(struct ib_flow_attr) +
			    sizeof(struct ib_flow_spec_ib);
		flow = kzalloc(flow_size, GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->port = mqp->port;
		flow->num_of_specs = 1;
		flow->size = flow_size;
		ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
		ib_spec->type = IB_FLOW_SPEC_IB;
		ib_spec->size = sizeof(struct ib_flow_spec_ib);
		/* Add an empty rule for IB L2 */
		memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));

		err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
					    IB_FLOW_DOMAIN_NIC,
					    MLX4_FS_REGULAR,
					    &mqp->reg_id);
	} else {
		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
	}
	kfree(flow);
	return err;
}
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
	ibdev->ib_active = false;
	flush_workqueue(wq);

	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}

	mlx4_ib_close_sriov(ibdev);
	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	mlx4_ib_diag_cleanup(ibdev);

	mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
			      ibdev->steer_qpn_count);
	kfree(ibdev->ib_uc_qpns_bitmap);

	iounmap(ibdev->uar_map);
	for (p = 0; p < ibdev->num_ports; ++p)
		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);

	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_ib_free_eqs(dev, ibdev);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
	struct mlx4_ib_demux_work **dm = NULL;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	unsigned long flags;
	struct mlx4_active_ports actv_ports;
	unsigned int ports;
	unsigned int first_port;

	if (!mlx4_is_master(dev))
		return;

	actv_ports = mlx4_get_active_ports(dev, slave);
	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);

	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
	if (!dm)
		return;

	for (i = 0; i < ports; i++) {
		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
		if (!dm[i]) {
			while (--i >= 0)
				kfree(dm[i]);
			goto out;
		}
		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
		dm[i]->port = first_port + i + 1;
		dm[i]->slave = slave;
		dm[i]->do_init = do_init;
		dm[i]->dev = ibdev;
	}
	/* initialize or tear down tunnel QPs for the slave */
	spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
	if (!ibdev->sriov.is_going_down) {
		for (i = 0; i < ports; i++)
			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
	} else {
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
		for (i = 0; i < ports; i++)
			kfree(dm[i]);
	}
out:
	kfree(dm);
}
static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
{
	struct mlx4_ib_qp *mqp;
	unsigned long flags_qp;
	unsigned long flags_cq;
	struct mlx4_ib_cq *send_mcq, *recv_mcq;
	struct list_head    cq_notify_list;
	struct mlx4_cq *mcq;
	unsigned long flags;

	pr_warn("mlx4_ib_handle_catas_error was started\n");
	INIT_LIST_HEAD(&cq_notify_list);

	/* Go over qp list reside on that ibdev, sync with create/destroy qp.*/
	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);

	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
		if (mqp->sq.tail != mqp->sq.head) {
			send_mcq = to_mcq(mqp->ibqp.send_cq);
			spin_lock_irqsave(&send_mcq->lock, flags_cq);
			if (send_mcq->mcq.comp &&
			    mqp->ibqp.send_cq->comp_handler) {
				if (!send_mcq->mcq.reset_notify_added) {
					send_mcq->mcq.reset_notify_added = 1;
					list_add_tail(&send_mcq->mcq.reset_notify,
						      &cq_notify_list);
				}
			}
			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
		}
		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
		/* Now, handle the QP's receive queue */
		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
		/* no handling is needed for SRQ */
		if (!mqp->ibqp.srq) {
			if (mqp->rq.tail != mqp->rq.head) {
				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
				if (recv_mcq->mcq.comp &&
				    mqp->ibqp.recv_cq->comp_handler) {
					if (!recv_mcq->mcq.reset_notify_added) {
						recv_mcq->mcq.reset_notify_added = 1;
						list_add_tail(&recv_mcq->mcq.reset_notify,
							      &cq_notify_list);
					}
				}
				spin_unlock_irqrestore(&recv_mcq->lock,
						       flags_cq);
			}
		}
		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
	}

	list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
		mcq->comp(mcq);
	}
	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
	pr_warn("mlx4_ib_handle_catas_error ended\n");
}
static void handle_bonded_port_state_event(struct work_struct *work)
{
	struct ib_event_work *ew =
		container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *ibdev = ew->ib_dev;
	enum ib_port_state bonded_port_state = IB_PORT_NOP;
	int i;
	struct ib_event ibev;

	kfree(ew);
	spin_lock_bh(&ibdev->iboe.lock);
	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
		enum ib_port_state curr_port_state;

		if (!curr_netdev)
			continue;

		curr_port_state =
			(netif_running(curr_netdev) &&
			 netif_carrier_ok(curr_netdev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;

		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
			curr_port_state : IB_PORT_ACTIVE;
	}
	spin_unlock_bh(&ibdev->iboe.lock);

	ibev.device = &ibdev->ib_dev;
	ibev.element.port_num = 1;
	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	ib_dispatch_event(&ibev);
}
void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
{
	u64 sl2vl;
	int err;

	err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
	if (err) {
		pr_err("Unable to get current sl to vl mapping for port %d. Using all zeroes (%d)\n",
		       port, err);
		sl2vl = 0;
	}
	atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
}
static void ib_sl2vl_update_work(struct work_struct *work)
{
	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *mdev = ew->ib_dev;
	int port = ew->port;

	mlx4_ib_sl2vl_update(mdev, port);

	kfree(ew);
}
void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
				     int port)
{
	struct ib_event_work *ew;

	ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
	if (ew) {
		INIT_WORK(&ew->work, ib_sl2vl_update_work);
		ew->port = port;
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
	}
}
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
	struct mlx4_eqe *eqe = NULL;
	struct ib_event_work *ew;
	int p = 0;

	if (mlx4_is_bonded(dev) &&
	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
	     (event == MLX4_DEV_EVENT_PORT_DOWN))) {
		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
		if (!ew)
			return;
		INIT_WORK(&ew->work, handle_bonded_port_state_event);
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
		return;
	}

	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
		eqe = (struct mlx4_eqe *)param;
	else
		p = (int) param;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		if (p > ibdev->num_ports)
			return;
		if (!mlx4_is_slave(dev) &&
		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
			IB_LINK_LAYER_INFINIBAND) {
			if (mlx4_is_master(dev))
				mlx4_ib_invalidate_all_guid_record(ibdev, p);
			if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
			    !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
				mlx4_sched_ib_sl2vl_update_work(ibdev, p);
		}
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		if (p > ibdev->num_ports)
			return;
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		mlx4_ib_handle_catas_error(ibdev);
		break;

	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
		if (!ew)
			break;

		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
		ew->ib_dev = ibdev;
		/* need to queue only for port owner, which uses GEN_EQE */
		if (mlx4_is_master(dev))
			queue_work(wq, &ew->work);
		else
			handle_port_mgmt_change_event(&ew->work);
		return;

	case MLX4_DEV_EVENT_SLAVE_INIT:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 1);
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
					== IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       1);
			}
		}
		return;

	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
					== IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       0);
			}
		}
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 0);
		return;

	default:
		return;
	}

	ibev.device	      = ibdev_ptr;
	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;

	ib_dispatch_event(&ibev);
}
static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6,
	.flags		= MLX4_INTFF_BONDING
};
static int __init mlx4_ib_init(void)
{
	int err;

	wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
	if (!wq)
		return -ENOMEM;

	err = mlx4_ib_mcg_init();
	if (err)
		goto clean_wq;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err)
		goto clean_mcg;

	return 0;

clean_mcg:
	mlx4_ib_mcg_destroy();

clean_wq:
	destroy_workqueue(wq);
	return err;
}
static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	mlx4_ib_mcg_destroy();
	destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);