u32 type;
};
+/*
+ * mlx5-specific RDMA counter: embeds the core rdma_counter and carries the
+ * driver-private optional-counter state that used to live elsewhere.
+ */
+struct mlx5_rdma_counter {
+ struct rdma_counter rdma_counter;
+
+ /* One HW flow counter slot per optional-counter type (indexed by
+ * enum value, e.g. fc[type] in add_op_fc_rules()). */
+ struct mlx5_fc *fc[MLX5_IB_OPCOUNTER_MAX];
+ /* Maps qp_num -> struct mlx5_per_qp_opfc (see xa_load() users). */
+ struct xarray qpn_opfc_xa;
+};
+
+/* Convert the embedded core rdma_counter back to its mlx5 wrapper. */
+static struct mlx5_rdma_counter *to_mcounter(struct rdma_counter *counter)
+{
+ return container_of(counter, struct mlx5_rdma_counter, rdma_counter);
+}
+
/* Build a q-counter descriptor: stringified name plus the field's byte
 * offset within the query_q_counter_out command layout. */
#define INIT_Q_COUNTER(_name) \
{ .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}
return 0;
WARN_ON(!xa_empty(&mcounter->qpn_opfc_xa));
- mlx5r_fs_destroy_fcs(dev, counter);
+ mlx5r_fs_destroy_fcs(dev, mcounter->fc);
MLX5_SET(dealloc_q_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_Q_COUNTER);
MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter->id);
static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
struct ib_qp *qp, u32 port)
{
+ struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
struct mlx5_ib_dev *dev = to_mdev(qp->device);
bool new = false;
int err;
if (err)
goto fail_set_counter;
- err = mlx5r_fs_bind_op_fc(qp, counter, port);
+ if (!counter->mode.bind_opcnt)
+ return 0;
+
+ err = mlx5r_fs_bind_op_fc(qp, mcounter->fc, &mcounter->qpn_opfc_xa,
+ port);
if (err)
goto fail_bind_op_fc;
static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp, u32 port)
{
struct rdma_counter *counter = qp->counter;
+ struct mlx5_rdma_counter *mcounter;
int err;
- mlx5r_fs_unbind_op_fc(qp, counter);
+ mcounter = to_mcounter(counter);
+
+ mlx5r_fs_unbind_op_fc(qp, &mcounter->qpn_opfc_xa);
err = mlx5_ib_qp_set_counter(qp, NULL);
if (err)
return 0;
fail_set_counter:
- mlx5r_fs_bind_op_fc(qp, counter, port);
+ if (counter->mode.bind_opcnt)
+ mlx5r_fs_bind_op_fc(qp, mcounter->fc,
+ &mcounter->qpn_opfc_xa, port);
return err;
}
return 0;
}
-static struct mlx5_per_qp_opfc *
-get_per_qp_opfc(struct mlx5_rdma_counter *mcounter, u32 qp_num, bool *new)
+static struct mlx5_per_qp_opfc *get_per_qp_opfc(struct xarray *qpn_opfc_xa,
+ u32 qp_num, bool *new)
{
struct mlx5_per_qp_opfc *per_qp_opfc;
*new = false;
- per_qp_opfc = xa_load(&mcounter->qpn_opfc_xa, qp_num);
+ per_qp_opfc = xa_load(qpn_opfc_xa, qp_num);
if (per_qp_opfc)
return per_qp_opfc;
per_qp_opfc = kzalloc(sizeof(*per_qp_opfc), GFP_KERNEL);
}
static int add_op_fc_rules(struct mlx5_ib_dev *dev,
- struct mlx5_rdma_counter *mcounter,
+ struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX],
+ struct xarray *qpn_opfc_xa,
struct mlx5_per_qp_opfc *per_qp_opfc,
struct mlx5_ib_flow_prio *prio,
enum mlx5_ib_optional_counter_type type,
return 0;
}
- opfc->fc = mcounter->fc[type];
+ opfc->fc = fc_arr[type];
spec = kcalloc(MAX_OPFC_RULES, sizeof(*spec), GFP_KERNEL);
if (!spec) {
}
prio->refcount += spec_num;
- err = xa_err(xa_store(&mcounter->qpn_opfc_xa, qp_num, per_qp_opfc,
- GFP_KERNEL));
+ err = xa_err(xa_store(qpn_opfc_xa, qp_num, per_qp_opfc, GFP_KERNEL));
if (err)
goto del_rules;
return err;
}
-static bool is_fc_shared_and_in_use(struct mlx5_rdma_counter *mcounter,
- u32 type, struct mlx5_fc **fc)
+static bool
+is_fc_shared_and_in_use(struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX], u32 type,
+ struct mlx5_fc **fc)
{
u32 shared_fc_type;
return false;
}
- *fc = mcounter->fc[shared_fc_type];
+ *fc = fc_arr[shared_fc_type];
if (!(*fc))
return false;
}
void mlx5r_fs_destroy_fcs(struct mlx5_ib_dev *dev,
- struct rdma_counter *counter)
+ struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX])
{
- struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
struct mlx5_fc *in_use_fc;
int i;
for (i = MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP;
i <= MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP; i++) {
- if (!mcounter->fc[i])
+ if (!fc_arr[i])
continue;
- if (is_fc_shared_and_in_use(mcounter, i, &in_use_fc)) {
- mcounter->fc[i] = NULL;
+ if (is_fc_shared_and_in_use(fc_arr, i, &in_use_fc)) {
+ fc_arr[i] = NULL;
continue;
}
- mlx5_fc_destroy(dev->mdev, mcounter->fc[i]);
- mcounter->fc[i] = NULL;
+ mlx5_fc_destroy(dev->mdev, fc_arr[i]);
+ fc_arr[i] = NULL;
}
}
put_per_qp_prio(dev, type);
}
-void mlx5r_fs_unbind_op_fc(struct ib_qp *qp, struct rdma_counter *counter)
+void mlx5r_fs_unbind_op_fc(struct ib_qp *qp, struct xarray *qpn_opfc_xa)
{
- struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
- struct mlx5_ib_dev *dev = to_mdev(counter->device);
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_per_qp_opfc *per_qp_opfc;
struct mlx5_ib_op_fc *in_use_opfc;
struct mlx5_ib_flow_prio *prio;
int i, j;
- per_qp_opfc = xa_load(&mcounter->qpn_opfc_xa, qp->qp_num);
+ per_qp_opfc = xa_load(qpn_opfc_xa, qp->qp_num);
if (!per_qp_opfc)
return;
}
kfree(per_qp_opfc);
- xa_erase(&mcounter->qpn_opfc_xa, qp->qp_num);
+ xa_erase(qpn_opfc_xa, qp->qp_num);
}
-int mlx5r_fs_bind_op_fc(struct ib_qp *qp, struct rdma_counter *counter,
- u32 port)
+int mlx5r_fs_bind_op_fc(struct ib_qp *qp,
+ struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX],
+ struct xarray *qpn_opfc_xa, u32 port)
{
- struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_per_qp_opfc *per_qp_opfc;
struct mlx5_ib_flow_prio *prio;
int i, err, per_qp_type;
bool new;
- if (!counter->mode.bind_opcnt)
- return 0;
-
cnts = &dev->port[port - 1].cnts;
for (i = 0; i <= MLX5_IB_OPCOUNTER_RDMA_RX_BYTES; i++) {
prio = get_opfc_prio(dev, per_qp_type);
WARN_ON(!prio->flow_table);
- if (is_fc_shared_and_in_use(mcounter, per_qp_type, &in_use_fc))
- mcounter->fc[per_qp_type] = in_use_fc;
+ if (is_fc_shared_and_in_use(fc_arr, per_qp_type, &in_use_fc))
+ fc_arr[per_qp_type] = in_use_fc;
- if (!mcounter->fc[per_qp_type]) {
- mcounter->fc[per_qp_type] = mlx5_fc_create(dev->mdev,
- false);
- if (IS_ERR(mcounter->fc[per_qp_type]))
- return PTR_ERR(mcounter->fc[per_qp_type]);
+ if (!fc_arr[per_qp_type]) {
+ fc_arr[per_qp_type] = mlx5_fc_create(dev->mdev, false);
+ if (IS_ERR(fc_arr[per_qp_type]))
+ return PTR_ERR(fc_arr[per_qp_type]);
}
- per_qp_opfc = get_per_qp_opfc(mcounter, qp->qp_num, &new);
+ per_qp_opfc = get_per_qp_opfc(qpn_opfc_xa, qp->qp_num, &new);
if (!per_qp_opfc) {
err = -ENOMEM;
goto free_fc;
}
- err = add_op_fc_rules(dev, mcounter, per_qp_opfc, prio,
- per_qp_type, qp->qp_num, port);
+ err = add_op_fc_rules(dev, fc_arr, qpn_opfc_xa, per_qp_opfc,
+ prio, per_qp_type, qp->qp_num, port);
if (err)
goto del_rules;
}
return 0;
del_rules:
- mlx5r_fs_unbind_op_fc(qp, counter);
+ mlx5r_fs_unbind_op_fc(qp, qpn_opfc_xa);
if (new)
kfree(per_qp_opfc);
free_fc:
- if (xa_empty(&mcounter->qpn_opfc_xa))
- mlx5r_fs_destroy_fcs(dev, counter);
+ if (xa_empty(qpn_opfc_xa))
+ mlx5r_fs_destroy_fcs(dev, fc_arr);
return err;
}