/*
 * mlx5_fc_free() - destroy a standalone (non-pool) flow counter.
 * Returns the HW counter object to firmware, then releases the
 * software state: the counter's private bulk metadata and the
 * counter itself.
 */
static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
mlx5_cmd_fc_free(dev, counter->id);
+ /* single counters now own a private one-entry bulk; free it too */
+ kfree(counter->bulk);
kfree(counter);
}
if (WARN_ON(counter->type == MLX5_FC_TYPE_LOCAL))
return;
- if (counter->bulk)
+ if (counter->type == MLX5_FC_TYPE_POOL_ACQUIRED)
mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
else
mlx5_fc_free(dev, counter);
mlx5_fc_stats_query_all_counters(dev);
}
+/*
+ * mlx5_fc_bulk_init() - initialize the software-only state of a counter
+ * bulk: record the first HW counter id of the range and reset the HWS
+ * action bookkeeping (refcount starts at 0; lock guards action setup).
+ */
+static void mlx5_fc_bulk_init(struct mlx5_fc_bulk *fc_bulk, u32 base_id)
+{
+ fc_bulk->base_id = base_id;
+ refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
+ mutex_init(&fc_bulk->hws_data.lock);
+}
+
/*
 * mlx5_fc_single_alloc() - allocate one standalone flow counter.
 *
 * Pairs the counter with a private one-entry bulk so that standalone
 * counters can share the bulk-based code paths (e.g. base-id lookup).
 * On success the counter's type is MLX5_FC_TYPE_SINGLE and counter->bulk
 * owns the bulk; on failure everything acquired so far is unwound via
 * the goto cleanup chain and an ERR_PTR is returned.
 *
 * NOTE(review): the allocation of @counter itself is elided diff
 * context between the declarations and the NULL check below — confirm
 * against the full file.
 */
static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
{
+ struct mlx5_fc_bulk *fc_bulk;
struct mlx5_fc *counter;
int err;
/* (counter allocation not shown in this hunk) */
if (!counter)
return ERR_PTR(-ENOMEM);
- err = mlx5_cmd_fc_alloc(dev, &counter->id);
- if (err) {
- kfree(counter);
- return ERR_PTR(err);
+ /* one-entry bulk backing this single counter */
+ fc_bulk = kzalloc(sizeof(*fc_bulk), GFP_KERNEL);
+ if (!fc_bulk) {
+ err = -ENOMEM;
+ goto free_counter;
}
+ err = mlx5_cmd_fc_alloc(dev, &counter->id);
+ if (err)
+ goto free_bulk;
+ counter->type = MLX5_FC_TYPE_SINGLE;
+ /* bulk of size 1, based at the freshly allocated HW counter id */
+ mlx5_fs_bulk_init(&fc_bulk->fs_bulk, 1);
+ mlx5_fc_bulk_init(fc_bulk, counter->id);
+ counter->bulk = fc_bulk;
return counter;
+
+free_bulk:
+ kfree(fc_bulk);
+free_counter:
+ kfree(counter);
+ return ERR_PTR(err);
}
static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
counter->id = id;
}
-static void mlx5_fc_bulk_init(struct mlx5_fc_bulk *fc_bulk, u32 base_id)
-{
- fc_bulk->base_id = base_id;
- refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
- mutex_init(&fc_bulk->hws_data.lock);
-}
-
u32 mlx5_fc_get_base_id(struct mlx5_fc *counter)
{
return counter->bulk->base_id;