/* state of dvq mr */
bool initialized;
- /* serialize mkey creation and destruction */
- struct mutex mkey_mtx;
bool user_mr;
};
u32 generation;
struct mlx5_vdpa_mr mr;
+ /* serialize mr access */
+ struct mutex mr_mtx;
struct mlx5_control_vq cvq;
struct workqueue_struct *wq;
unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *mr)
{
- mutex_lock(&mr->mkey_mtx);
+ mutex_lock(&mvdev->mr_mtx);
_mlx5_vdpa_destroy_mr(mvdev, mr);
- mutex_unlock(&mr->mkey_mtx);
+ mutex_unlock(&mvdev->mr_mtx);
}
int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr, struct vhost_iotlb *iotlb)
{
int err;
- mutex_lock(&mvdev->mr.mkey_mtx);
+ mutex_lock(&mvdev->mr_mtx);
err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
- mutex_unlock(&mvdev->mr.mkey_mtx);
+ mutex_unlock(&mvdev->mr_mtx);
+
return err;
}
int err = 0;
*change_map = false;
- mutex_lock(&mr->mkey_mtx);
+ mutex_lock(&mvdev->mr_mtx);
if (mr->initialized) {
mlx5_vdpa_info(mvdev, "memory map update\n");
*change_map = true;
}
if (!*change_map)
err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
- mutex_unlock(&mr->mkey_mtx);
+ mutex_unlock(&mvdev->mr_mtx);
return err;
}
mlx5_vdpa_warn(mvdev, "resources already allocated\n");
return -EINVAL;
}
- mutex_init(&mvdev->mr.mkey_mtx);
+ mutex_init(&mvdev->mr_mtx);
res->uar = mlx5_get_uars_page(mdev);
if (IS_ERR(res->uar)) {
err = PTR_ERR(res->uar);
err_uctx:
mlx5_put_uars_page(mdev, res->uar);
err_uars:
- mutex_destroy(&mvdev->mr.mkey_mtx);
+ mutex_destroy(&mvdev->mr_mtx);
return err;
}
dealloc_pd(mvdev, res->pdn, res->uid);
destroy_uctx(mvdev, res->uid);
mlx5_put_uars_page(mvdev->mdev, res->uar);
- mutex_destroy(&mvdev->mr.mkey_mtx);
+ mutex_destroy(&mvdev->mr_mtx);
res->valid = false;
}
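
Taken together, these hunks replace the per-MR mkey_mtx with a single device-level mr_mtx: the mutex is initialized and torn down with the rest of the device resources, and every MR create/destroy path serializes on it. Below is a minimal, self-contained sketch of that locking life cycle, not driver code: the trimmed structs mirror the ones touched by the patch, while _create_mr_stub()/_destroy_mr_stub() and the example_* functions are hypothetical placeholders standing in for _mlx5_vdpa_create_mr()/_mlx5_vdpa_destroy_mr() and their callers.

#include <linux/mutex.h>

/* trimmed copies of the structs from the patch, for illustration only */
struct mlx5_vdpa_mr {
	bool initialized;
	bool user_mr;
};

struct mlx5_vdpa_dev {
	struct mlx5_vdpa_mr mr;
	/* serialize mr access */
	struct mutex mr_mtx;
};

/* hypothetical stand-ins for _mlx5_vdpa_create_mr()/_mlx5_vdpa_destroy_mr() */
static int _create_mr_stub(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	mr->initialized = true;
	return 0;
}

static void _destroy_mr_stub(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	mr->initialized = false;
}

/* MR state changes take the device-wide mutex, not a per-MR lock */
static int example_create(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	int err;

	mutex_lock(&mvdev->mr_mtx);
	err = _create_mr_stub(mvdev, mr);
	mutex_unlock(&mvdev->mr_mtx);

	return err;
}

static void example_destroy(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	mutex_lock(&mvdev->mr_mtx);
	_destroy_mr_stub(mvdev, mr);
	mutex_unlock(&mvdev->mr_mtx);
}

/* the mutex follows the device resources: init on alloc, destroy on free */
static int example_lifecycle(struct mlx5_vdpa_dev *mvdev)
{
	int err;

	mutex_init(&mvdev->mr_mtx);
	err = example_create(mvdev, &mvdev->mr);
	if (!err)
		example_destroy(mvdev, &mvdev->mr);
	mutex_destroy(&mvdev->mr_mtx);

	return err;
}

As the updated comment suggests, the lock now guards all MR state on the device rather than a single mkey, which is why it moves into struct mlx5_vdpa_dev and is renamed accordingly.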