git.ipfire.org Git - thirdparty/linux.git/commitdiff
vduse: return internal vq group struct as map token
authorEugenio Pérez <eperezma@redhat.com>
Mon, 19 Jan 2026 14:32:57 +0000 (15:32 +0100)
committerMichael S. Tsirkin <mst@redhat.com>
Wed, 28 Jan 2026 20:32:16 +0000 (15:32 -0500)
Return the internal struct that represents the vq group as virtqueue map
token, instead of the device.  This allows the map functions to access
the information per group.

At this moment all the virtqueues share the same vq group, which can
only point to ASID 0.  This change prepares the infrastructure for
actual per-group address space handling.

Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Message-Id: <20260119143306.1818855-5-eperezma@redhat.com>

drivers/vdpa/vdpa_user/vduse_dev.c
include/linux/virtio.h

index 5bffc25a266e5956d5cc23a18c57eafa87e58ca0..68290c3d9d8fe914ef54aa8c9ef62b552b551cc1 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/uio.h>
 #include <linux/vdpa.h>
 #include <linux/nospec.h>
+#include <linux/virtio.h>
 #include <linux/vmalloc.h>
 #include <linux/sched/mm.h>
 #include <uapi/linux/vduse.h>
@@ -85,6 +86,10 @@ struct vduse_umem {
        struct mm_struct *mm;
 };
 
+struct vduse_vq_group {
+       struct vduse_dev *dev;
+};
+
 struct vduse_dev {
        struct vduse_vdpa *vdev;
        struct device *dev;
@@ -118,6 +123,7 @@ struct vduse_dev {
        u32 vq_align;
        u32 ngroups;
        struct vduse_umem *umem;
+       struct vduse_vq_group *groups;
        struct mutex mem_lock;
        unsigned int bounce_size;
        struct mutex domain_lock;
@@ -605,6 +611,17 @@ static u32 vduse_get_vq_group(struct vdpa_device *vdpa, u16 idx)
        return dev->vqs[idx]->group;
 }
 
+static union virtio_map vduse_get_vq_map(struct vdpa_device *vdpa, u16 idx)
+{
+       struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+       u32 vq_group = vduse_get_vq_group(vdpa, idx);
+       union virtio_map ret = {
+               .group = &dev->groups[vq_group],
+       };
+
+       return ret;
+}
+
 static int vduse_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 idx,
                                struct vdpa_vq_state *state)
 {
@@ -825,6 +842,7 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
        .get_vq_affinity        = vduse_vdpa_get_vq_affinity,
        .reset                  = vduse_vdpa_reset,
        .set_map                = vduse_vdpa_set_map,
+       .get_vq_map             = vduse_get_vq_map,
        .free                   = vduse_vdpa_free,
 };
 
@@ -832,7 +850,14 @@ static void vduse_dev_sync_single_for_device(union virtio_map token,
                                             dma_addr_t dma_addr, size_t size,
                                             enum dma_data_direction dir)
 {
-       struct vduse_iova_domain *domain = token.iova_domain;
+       struct vduse_dev *vdev;
+       struct vduse_iova_domain *domain;
+
+       if (!token.group)
+               return;
+
+       vdev = token.group->dev;
+       domain = vdev->domain;
 
        vduse_domain_sync_single_for_device(domain, dma_addr, size, dir);
 }
@@ -841,7 +866,14 @@ static void vduse_dev_sync_single_for_cpu(union virtio_map token,
                                             dma_addr_t dma_addr, size_t size,
                                             enum dma_data_direction dir)
 {
-       struct vduse_iova_domain *domain = token.iova_domain;
+       struct vduse_dev *vdev;
+       struct vduse_iova_domain *domain;
+
+       if (!token.group)
+               return;
+
+       vdev = token.group->dev;
+       domain = vdev->domain;
 
        vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir);
 }
@@ -851,7 +883,14 @@ static dma_addr_t vduse_dev_map_page(union virtio_map token, struct page *page,
                                     enum dma_data_direction dir,
                                     unsigned long attrs)
 {
-       struct vduse_iova_domain *domain = token.iova_domain;
+       struct vduse_dev *vdev;
+       struct vduse_iova_domain *domain;
+
+       if (!token.group)
+               return DMA_MAPPING_ERROR;
+
+       vdev = token.group->dev;
+       domain = vdev->domain;
 
        return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
 }
@@ -860,7 +899,14 @@ static void vduse_dev_unmap_page(union virtio_map token, dma_addr_t dma_addr,
                                 size_t size, enum dma_data_direction dir,
                                 unsigned long attrs)
 {
-       struct vduse_iova_domain *domain = token.iova_domain;
+       struct vduse_dev *vdev;
+       struct vduse_iova_domain *domain;
+
+       if (!token.group)
+               return;
+
+       vdev = token.group->dev;
+       domain = vdev->domain;
 
        return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs);
 }
@@ -868,11 +914,17 @@ static void vduse_dev_unmap_page(union virtio_map token, dma_addr_t dma_addr,
 static void *vduse_dev_alloc_coherent(union virtio_map token, size_t size,
                                      dma_addr_t *dma_addr, gfp_t flag)
 {
-       struct vduse_iova_domain *domain = token.iova_domain;
+       struct vduse_dev *vdev;
+       struct vduse_iova_domain *domain;
        unsigned long iova;
        void *addr;
 
        *dma_addr = DMA_MAPPING_ERROR;
+       if (!token.group)
+               return NULL;
+
+       vdev = token.group->dev;
+       domain = vdev->domain;
        addr = vduse_domain_alloc_coherent(domain, size,
                                           (dma_addr_t *)&iova, flag);
        if (!addr)
@@ -887,14 +939,28 @@ static void vduse_dev_free_coherent(union virtio_map token, size_t size,
                                    void *vaddr, dma_addr_t dma_addr,
                                    unsigned long attrs)
 {
-       struct vduse_iova_domain *domain = token.iova_domain;
+       struct vduse_dev *vdev;
+       struct vduse_iova_domain *domain;
+
+       if (!token.group)
+               return;
+
+       vdev = token.group->dev;
+       domain = vdev->domain;
 
        vduse_domain_free_coherent(domain, size, vaddr, dma_addr, attrs);
 }
 
 static bool vduse_dev_need_sync(union virtio_map token, dma_addr_t dma_addr)
 {
-       struct vduse_iova_domain *domain = token.iova_domain;
+       struct vduse_dev *vdev;
+       struct vduse_iova_domain *domain;
+
+       if (!token.group)
+               return false;
+
+       vdev = token.group->dev;
+       domain = vdev->domain;
 
        return dma_addr < domain->bounce_size;
 }
@@ -908,7 +974,14 @@ static int vduse_dev_mapping_error(union virtio_map token, dma_addr_t dma_addr)
 
 static size_t vduse_dev_max_mapping_size(union virtio_map token)
 {
-       struct vduse_iova_domain *domain = token.iova_domain;
+       struct vduse_dev *vdev;
+       struct vduse_iova_domain *domain;
+
+       if (!token.group)
+               return 0;
+
+       vdev = token.group->dev;
+       domain = vdev->domain;
 
        return domain->bounce_size;
 }
@@ -1726,6 +1799,7 @@ static int vduse_destroy_dev(char *name)
        if (dev->domain)
                vduse_domain_destroy(dev->domain);
        kfree(dev->name);
+       kfree(dev->groups);
        vduse_dev_destroy(dev);
        module_put(THIS_MODULE);
 
@@ -1895,6 +1969,13 @@ static int vduse_create_dev(struct vduse_dev_config *config,
        dev->ngroups = (dev->api_version < VDUSE_API_VERSION_1)
                       ? 1
                       : config->ngroups;
+       dev->groups = kcalloc(dev->ngroups, sizeof(dev->groups[0]),
+                             GFP_KERNEL);
+       if (!dev->groups)
+               goto err_vq_groups;
+       for (u32 i = 0; i < dev->ngroups; ++i)
+               dev->groups[i].dev = dev;
+
        dev->name = kstrdup(config->name, GFP_KERNEL);
        if (!dev->name)
                goto err_str;
@@ -1931,6 +2012,8 @@ err_dev:
 err_idr:
        kfree(dev->name);
 err_str:
+       kfree(dev->groups);
+err_vq_groups:
        vduse_dev_destroy(dev);
 err:
        return ret;
@@ -2092,7 +2175,6 @@ static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
                return -ENOMEM;
        }
 
-       dev->vdev->vdpa.vmap.iova_domain = dev->domain;
        ret = _vdpa_register_device(&dev->vdev->vdpa, dev->vq_num);
        if (ret) {
                put_device(&dev->vdev->vdpa.dev);
index 63bb05ece8c58fe50d79694d29bddea1ee36c005..3bbc4cb6a6727e2d0e0d91a2972748df2cededc9 100644 (file)
@@ -43,13 +43,13 @@ struct virtqueue {
        void *priv;
 };
 
-struct vduse_iova_domain;
+struct vduse_vq_group;
 
 union virtio_map {
        /* Device that performs DMA */
        struct device *dma_dev;
-       /* VDUSE specific mapping data */
-       struct vduse_iova_domain *iova_domain;
+       /* VDUSE specific virtqueue group for doing map */
+       struct vduse_vq_group *group;
 };
 
 int virtqueue_add_outbuf(struct virtqueue *vq,