vduse: switch to use virtio map API instead of DMA API
author Jason Wang <jasowang@redhat.com>
Wed, 24 Sep 2025 07:00:45 +0000 (15:00 +0800)
committer Michael S. Tsirkin <mst@redhat.com>
Wed, 1 Oct 2025 11:24:55 +0000 (07:24 -0400)
Lacking support for device-specific mappings in virtio, VDUSE has had to
trick the DMA API in order to make the virtio-vdpa transport work: the
vDPA device is advertised as a DMA device with VDUSE-specific dma_ops
even though it does not perform DMA at all.

This patch fixes that. Thanks to the new mapping operations supported by
virtio and vDPA, VDUSE can simply advertise its own mapping operations to
virtio via virtio-vdpa; the DMA API is then no longer needed by VDUSE, and
the iova domain is used as the mapping token instead.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20250924070045.10361-3-jasowang@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Eugenio Pérez <eperezma@redhat.com>
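
The mechanism behind the switch is the union virtio_map token (see the
include/linux/virtio.h hunk at the end of this diff): a parent driver that
performs real DMA stores a struct device pointer in the union, while VDUSE
stores its struct vduse_iova_domain pointer, and each virtio_map_ops
callback reads back whichever member its own driver put there. A minimal
userspace sketch of that pattern, using hypothetical sketch_* names rather
than the kernel API:

    /* sketch.c: the union-token pattern, outside the kernel */
    #include <stdio.h>
    #include <stddef.h>

    struct device;                        /* opaque; used via pointer only */

    struct sketch_iova_domain {
            size_t bounce_size;
    };

    /* Stand-in for union virtio_map: one token, two interpretations. */
    union sketch_map {
            struct device *dma_dev;                 /* parents doing real DMA */
            struct sketch_iova_domain *iova_domain; /* VDUSE-style parents */
    };

    /* Mirrors vduse_dev_max_mapping_size(): the callback trusts that the
     * transport handed back the member this driver stored in the token. */
    static size_t sketch_max_mapping_size(union sketch_map token)
    {
            return token.iova_domain->bounce_size;
    }

    int main(void)
    {
            struct sketch_iova_domain dom = { .bounce_size = 1 << 20 };
            union sketch_map token = { .iova_domain = &dom };

            printf("max mapping size: %zu\n", sketch_max_mapping_size(token));
            return 0;
    }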
drivers/vdpa/Kconfig
drivers/vdpa/vdpa_user/iova_domain.c
drivers/vdpa/vdpa_user/iova_domain.h
drivers/vdpa/vdpa_user/vduse_dev.c
include/linux/virtio.h

diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index 559fb9d3271fcc64d903b2f2f1a652a5ed15d758..857cf288c876aca0579efe241a396f89ef1754ea 100644
@@ -34,13 +34,7 @@ config VDPA_SIM_BLOCK
 
 config VDPA_USER
        tristate "VDUSE (vDPA Device in Userspace) support"
-       depends on EVENTFD && MMU && HAS_DMA
-       #
-       # This driver incorrectly tries to override the dma_ops.  It should
-       # never have done that, but for now keep it working on architectures
-       # that use dma ops
-       #
-       depends on ARCH_HAS_DMA_OPS
+       depends on EVENTFD && MMU
        select VHOST_IOTLB
        select IOMMU_IOVA
        help
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 58116f89d8dae9daade2c51385964e7f05740bf7..ccaed24b7ef8ded1c622bd0bd1f1ccdcacb92adc 100644
@@ -447,7 +447,7 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
 
 void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
                                  size_t size, dma_addr_t *dma_addr,
-                                 gfp_t flag, unsigned long attrs)
+                                 gfp_t flag)
 {
        struct iova_domain *iovad = &domain->consistent_iovad;
        unsigned long limit = domain->iova_limit;
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index 7f3f0928ec78141cfe4282f100b1d9392c65fd0d..1f3c30be272af1f4be47028115067de5294e7711 100644
@@ -64,7 +64,7 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
 
 void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
                                  size_t size, dma_addr_t *dma_addr,
-                                 gfp_t flag, unsigned long attrs);
+                                 gfp_t flag);
 
 void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
                                void *vaddr, dma_addr_t dma_addr,
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index ad782d20a8ed5283ee1bcbd81c9ba0a3ea5399bd..e7bced0b55422009fc22ca63861d7bb9bd8d0978 100644
@@ -814,59 +814,53 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
        .free                   = vduse_vdpa_free,
 };
 
-static void vduse_dev_sync_single_for_device(struct device *dev,
+static void vduse_dev_sync_single_for_device(union virtio_map token,
                                             dma_addr_t dma_addr, size_t size,
                                             enum dma_data_direction dir)
 {
-       struct vduse_dev *vdev = dev_to_vduse(dev);
-       struct vduse_iova_domain *domain = vdev->domain;
+       struct vduse_iova_domain *domain = token.iova_domain;
 
        vduse_domain_sync_single_for_device(domain, dma_addr, size, dir);
 }
 
-static void vduse_dev_sync_single_for_cpu(struct device *dev,
+static void vduse_dev_sync_single_for_cpu(union virtio_map token,
                                             dma_addr_t dma_addr, size_t size,
                                             enum dma_data_direction dir)
 {
-       struct vduse_dev *vdev = dev_to_vduse(dev);
-       struct vduse_iova_domain *domain = vdev->domain;
+       struct vduse_iova_domain *domain = token.iova_domain;
 
        vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir);
 }
 
-static dma_addr_t vduse_dev_map_page(struct device *dev, struct page *page,
+static dma_addr_t vduse_dev_map_page(union virtio_map token, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
                                     unsigned long attrs)
 {
-       struct vduse_dev *vdev = dev_to_vduse(dev);
-       struct vduse_iova_domain *domain = vdev->domain;
+       struct vduse_iova_domain *domain = token.iova_domain;
 
        return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
 }
 
-static void vduse_dev_unmap_page(struct device *dev, dma_addr_t dma_addr,
-                               size_t size, enum dma_data_direction dir,
-                               unsigned long attrs)
+static void vduse_dev_unmap_page(union virtio_map token, dma_addr_t dma_addr,
+                                size_t size, enum dma_data_direction dir,
+                                unsigned long attrs)
 {
-       struct vduse_dev *vdev = dev_to_vduse(dev);
-       struct vduse_iova_domain *domain = vdev->domain;
+       struct vduse_iova_domain *domain = token.iova_domain;
 
        return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs);
 }
 
-static void *vduse_dev_alloc_coherent(struct device *dev, size_t size,
-                                       dma_addr_t *dma_addr, gfp_t flag,
-                                       unsigned long attrs)
+static void *vduse_dev_alloc_coherent(union virtio_map token, size_t size,
+                                     dma_addr_t *dma_addr, gfp_t flag)
 {
-       struct vduse_dev *vdev = dev_to_vduse(dev);
-       struct vduse_iova_domain *domain = vdev->domain;
+       struct vduse_iova_domain *domain = token.iova_domain;
        unsigned long iova;
        void *addr;
 
        *dma_addr = DMA_MAPPING_ERROR;
        addr = vduse_domain_alloc_coherent(domain, size,
-                               (dma_addr_t *)&iova, flag, attrs);
+                                          (dma_addr_t *)&iova, flag);
        if (!addr)
                return NULL;
 
@@ -875,31 +869,45 @@ static void *vduse_dev_alloc_coherent(struct device *dev, size_t size,
        return addr;
 }
 
-static void vduse_dev_free_coherent(struct device *dev, size_t size,
-                                       void *vaddr, dma_addr_t dma_addr,
-                                       unsigned long attrs)
+static void vduse_dev_free_coherent(union virtio_map token, size_t size,
+                                   void *vaddr, dma_addr_t dma_addr,
+                                   unsigned long attrs)
 {
-       struct vduse_dev *vdev = dev_to_vduse(dev);
-       struct vduse_iova_domain *domain = vdev->domain;
+       struct vduse_iova_domain *domain = token.iova_domain;
 
        vduse_domain_free_coherent(domain, size, vaddr, dma_addr, attrs);
 }
 
-static size_t vduse_dev_max_mapping_size(struct device *dev)
+static bool vduse_dev_need_sync(union virtio_map token, dma_addr_t dma_addr)
 {
-       struct vduse_dev *vdev = dev_to_vduse(dev);
-       struct vduse_iova_domain *domain = vdev->domain;
+       struct vduse_iova_domain *domain = token.iova_domain;
+
+       return dma_addr < domain->bounce_size;
+}
+
+static int vduse_dev_mapping_error(union virtio_map token, dma_addr_t dma_addr)
+{
+       if (unlikely(dma_addr == DMA_MAPPING_ERROR))
+               return -ENOMEM;
+       return 0;
+}
+
+static size_t vduse_dev_max_mapping_size(union virtio_map token)
+{
+       struct vduse_iova_domain *domain = token.iova_domain;
 
        return domain->bounce_size;
 }
 
-static const struct dma_map_ops vduse_dev_dma_ops = {
+static const struct virtio_map_ops vduse_map_ops = {
        .sync_single_for_device = vduse_dev_sync_single_for_device,
        .sync_single_for_cpu = vduse_dev_sync_single_for_cpu,
        .map_page = vduse_dev_map_page,
        .unmap_page = vduse_dev_unmap_page,
        .alloc = vduse_dev_alloc_coherent,
        .free = vduse_dev_free_coherent,
+       .need_sync = vduse_dev_need_sync,
+       .mapping_error = vduse_dev_mapping_error,
        .max_mapping_size = vduse_dev_max_mapping_size,
 };
 
@@ -2003,27 +2011,18 @@ static struct vduse_mgmt_dev *vduse_mgmt;
 static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
 {
        struct vduse_vdpa *vdev;
-       int ret;
 
        if (dev->vdev)
                return -EEXIST;
 
        vdev = vdpa_alloc_device(struct vduse_vdpa, vdpa, dev->dev,
-                                &vduse_vdpa_config_ops, NULL,
+                                &vduse_vdpa_config_ops, &vduse_map_ops,
                                 1, 1, name, true);
        if (IS_ERR(vdev))
                return PTR_ERR(vdev);
 
        dev->vdev = vdev;
        vdev->dev = dev;
-       vdev->vdpa.dev.dma_mask = &vdev->vdpa.dev.coherent_dma_mask;
-       ret = dma_set_mask_and_coherent(&vdev->vdpa.dev, DMA_BIT_MASK(64));
-       if (ret) {
-               put_device(&vdev->vdpa.dev);
-               return ret;
-       }
-       set_dma_ops(&vdev->vdpa.dev, &vduse_dev_dma_ops);
-       vdev->vdpa.vmap.dma_dev = &vdev->vdpa.dev;
        vdev->vdpa.mdev = &vduse_mgmt->mgmt_dev;
 
        return 0;
@@ -2056,6 +2055,7 @@ static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
                return -ENOMEM;
        }
 
+       dev->vdev->vdpa.vmap.iova_domain = dev->domain;
        ret = _vdpa_register_device(&dev->vdev->vdpa, dev->vq_num);
        if (ret) {
                put_device(&dev->vdev->vdpa.dev);
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 3386a4a8d06b0226fb44fa490f65f8cf17ec63fa..96c66126c0741c53301c691b9dda584d792344a0 100644
@@ -41,9 +41,13 @@ struct virtqueue {
        void *priv;
 };
 
+struct vduse_iova_domain;
+
 union virtio_map {
        /* Device that performs DMA */
        struct device *dma_dev;
+       /* VDUSE specific mapping data */
+       struct vduse_iova_domain *iova_domain;
 };
 
 int virtqueue_add_outbuf(struct virtqueue *vq,
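
The wiring is then two small steps, both visible in the vduse_dev.c hunks
above: vduse_dev_init_vdpa() passes &vduse_map_ops to vdpa_alloc_device(),
and vdpa_dev_add() stores the domain in vdpa.vmap.iova_domain before
registering the device. For illustration, a hedged, self-contained sketch
of the transport side, which never inspects the token and only forwards it
to whichever ops table the parent registered (the sketch_* names are again
hypothetical, not the kernel API):

    /* dispatch.c: transport-side forwarding of the opaque token */
    #include <stdio.h>
    #include <stddef.h>

    struct sketch_iova_domain {
            size_t bounce_size;
    };

    union sketch_map {
            struct sketch_iova_domain *iova_domain;
    };

    /* Stand-in for struct virtio_map_ops, trimmed to one callback. */
    struct sketch_map_ops {
            size_t (*max_mapping_size)(union sketch_map token);
    };

    static size_t vduse_like_max_mapping_size(union sketch_map token)
    {
            return token.iova_domain->bounce_size;
    }

    static const struct sketch_map_ops vduse_like_ops = {
            .max_mapping_size = vduse_like_max_mapping_size,
    };

    /* The transport passes the token through untouched: no dma_ops,
     * no struct device, no dma_set_mask_and_coherent(). */
    static size_t transport_query(const struct sketch_map_ops *ops,
                                  union sketch_map token)
    {
            return ops->max_mapping_size(token);
    }

    int main(void)
    {
            struct sketch_iova_domain dom = { .bounce_size = 4096 };
            union sketch_map token = { .iova_domain = &dom };

            printf("%zu\n", transport_query(&vduse_like_ops, token));
            return 0;
    }

This pass-through is what lets the Kconfig hunk drop the HAS_DMA and
ARCH_HAS_DMA_OPS dependencies: the token no longer has to be a DMA-capable
struct device.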