vdpasim: protect concurrent access to iommu iotlb
author     Max Gurtovoy <maxg@mellanox.com>
           Fri, 31 Jul 2020 07:38:22 +0000 (15:38 +0800)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 19 Aug 2020 06:24:12 +0000 (08:24 +0200)
commit 0ea9ee430e74b16c6b17e70757d1c26d8d140e1f upstream.

The IOMMU IOTLB can be accessed by different cores performing IO
through multiple virtqueues. Add a spinlock to synchronize IOTLB
accesses.

This can easily be reproduced by using more than one pktgen thread
to inject traffic into the vdpa simulator.
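
As an illustrative sketch of the race (not code from the patch): two
CPUs servicing different virtqueues reach the same vhost_iotlb with no
serialization, and its interval tree is not safe for concurrent
writers.

	/* Illustrative only -- assumed call flow, not taken verbatim
	 * from the driver:
	 *
	 *   CPU0 (vq0 datapath)              CPU1 (vq1 datapath)
	 *   vdpasim_map_page()               vdpasim_unmap_page()
	 *     vhost_iotlb_add_range()          vhost_iotlb_del_range()
	 *       rb-tree insert  <-- unsynchronized --> rb-tree erase
	 */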

Fixes: 2c53d0f64c06f ("vdpasim: vDPA device simulator")
Cc: stable@vger.kernel.org
Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Link: https://lore.kernel.org/r/20200731073822.13326-1-jasowang@redhat.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 01c456f7c1f7bda2df1c592c6056b54585b40ba6..e2dc8edd680e048600b0d94b642c78444d5e273e 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -70,6 +70,8 @@ struct vdpasim {
        u32 status;
        u32 generation;
        u64 features;
+       /* spinlock to synchronize iommu table */
+       spinlock_t iommu_lock;
 };
 
 static struct vdpasim *vdpasim_dev;
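
Note on the new field: a spinlock has to be initialized before first
use. The initialization hunk is not shown in this excerpt; presumably
it lives in vdpasim_create(). A minimal sketch, with the placement
assumed rather than taken from the patch:

	/* Assumed placement (vdpasim_create(), before any DMA op can
	 * run): the lock declared above must be initialized exactly once.
	 */
	spin_lock_init(&vdpasim->iommu_lock);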
@@ -118,7 +120,9 @@ static void vdpasim_reset(struct vdpasim *vdpasim)
        for (i = 0; i < VDPASIM_VQ_NUM; i++)
                vdpasim_vq_reset(&vdpasim->vqs[i]);
 
+       spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_reset(vdpasim->iommu);
+       spin_unlock(&vdpasim->iommu_lock);
 
        vdpasim->features = 0;
        vdpasim->status = 0;
@@ -235,8 +239,10 @@ static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
        /* For simplicity, use identical mapping to avoid e.g iova
         * allocator.
         */
+       spin_lock(&vdpasim->iommu_lock);
        ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
                                    pa, dir_to_perm(dir));
+       spin_unlock(&vdpasim->iommu_lock);
        if (ret)
                return DMA_MAPPING_ERROR;
 
@@ -250,8 +256,10 @@ static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
        struct vdpasim *vdpasim = dev_to_sim(dev);
        struct vhost_iotlb *iommu = vdpasim->iommu;
 
+       spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_del_range(iommu, (u64)dma_addr,
                              (u64)dma_addr + size - 1);
+       spin_unlock(&vdpasim->iommu_lock);
 }
 
 static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
@@ -263,9 +271,10 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
        void *addr = kmalloc(size, flag);
        int ret;
 
-       if (!addr)
+       spin_lock(&vdpasim->iommu_lock);
+       if (!addr) {
                *dma_addr = DMA_MAPPING_ERROR;
-       else {
+       } else {
                u64 pa = virt_to_phys(addr);
 
                ret = vhost_iotlb_add_range(iommu, (u64)pa,
@@ -278,6 +287,7 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
                } else
                        *dma_addr = (dma_addr_t)pa;
        }
+       spin_unlock(&vdpasim->iommu_lock);
 
        return addr;
 }
@@ -289,8 +299,11 @@ static void vdpasim_free_coherent(struct device *dev, size_t size,
        struct vdpasim *vdpasim = dev_to_sim(dev);
        struct vhost_iotlb *iommu = vdpasim->iommu;
 
+       spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_del_range(iommu, (u64)dma_addr,
                              (u64)dma_addr + size - 1);
+       spin_unlock(&vdpasim->iommu_lock);
+
        kfree(phys_to_virt((uintptr_t)dma_addr));
 }
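
The kfree(phys_to_virt(...)) above works because the simulator hands
out identity-mapped DMA addresses. A self-contained sketch of that
invariant (function name hypothetical, GFP_KERNEL assumed):

	#include <linux/slab.h>
	#include <linux/io.h>

	/* Sketch of the identity-mapping invariant: the dma_addr handed
	 * out is the buffer's physical address, so free/unmap can recover
	 * the original kmalloc() pointer with phys_to_virt().
	 */
	static void identity_roundtrip(size_t size)
	{
		void *addr = kmalloc(size, GFP_KERNEL);
		dma_addr_t dma;

		if (!addr)
			return;
		dma = (dma_addr_t)virt_to_phys(addr);	/* what map returns */
		kfree(phys_to_virt((uintptr_t)dma));	/* recovers addr */
	}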
 
@@ -531,6 +544,7 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
        u64 start = 0ULL, last = 0ULL - 1;
        int ret;
 
+       spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_reset(vdpasim->iommu);
 
        for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
@@ -540,10 +554,12 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
                if (ret)
                        goto err;
        }
+       spin_unlock(&vdpasim->iommu_lock);
        return 0;
 
 err:
        vhost_iotlb_reset(vdpasim->iommu);
+       spin_unlock(&vdpasim->iommu_lock);
        return ret;
 }
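
vdpasim_set_map() rebuilds the whole table inside one critical section
and resets it again on failure, so readers never observe a half-copied
table. Holding a spinlock across vhost_iotlb_add_range() is viable
because that function allocates its entries atomically (GFP_ATOMIC). A
generic sketch of the same idiom (helper name hypothetical):

	#include <linux/kernel.h>
	#include <linux/spinlock.h>
	#include <linux/vhost_iotlb.h>

	/* Hypothetical helper mirroring the idiom above: rebuild dst from
	 * src under one lock, rolling back to an empty table on error.
	 */
	static int rebuild_iotlb_locked(struct vhost_iotlb *dst,
					struct vhost_iotlb *src,
					spinlock_t *lock)
	{
		struct vhost_iotlb_map *map;
		int ret = 0;

		spin_lock(lock);
		vhost_iotlb_reset(dst);
		for (map = vhost_iotlb_itree_first(src, 0, ULLONG_MAX); map;
		     map = vhost_iotlb_itree_next(map, 0, ULLONG_MAX)) {
			ret = vhost_iotlb_add_range(dst, map->start, map->last,
						    map->addr, map->perm);
			if (ret) {
				vhost_iotlb_reset(dst);	/* roll back */
				break;
			}
		}
		spin_unlock(lock);
		return ret;
	}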
 
@@ -551,16 +567,23 @@ static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
                           u64 pa, u32 perm)
 {
 {
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+       int ret;
 
-       return vhost_iotlb_add_range(vdpasim->iommu, iova,
-                                    iova + size - 1, pa, perm);
+       spin_lock(&vdpasim->iommu_lock);
+       ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
+                                   perm);
+       spin_unlock(&vdpasim->iommu_lock);
+
+       return ret;
 }
 
 static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
 {
 {
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
 
+       spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
+       spin_unlock(&vdpasim->iommu_lock);
 
        return 0;
 }
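
Not part of this patch, but a natural hardening follow-up: funnel iotlb
walks through a helper that asserts the lock is held, so a caller that
forgets the lock trips lockdep instead of corrupting the tree (helper
name hypothetical):

	#include <linux/lockdep.h>
	#include <linux/vhost_iotlb.h>

	/* Hypothetical guard: every iotlb walk must hold iommu_lock. */
	static struct vhost_iotlb_map *
	vdpasim_iotlb_first(struct vdpasim *vdpasim, u64 start, u64 last)
	{
		lockdep_assert_held(&vdpasim->iommu_lock);
		return vhost_iotlb_itree_first(vdpasim->iommu, start, last);
	}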