#include <linux/uio.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
+#include <linux/virtio.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <uapi/linux/vduse.h>
struct mm_struct *mm;
};
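+/*
+ * Mapping token handed out to the virtio core: one instance per
+ * virtqueue group, holding only a back-pointer to the owning device.
+ */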
+struct vduse_vq_group {
+ struct vduse_dev *dev;
+};
+
struct vduse_dev {
struct vduse_vdpa *vdev;
struct device *dev;
u32 vq_align;
u32 ngroups;
struct vduse_umem *umem;
+ struct vduse_vq_group *groups;
struct mutex mem_lock;
unsigned int bounce_size;
struct mutex domain_lock;
return dev->vqs[idx]->group;
}
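+/*
+ * Resolve the group that virtqueue @idx belongs to and hand it back as
+ * the opaque map token that the mapping callbacks below receive.
+ */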
+static union virtio_map vduse_get_vq_map(struct vdpa_device *vdpa, u16 idx)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ u32 vq_group = vduse_get_vq_group(vdpa, idx);
+ union virtio_map ret = {
+         .group = &dev->groups[vq_group],
+ };
+
+ return ret;
+}
+
static int vduse_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 idx,
struct vdpa_vq_state *state)
{
.get_vq_affinity = vduse_vdpa_get_vq_affinity,
.reset = vduse_vdpa_reset,
.set_map = vduse_vdpa_set_map,
+ .get_vq_map = vduse_get_vq_map,
.free = vduse_vdpa_free,
};
dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
- struct vduse_iova_domain *domain = token.iova_domain;
+ struct vduse_dev *vdev;
+ struct vduse_iova_domain *domain;
+
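+ /* A device without a map token has no IOVA domain to sync against. */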
+ if (!token.group)
+         return;
+
+ vdev = token.group->dev;
+ domain = vdev->domain;
vduse_domain_sync_single_for_device(domain, dma_addr, size, dir);
}
dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
- struct vduse_iova_domain *domain = token.iova_domain;
+ struct vduse_dev *vdev;
+ struct vduse_iova_domain *domain;
+
+ if (!token.group)
+         return;
+
+ vdev = token.group->dev;
+ domain = vdev->domain;
vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir);
}
enum dma_data_direction dir,
unsigned long attrs)
{
- struct vduse_iova_domain *domain = token.iova_domain;
+ struct vduse_dev *vdev;
+ struct vduse_iova_domain *domain;
+
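+ /* Without a map token there is no domain to map into; report failure. */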
+ if (!token.group)
+         return DMA_MAPPING_ERROR;
+
+ vdev = token.group->dev;
+ domain = vdev->domain;
return vduse_domain_map_page(domain, page, offset, size, dir, attrs);
}
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- struct vduse_iova_domain *domain = token.iova_domain;
+ struct vduse_dev *vdev;
+ struct vduse_iova_domain *domain;
+
+ if (!token.group)
+         return;
+
+ vdev = token.group->dev;
+ domain = vdev->domain;
return vduse_domain_unmap_page(domain, dma_addr, size, dir, attrs);
}
static void *vduse_dev_alloc_coherent(union virtio_map token, size_t size,
dma_addr_t *dma_addr, gfp_t flag)
{
- struct vduse_iova_domain *domain = token.iova_domain;
+ struct vduse_dev *vdev;
+ struct vduse_iova_domain *domain;
unsigned long iova;
void *addr;
*dma_addr = DMA_MAPPING_ERROR;
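+ /* dma_addr already carries DMA_MAPPING_ERROR if we bail out here. */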
+ if (!token.group)
+         return NULL;
+
+ vdev = token.group->dev;
+ domain = vdev->domain;
addr = vduse_domain_alloc_coherent(domain, size,
(dma_addr_t *)&iova, flag);
if (!addr)
void *vaddr, dma_addr_t dma_addr,
unsigned long attrs)
{
- struct vduse_iova_domain *domain = token.iova_domain;
+ struct vduse_dev *vdev;
+ struct vduse_iova_domain *domain;
+
+ if (!token.group)
+         return;
+
+ vdev = token.group->dev;
+ domain = vdev->domain;
vduse_domain_free_coherent(domain, size, vaddr, dma_addr, attrs);
}
static bool vduse_dev_need_sync(union virtio_map token, dma_addr_t dma_addr)
{
- struct vduse_iova_domain *domain = token.iova_domain;
+ struct vduse_dev *vdev;
+ struct vduse_iova_domain *domain;
+
+ if (!token.group)
+         return false;
+
+ vdev = token.group->dev;
+ domain = vdev->domain;
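+ /* Only addresses inside the bounce buffer range need explicit syncing. */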
return dma_addr < domain->bounce_size;
}
static size_t vduse_dev_max_mapping_size(union virtio_map token)
{
- struct vduse_iova_domain *domain = token.iova_domain;
+ struct vduse_dev *vdev;
+ struct vduse_iova_domain *domain;
+
+ if (!token.group)
+         return 0;
+
+ vdev = token.group->dev;
+ domain = vdev->domain;
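+ /* Streaming mappings go through the bounce buffer, which caps their size. */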
return domain->bounce_size;
}
if (dev->domain)
vduse_domain_destroy(dev->domain);
kfree(dev->name);
+ kfree(dev->groups);
vduse_dev_destroy(dev);
module_put(THIS_MODULE);
dev->ngroups = (dev->api_version < VDUSE_API_VERSION_1)
? 1
: config->ngroups;
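+ /*
+  * One entry per vq group, each pointing back at the device so a map
+  * token can be resolved to its IOVA domain.
+  */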
+ dev->groups = kcalloc(dev->ngroups, sizeof(dev->groups[0]),
+                       GFP_KERNEL);
+ if (!dev->groups)
+         goto err_vq_groups;
+ for (u32 i = 0; i < dev->ngroups; ++i)
+         dev->groups[i].dev = dev;
+
dev->name = kstrdup(config->name, GFP_KERNEL);
if (!dev->name)
goto err_str;
err_idr:
kfree(dev->name);
err_str:
+ kfree(dev->groups);
+err_vq_groups:
vduse_dev_destroy(dev);
err:
return ret;
return -ENOMEM;
}
- dev->vdev->vdpa.vmap.iova_domain = dev->domain;
ret = _vdpa_register_device(&dev->vdev->vdpa, dev->vq_num);
if (ret) {
put_device(&dev->vdev->vdpa.dev);