{
struct vfio_dma_region *region;
- list_for_each_entry(region, &device->dma_regions, link) {
+ list_for_each_entry(region, &device->iommu->dma_regions, link) {
if (vaddr < region->vaddr)
continue;
.size = region->size,
};
- if (ioctl(device->container_fd, VFIO_IOMMU_MAP_DMA, &args))
+ if (ioctl(device->iommu->container_fd, VFIO_IOMMU_MAP_DMA, &args))
return -errno;
return 0;
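(For reference: after this change the legacy-container map helper presumably reads as sketched below. The .argsz and .flags initializers are not part of the hunk and are inferred from the type1 uAPI in <linux/vfio.h>, so treat them as assumptions.)

static int vfio_iommu_dma_map(struct vfio_pci_device *device,
			      struct vfio_dma_region *region)
{
	struct vfio_iommu_type1_dma_map args = {
		.argsz = sizeof(args),		/* inferred, not shown in the hunk */
		.flags = VFIO_DMA_MAP_FLAG_READ |
			 VFIO_DMA_MAP_FLAG_WRITE,	/* inferred */
		.vaddr = (u64)region->vaddr,
		.iova = region->iova,
		.size = region->size,
	};

	/* Map through the legacy type1 container, now hanging off device->iommu. */
	if (ioctl(device->iommu->container_fd, VFIO_IOMMU_MAP_DMA, &args))
		return -errno;

	return 0;
}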
.user_va = (u64)region->vaddr,
.iova = region->iova,
.length = region->size,
- .ioas_id = device->ioas_id,
+ .ioas_id = device->iommu->ioas_id,
};
- if (ioctl(device->iommufd, IOMMU_IOAS_MAP, &args))
+ if (ioctl(device->iommu->iommufd, IOMMU_IOAS_MAP, &args))
return -errno;
return 0;
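(Similarly, the iommufd map helper around the hunk above would look roughly like this. The .size and .flags initializers are inferred from <linux/iommufd.h>; IOMMU_IOAS_MAP_FIXED_IOVA is assumed because the caller supplies .iova.)

static int iommufd_dma_map(struct vfio_pci_device *device,
			   struct vfio_dma_region *region)
{
	struct iommu_ioas_map args = {
		.size = sizeof(args),			/* inferred, not shown in the hunk */
		.flags = IOMMU_IOAS_MAP_READABLE |	/* inferred */
			 IOMMU_IOAS_MAP_WRITEABLE |
			 IOMMU_IOAS_MAP_FIXED_IOVA,
		.user_va = (u64)region->vaddr,
		.iova = region->iova,
		.length = region->size,
		.ioas_id = device->iommu->ioas_id,
	};

	/* Map into the IOAS allocated during setup, via the shared iommufd. */
	if (ioctl(device->iommu->iommufd, IOMMU_IOAS_MAP, &args))
		return -errno;

	return 0;
}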
{
int ret;
- if (device->iommufd)
+ if (device->iommu->iommufd)
ret = iommufd_dma_map(device, region);
else
ret = vfio_iommu_dma_map(device, region);
if (ret)
return ret;
- list_add(&region->link, &device->dma_regions);
+ list_add(&region->link, &device->iommu->dma_regions);
return 0;
}
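(The device->iommu indirection implies a container struct roughly like the sketch below. The field names are inferred from the accesses in this patch; the exact layout and any additional members are assumptions.)

/* Hypothetical layout, reconstructed from the accesses in this diff. */
struct vfio_iommu {
	const struct vfio_iommu_mode *mode;	/* result of lookup_iommu_mode() */
	int container_fd;			/* legacy type1 container, if used */
	int iommufd;				/* /dev/iommu fd; 0 means legacy mode */
	u32 ioas_id;				/* IOAS the device is attached to */
	struct list_head dma_regions;		/* vfio_dma_region.link list */
};

Keeping iommufd as the discriminator preserves the existing non-zero checks, which is also why the setup path below asserts that the fd is strictly greater than 0.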
{
int ret;
- if (device->iommufd)
- ret = iommufd_dma_unmap(device->iommufd, region->iova,
- region->size, device->ioas_id,
+ if (device->iommu->iommufd)
+ ret = iommufd_dma_unmap(device->iommu->iommufd, region->iova,
+ region->size, device->iommu->ioas_id,
unmapped);
else
- ret = vfio_iommu_dma_unmap(device->container_fd, region->iova,
- region->size, 0, unmapped);
+ ret = vfio_iommu_dma_unmap(device->iommu->container_fd,
+ region->iova, region->size, 0,
+ unmapped);
if (ret)
return ret;
int ret;
struct vfio_dma_region *curr, *next;
- if (device->iommufd)
- ret = iommufd_dma_unmap(device->iommufd, 0, UINT64_MAX,
- device->ioas_id, unmapped);
+ if (device->iommu->iommufd)
+ ret = iommufd_dma_unmap(device->iommu->iommufd, 0, UINT64_MAX,
+ device->iommu->ioas_id, unmapped);
else
- ret = vfio_iommu_dma_unmap(device->container_fd, 0, 0,
+ ret = vfio_iommu_dma_unmap(device->iommu->container_fd, 0, 0,
VFIO_DMA_UNMAP_FLAG_ALL, unmapped);
if (ret)
return ret;
- list_for_each_entry_safe(curr, next, &device->dma_regions, link)
+ list_for_each_entry_safe(curr, next, &device->iommu->dma_regions, link)
list_del_init(&curr->link);
return 0;
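(A minimal usage sketch of the map and unmap-all paths. The wrapper names vfio_pci_dma_map() and vfio_pci_dma_unmap_all() and their int returns are assumptions based on the bodies above; the buffer and IOVA values are made up.)

/* Needs <sys/mman.h> and <unistd.h>. */
void *buf = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
struct vfio_dma_region region = {
	.vaddr = buf,
	.iova = 0x100000,	/* arbitrary example IOVA */
	.size = getpagesize(),
};
u64 unmapped;

VFIO_ASSERT_EQ(vfio_pci_dma_map(device, &region), 0);
VFIO_ASSERT_EQ(vfio_pci_dma_unmap_all(device, &unmapped), 0);
VFIO_ASSERT_EQ(unmapped, region.size);	/* the whole region came back */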
ioctl_assert(device->group_fd, VFIO_GROUP_GET_STATUS, &group_status);
VFIO_ASSERT_TRUE(group_status.flags & VFIO_GROUP_FLAGS_VIABLE);
- ioctl_assert(device->group_fd, VFIO_GROUP_SET_CONTAINER, &device->container_fd);
+ ioctl_assert(device->group_fd, VFIO_GROUP_SET_CONTAINER, &device->iommu->container_fd);
}
static void vfio_pci_container_setup(struct vfio_pci_device *device, const char *bdf)
{
- unsigned long iommu_type = device->iommu_mode->iommu_type;
- const char *path = device->iommu_mode->container_path;
+ unsigned long iommu_type = device->iommu->mode->iommu_type;
+ const char *path = device->iommu->mode->container_path;
int version;
int ret;
- device->container_fd = open(path, O_RDWR);
- VFIO_ASSERT_GE(device->container_fd, 0, "open(%s) failed\n", path);
+ device->iommu->container_fd = open(path, O_RDWR);
+ VFIO_ASSERT_GE(device->iommu->container_fd, 0, "open(%s) failed\n", path);
- version = ioctl(device->container_fd, VFIO_GET_API_VERSION);
+ version = ioctl(device->iommu->container_fd, VFIO_GET_API_VERSION);
VFIO_ASSERT_EQ(version, VFIO_API_VERSION, "Unsupported version: %d\n", version);
vfio_pci_group_setup(device, bdf);
- ret = ioctl(device->container_fd, VFIO_CHECK_EXTENSION, iommu_type);
+ ret = ioctl(device->iommu->container_fd, VFIO_CHECK_EXTENSION, iommu_type);
VFIO_ASSERT_GT(ret, 0, "VFIO IOMMU type %lu not supported\n", iommu_type);
- ioctl_assert(device->container_fd, VFIO_SET_IOMMU, (void *)iommu_type);
+ ioctl_assert(device->iommu->container_fd, VFIO_SET_IOMMU, (void *)iommu_type);
device->fd = ioctl(device->group_fd, VFIO_GROUP_GET_DEVICE_FD, bdf);
VFIO_ASSERT_GE(device->fd, 0);
* used to check if iommufd is enabled. In practice open() will never
* return 0 unless stdin is closed.
*/
- device->iommufd = open("/dev/iommu", O_RDWR);
- VFIO_ASSERT_GT(device->iommufd, 0);
+ device->iommu->iommufd = open("/dev/iommu", O_RDWR);
+ VFIO_ASSERT_GT(device->iommu->iommufd, 0);
- vfio_device_bind_iommufd(device->fd, device->iommufd);
- device->ioas_id = iommufd_ioas_alloc(device->iommufd);
- vfio_device_attach_iommufd_pt(device->fd, device->ioas_id);
+ vfio_device_bind_iommufd(device->fd, device->iommu->iommufd);
+ device->iommu->ioas_id = iommufd_ioas_alloc(device->iommu->iommufd);
+ vfio_device_attach_iommufd_pt(device->fd, device->iommu->ioas_id);
}
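(For context, the three helpers used in this setup path are assumed to be thin wrappers over the VFIO/iommufd uAPI, roughly as below. The helper names come from the hunk; the bodies are reconstructed from <linux/vfio.h> and <linux/iommufd.h> and may differ from the real ones.)

static void vfio_device_bind_iommufd(int device_fd, int iommufd)
{
	struct vfio_device_bind_iommufd args = {
		.argsz = sizeof(args),
		.iommufd = iommufd,
	};

	/* Associate the vfio device fd with the iommufd context. */
	ioctl_assert(device_fd, VFIO_DEVICE_BIND_IOMMUFD, &args);
}

static u32 iommufd_ioas_alloc(int iommufd)
{
	struct iommu_ioas_alloc args = {
		.size = sizeof(args),
	};

	/* Allocate an empty IO address space to map DMA into. */
	ioctl_assert(iommufd, IOMMU_IOAS_ALLOC, &args);
	return args.out_ioas_id;
}

static void vfio_device_attach_iommufd_pt(int device_fd, u32 pt_id)
{
	struct vfio_device_attach_iommufd_pt args = {
		.argsz = sizeof(args),
		.pt_id = pt_id,
	};

	/* Attach the device to the IOAS (or hwpt) identified by pt_id. */
	ioctl_assert(device_fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &args);
}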
struct vfio_pci_device *vfio_pci_device_init(const char *bdf, const char *iommu_mode)
device = calloc(1, sizeof(*device));
VFIO_ASSERT_NOT_NULL(device);
- INIT_LIST_HEAD(&device->dma_regions);
+ device->iommu = calloc(1, sizeof(*device->iommu));
+ VFIO_ASSERT_NOT_NULL(device->iommu);
+
+ INIT_LIST_HEAD(&device->iommu->dma_regions);
- device->iommu_mode = lookup_iommu_mode(iommu_mode);
+ device->iommu->mode = lookup_iommu_mode(iommu_mode);
- if (device->iommu_mode->container_path)
+ if (device->iommu->mode->container_path)
vfio_pci_container_setup(device, bdf);
else
vfio_pci_iommufd_setup(device, bdf);
VFIO_ASSERT_EQ(close(device->msi_eventfds[i]), 0);
}
- if (device->iommufd) {
- VFIO_ASSERT_EQ(close(device->iommufd), 0);
+ if (device->iommu->iommufd) {
+ VFIO_ASSERT_EQ(close(device->iommu->iommufd), 0);
} else {
VFIO_ASSERT_EQ(close(device->group_fd), 0);
- VFIO_ASSERT_EQ(close(device->container_fd), 0);
+ VFIO_ASSERT_EQ(close(device->iommu->container_fd), 0);
}
+ free(device->iommu);
free(device);
}
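(End to end, the refactored lifecycle would be exercised roughly as follows. The BDF string and the vfio_pci_device_cleanup() name are illustrative assumptions.)

struct vfio_pci_device *device;

/* "iommufd" selects the /dev/iommu path; a mode with a non-NULL
 * container_path would take the legacy container setup instead. */
device = vfio_pci_device_init("0000:00:04.0", "iommufd");

/* ... map DMA regions and run the test ... */

vfio_pci_device_cleanup(device);	/* closes fds, then frees device->iommu and device */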