}
static unsigned int calc_sg_nents(struct dma_iova_state *state,
- struct dma_buf_phys_vec *phys_vec,
- size_t nr_ranges, size_t size)
+ struct phys_vec *phys_vec, size_t nr_ranges,
+ size_t size)
{
unsigned int nents = 0;
size_t i;
*/
struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach,
struct p2pdma_provider *provider,
- struct dma_buf_phys_vec *phys_vec,
+ struct phys_vec *phys_vec,
size_t nr_ranges, size_t size,
enum dma_data_direction dir)
{
struct iopt_pages_dmabuf {
struct dma_buf_attachment *attach;
- struct dma_buf_phys_vec phys;
+ struct phys_vec phys;
/* Always PAGE_SIZE aligned */
unsigned long start;
struct list_head tracker;
struct iommu_option;
struct iommufd_device;
struct dma_buf_attachment;
-struct dma_buf_phys_vec;
struct iommufd_sw_msi_map {
struct list_head sw_msi_item;
void iommufd_test_exit(void);
bool iommufd_selftest_is_mock_dev(struct device *dev);
int iommufd_test_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
- struct dma_buf_phys_vec *phys);
+ struct phys_vec *phys);
#else
static inline void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
unsigned int ioas_id,
}
static inline int
iommufd_test_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
- struct dma_buf_phys_vec *phys)
+ struct phys_vec *phys)
{
return -EOPNOTSUPP;
}
}
struct pfn_reader_dmabuf {
- struct dma_buf_phys_vec phys;
+ struct phys_vec phys;
unsigned long start_offset;
};
*/
static int
sym_vfio_pci_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
- struct dma_buf_phys_vec *phys)
+ struct phys_vec *phys)
{
typeof(&vfio_pci_dma_buf_iommufd_map) fn;
int rc;
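
The shim above lets iommufd resolve vfio_pci_dma_buf_iommufd_map() at runtime instead of linking against vfio-pci directly. The hunk ends before the function body; a sketch of how such a shim typically completes, using the kernel's symbol_get()/symbol_put() pattern (the body shown is an assumption, not part of this patch):

	/* Presumed shim body (sketch, not from this patch). */
	fn = symbol_get(vfio_pci_dma_buf_iommufd_map);
	if (!fn)
		return -EOPNOTSUPP;
	rc = fn(attachment, phys);
	symbol_put(vfio_pci_dma_buf_iommufd_map);
	return rc;
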
};
int iommufd_test_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
- struct dma_buf_phys_vec *phys)
+ struct phys_vec *phys)
{
struct iommufd_test_dma_buf *priv = attachment->dmabuf->priv;
static int nvgrace_get_dmabuf_phys(struct vfio_pci_core_device *core_vdev,
struct p2pdma_provider **provider,
unsigned int region_index,
- struct dma_buf_phys_vec *phys_vec,
+ struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges)
{
struct vfio_pci_core_device *vdev;
struct list_head dmabufs_elm;
size_t size;
- struct dma_buf_phys_vec *phys_vec;
+ struct phys_vec *phys_vec;
struct p2pdma_provider *provider;
u32 nr_ranges;
u8 revoked : 1;
* will fail if it is currently revoked
*/
int vfio_pci_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
- struct dma_buf_phys_vec *phys)
+ struct phys_vec *phys)
{
struct vfio_pci_dma_buf *priv;
}
EXPORT_SYMBOL_FOR_MODULES(vfio_pci_dma_buf_iommufd_map, "iommufd");
-int vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
+int vfio_pci_core_fill_phys_vec(struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges, phys_addr_t start,
phys_addr_t len)
int vfio_pci_core_get_dmabuf_phys(struct vfio_pci_core_device *vdev,
struct p2pdma_provider **provider,
unsigned int region_index,
- struct dma_buf_phys_vec *phys_vec,
+ struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges)
{
struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach,
struct p2pdma_provider *provider,
- struct dma_buf_phys_vec *phys_vec,
+ struct phys_vec *phys_vec,
size_t nr_ranges, size_t size,
enum dma_data_direction dir);
void dma_buf_free_sgt(struct dma_buf_attachment *attach, struct sg_table *sgt,
void *priv;
};
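
dma_buf_phys_vec_to_sgt() is the exporter-side helper that turns a phys_vec array into a DMA-mapped scatterlist. A minimal sketch of an exporter map callback built on it; only the helper's signature comes from this patch, while mydrv_map_dma_buf and struct mydrv_buf are hypothetical:

	/* Hypothetical exporter map callback; error handling trimmed. */
	static struct sg_table *
	mydrv_map_dma_buf(struct dma_buf_attachment *attach,
			  enum dma_data_direction dir)
	{
		struct mydrv_buf *priv = attach->dmabuf->priv;

		/* Convert the physical ranges into a DMA-mapped sg_table. */
		return dma_buf_phys_vec_to_sgt(attach, priv->provider,
					       priv->phys_vec, priv->nr_ranges,
					       priv->size, dir);
	}
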
-/**
- * struct dma_buf_phys_vec - describe continuous chunk of memory
- * @paddr: physical address of that chunk
- * @len: Length of this chunk
- */
-struct dma_buf_phys_vec {
- phys_addr_t paddr;
- size_t len;
-};
-
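
With the dma-buf-local struct gone, every caller above relies on a generic struct phys_vec being visible from a common header. That definition is not part of this section; assuming it mirrors the block layer's struct phys_vec, it would look roughly like:

	/*
	 * Assumed generic definition in a common header. The field types
	 * are an assumption: the block layer's variant uses a u32 length,
	 * whereas the struct removed here used size_t.
	 */
	struct phys_vec {
		phys_addr_t	paddr;
		u32		len;
	};
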
/**
* DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
* @name: export-info name
struct vfio_pci_core_device;
struct vfio_pci_region;
struct p2pdma_provider;
-struct dma_buf_phys_vec;
struct dma_buf_attachment;
struct vfio_pci_eventfd {
int (*get_dmabuf_phys)(struct vfio_pci_core_device *vdev,
struct p2pdma_provider **provider,
unsigned int region_index,
- struct dma_buf_phys_vec *phys_vec,
+ struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges);
};
#if IS_ENABLED(CONFIG_VFIO_PCI_DMABUF)
-int vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
+int vfio_pci_core_fill_phys_vec(struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges, phys_addr_t start,
phys_addr_t len);
int vfio_pci_core_get_dmabuf_phys(struct vfio_pci_core_device *vdev,
struct p2pdma_provider **provider,
unsigned int region_index,
- struct dma_buf_phys_vec *phys_vec,
+ struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges);
#else
static inline int
-vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
+vfio_pci_core_fill_phys_vec(struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges, phys_addr_t start,
phys_addr_t len)
}
static inline int vfio_pci_core_get_dmabuf_phys(
struct vfio_pci_core_device *vdev, struct p2pdma_provider **provider,
- unsigned int region_index, struct dma_buf_phys_vec *phys_vec,
+ unsigned int region_index, struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges, size_t nr_ranges)
{
return -EOPNOTSUPP;
}
int vfio_pci_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
- struct dma_buf_phys_vec *phys);
+ struct phys_vec *phys);
#endif /* VFIO_PCI_CORE_H */
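
For reference, a sketch of how a driver-specific get_dmabuf_phys callback might compose with the renamed fill helper. Only the helpers' signatures come from this patch; mydrv_get_dmabuf_phys, the provider handling, and the clipping semantics are assumptions:

	static int mydrv_get_dmabuf_phys(struct vfio_pci_core_device *vdev,
					 struct p2pdma_provider **provider,
					 unsigned int region_index,
					 struct phys_vec *phys_vec,
					 struct vfio_region_dma_range *dma_ranges,
					 size_t nr_ranges)
	{
		/* Provider setup omitted; bounds come from the BAR. */
		phys_addr_t start = pci_resource_start(vdev->pdev, region_index);
		phys_addr_t len = pci_resource_len(vdev->pdev, region_index);

		/* Translate the user-supplied ranges into phys_vec entries. */
		return vfio_pci_core_fill_phys_vec(phys_vec, dma_ranges,
						   nr_ranges, start, len);
	}
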