return ret;
}
-/**
- * unmap_pages() - Make a range of IOVA empty/not present
- * @domain: Domain to manipulate
- * @iova: IO virtual address to start
- * @pgsize: Length of each page
- * @pgcount: Length of the range in pgsize units starting from @iova
- * @iotlb_gather: Gather struct that must be flushed on return
- *
- * unmap_pages() will remove a translation created by map_pages(). It cannot
- * subdivide a mapping created by map_pages(), so it should be called with IOVA
- * ranges that match those passed to map_pages(). The IOVA range can aggregate
- * contiguous map_pages() calls so long as no individual range is split.
- *
- * Context: The caller must hold a write range lock that includes
- * the whole range.
- *
- * Returns: Number of bytes of VA unmapped. iova + res will be the point
- * unmapping stopped.
- */
-size_t DOMAIN_NS(unmap_pages)(struct iommu_domain *domain, unsigned long iova,
- size_t pgsize, size_t pgcount,
+static size_t NS(unmap_range)(struct pt_iommu *iommu_table, dma_addr_t iova,
+ dma_addr_t len,
struct iommu_iotlb_gather *iotlb_gather)
{
- struct pt_iommu *iommu_table =
- container_of(domain, struct pt_iommu, domain);
struct pt_unmap_args unmap = { .free_list = IOMMU_PAGES_LIST_INIT(
unmap.free_list) };
- pt_vaddr_t len = pgsize * pgcount;
struct pt_range range;
int ret;
return unmap.unmapped;
}
-EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(unmap_pages), "GENERIC_PT_IOMMU");
static void NS(get_info)(struct pt_iommu *iommu_table,
struct pt_iommu_info *info)
}
static const struct pt_iommu_ops NS(ops) = {
+ .unmap_range = NS(unmap_range),
#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER) && defined(pt_entry_is_write_dirty) && \
IS_ENABLED(CONFIG_IOMMUFD_TEST) && defined(pt_entry_make_write_dirty)
.set_dirty = NS(set_dirty),
domain->type = __IOMMU_DOMAIN_PAGING;
domain->pgsize_bitmap = info.pgsize_bitmap;
+ domain->is_iommupt = true;
if (pt_feature(common, PT_FEAT_DYNAMIC_TOP))
range = _pt_top_range(common,
#include <linux/sched/mm.h>
#include <linux/msi.h>
#include <uapi/linux/iommufd.h>
+#include <linux/generic_pt/iommu.h>
#include "dma-iommu.h"
#include "iommu-priv.h"
}
EXPORT_SYMBOL_GPL(iommu_map);
-static size_t __iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, size_t size,
- struct iommu_iotlb_gather *iotlb_gather)
+static size_t
+__iommu_unmap_domain_pgtbl(struct iommu_domain *domain, unsigned long iova,
+ size_t size, struct iommu_iotlb_gather *iotlb_gather)
{
const struct iommu_domain_ops *ops = domain->ops;
size_t unmapped_page, unmapped = 0;
- unsigned long orig_iova = iova;
unsigned int min_pagesz;
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
unmapped += unmapped_page;
}
- trace_unmap(orig_iova, size, unmapped);
- iommu_debug_unmap_end(domain, orig_iova, size, unmapped);
+ return unmapped;
+}
+
+static size_t __iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+			    size_t size,
+			    struct iommu_iotlb_gather *iotlb_gather)
+{
+	struct pt_iommu *pt = iommupt_from_domain(domain);
+	size_t unmapped;
+
+	/*
+	 * Generic PT domains are unmapped through pt_iommu_ops->unmap_range()
+	 * instead of the iommu_domain_ops page-table path; both paths share
+	 * the tracing/debug bookkeeping below.
+	 */
+	if (pt)
+		unmapped = pt->ops->unmap_range(pt, iova, size, iotlb_gather);
+	else
+		unmapped = __iommu_unmap_domain_pgtbl(domain, iova, size,
+						      iotlb_gather);
+	trace_unmap(iova, size, unmapped);
+	iommu_debug_unmap_end(domain, iova, size, unmapped);
	return unmapped;
}
struct device *iommu_device;
};
+/*
+ * Return the pt_iommu backing @domain when the domain uses the generic
+ * page table implementation, or NULL when it does not (or when
+ * CONFIG_IOMMU_PT is compiled out, letting the compiler elide the
+ * generic PT branches in callers).
+ */
+static inline struct pt_iommu *iommupt_from_domain(struct iommu_domain *domain)
+{
+	if (!IS_ENABLED(CONFIG_IOMMU_PT) || !domain->is_iommupt)
+		return NULL;
+	return container_of(domain, struct pt_iommu, domain);
+}
+
+
/**
* struct pt_iommu_info - Details about the IOMMU page table
*
};
struct pt_iommu_ops {
+ /**
+ * @unmap_range: Make a range of IOVA empty/not present
+ * @iommu_table: Table to manipulate
+ * @iova: IO virtual address to start
+ * @len: Length of the range starting from @iova
+ * @iotlb_gather: Gather struct that must be flushed on return
+ *
+ * unmap_range() will remove a translation created by map_range(). It
+ * cannot subdivide a mapping created by map_range(), so it should be
+ * called with IOVA ranges that match those passed to map_range(). The
+ * IOVA range can aggregate contiguous map_range() calls so long as no
+ * individual range is split.
+ *
+ * Context: The caller must hold a write range lock that includes
+ * the whole range.
+ *
+ * Returns: Number of bytes of VA unmapped. iova + res will be the
+ * point unmapping stopped.
+ */
+ size_t (*unmap_range)(struct pt_iommu *iommu_table, dma_addr_t iova,
+ dma_addr_t len,
+ struct iommu_iotlb_gather *iotlb_gather);
+
/**
* @set_dirty: Make the iova write dirty
* @iommu_table: Table to manipulate
unsigned long iova, phys_addr_t paddr, \
size_t pgsize, size_t pgcount, \
int prot, gfp_t gfp, size_t *mapped); \
- size_t pt_iommu_##fmt##_unmap_pages( \
- struct iommu_domain *domain, unsigned long iova, \
- size_t pgsize, size_t pgcount, \
- struct iommu_iotlb_gather *iotlb_gather); \
int pt_iommu_##fmt##_read_and_clear_dirty( \
struct iommu_domain *domain, unsigned long iova, size_t size, \
unsigned long flags, struct iommu_dirty_bitmap *dirty); \
*/
#define IOMMU_PT_DOMAIN_OPS(fmt) \
.iova_to_phys = &pt_iommu_##fmt##_iova_to_phys, \
- .map_pages = &pt_iommu_##fmt##_map_pages, \
- .unmap_pages = &pt_iommu_##fmt##_unmap_pages
+ .map_pages = &pt_iommu_##fmt##_map_pages
#define IOMMU_PT_DIRTY_OPS(fmt) \
.read_and_clear_dirty = &pt_iommu_##fmt##_read_and_clear_dirty
struct iommu_domain {
unsigned type;
enum iommu_domain_cookie_type cookie_type;
+ bool is_iommupt;
const struct iommu_domain_ops *ops;
const struct iommu_dirty_ops *dirty_ops;
const struct iommu_ops *owner; /* Whose domain_alloc we came from */