pt_oaddr_t oa;
unsigned int leaf_pgsize_lg2;
unsigned int leaf_level;
+ pt_vaddr_t num_leaves;
};
/*
static int __map_range_leaf(struct pt_range *range, void *arg,
unsigned int level, struct pt_table_p *table)
{
+ struct pt_iommu *iommu_table = iommu_from_common(range->common);
struct pt_state pts = pt_init(range, level, table);
struct pt_iommu_map_args *map = arg;
unsigned int leaf_pgsize_lg2 = map->leaf_pgsize_lg2;
unsigned int start_index;
pt_oaddr_t oa = map->oa;
+ unsigned int num_leaves;
+ unsigned int orig_end;
+ pt_vaddr_t last_va;
unsigned int step;
bool need_contig;
int ret = 0;
_pt_iter_first(&pts);
start_index = pts.index;
+ orig_end = pts.end_index;
+ if (pts.index + map->num_leaves < pts.end_index) {
+ /* Need to stop in the middle of the table to change sizes */
+ pts.end_index = pts.index + map->num_leaves;
+ num_leaves = 0;
+ } else {
+ num_leaves = map->num_leaves - (pts.end_index - pts.index);
+ }
+
do {
pts.type = pt_load_entry_raw(&pts);
if (pts.type != PT_ENTRY_EMPTY || need_contig) {
flush_writes_range(&pts, start_index, pts.index);
map->oa = oa;
- return ret;
+ map->num_leaves = num_leaves;
+ if (ret || num_leaves)
+ return ret;
+
+ /* range->va is not valid if we reached the end of the table */
+ pts.index -= step;
+ pt_index_to_va(&pts);
+ pts.index += step;
+ last_va = range->va + log2_to_int(leaf_pgsize_lg2);
+
+ if (last_va - 1 == range->last_va) {
+ PT_WARN_ON(pts.index != orig_end);
+ return 0;
+ }
+
+ /*
+ * Reached a point where the page size changed, compute the new
+ * parameters.
+ */
+ map->leaf_pgsize_lg2 = pt_compute_best_pgsize(
+ iommu_table->domain.pgsize_bitmap, last_va, range->last_va, oa);
+ map->leaf_level =
+ pt_pgsz_lg2_to_level(range->common, map->leaf_pgsize_lg2);
+ map->num_leaves = pt_pgsz_count(iommu_table->domain.pgsize_bitmap,
+ last_va, range->last_va, oa,
+ map->leaf_pgsize_lg2);
+
+ /* Didn't finish this table level, caller will repeat it */
+ if (pts.index != orig_end) {
+ if (pts.index != start_index)
+ pt_index_to_va(&pts);
+ return -EAGAIN;
+ }
+ return 0;
}
static int __map_range(struct pt_range *range, void *arg, unsigned int level,
if (pts.type != PT_ENTRY_EMPTY)
return -EADDRINUSE;
ret = pt_iommu_new_table(&pts, &map->attrs);
- if (ret) {
- /*
- * Racing with another thread installing a table
- */
- if (ret == -EAGAIN)
- continue;
+ /* EAGAIN on a race will loop again */
+ if (ret)
return ret;
- }
} else {
pts.table_lower = pt_table_ptr(&pts);
/*
* The already present table can possibly be shared with another
* concurrent map.
*/
- if (map->leaf_level == level - 1)
- ret = pt_descend(&pts, arg, __map_range_leaf);
- else
- ret = pt_descend(&pts, arg, __map_range);
+ do {
+ if (map->leaf_level == level - 1)
+ ret = pt_descend(&pts, arg, __map_range_leaf);
+ else
+ ret = pt_descend(&pts, arg, __map_range);
+ } while (ret == -EAGAIN);
if (ret)
return ret;
pt_index_to_va(&pts);
if (pts.index >= pts.end_index)
break;
+
+ /*
+ * The descend from this level ran __map_range_leaf(), but the
+ * target leaf level has since been updated to this level itself.
+ * Return -EAGAIN so the caller re-invokes this level with
+ * __map_range_leaf().
+ */
+ if (map->leaf_level == level)
+ return -EAGAIN;
} while (true);
return 0;
}
static int do_map(struct pt_range *range, struct pt_common *common,
bool single_page, struct pt_iommu_map_args *map)
{
+ int ret;
+
/*
* The __map_single_page() fast path does not support DMA_INCOHERENT
* flushing to keep its .text small.
*/
if (single_page && !pt_feature(common, PT_FEAT_DMA_INCOHERENT)) {
- int ret;
ret = pt_walk_range(range, __map_single_page, map);
if (ret != -EAGAIN)
/* EAGAIN falls through to the full path */
}
- if (map->leaf_level == range->top_level)
- return pt_walk_range(range, __map_range_leaf, map);
- return pt_walk_range(range, __map_range, map);
+ do {
+ if (map->leaf_level == range->top_level)
+ ret = pt_walk_range(range, __map_range_leaf, map);
+ else
+ ret = pt_walk_range(range, __map_range, map);
+ } while (ret == -EAGAIN);
+ return ret;
}
-/**
- * map_pages() - Install translation for an IOVA range
- * @domain: Domain to manipulate
- * @iova: IO virtual address to start
- * @paddr: Physical/Output address to start
- * @pgsize: Length of each page
- * @pgcount: Length of the range in pgsize units starting from @iova
- * @prot: A bitmap of IOMMU_READ/WRITE/CACHE/NOEXEC/MMIO
- * @gfp: GFP flags for any memory allocations
- * @mapped: Total bytes successfully mapped
- *
- * The range starting at IOVA will have paddr installed into it. The caller
- * must specify a valid pgsize and pgcount to segment the range into compatible
- * blocks.
- *
- * On error the caller will probably want to invoke unmap on the range from iova
- * up to the amount indicated by @mapped to return the table back to an
- * unchanged state.
- *
- * Context: The caller must hold a write range lock that includes the whole
- * range.
- *
- * Returns: -ERRNO on failure, 0 on success. The number of bytes of VA that were
- * mapped are added to @mapped, @mapped is not zerod first.
- */
-int DOMAIN_NS(map_pages)(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t pgsize, size_t pgcount,
- int prot, gfp_t gfp, size_t *mapped)
+static int NS(map_range)(struct pt_iommu *iommu_table, dma_addr_t iova,
+ phys_addr_t paddr, dma_addr_t len, unsigned int prot,
+ gfp_t gfp, size_t *mapped)
{
- struct pt_iommu *iommu_table =
- container_of(domain, struct pt_iommu, domain);
pt_vaddr_t pgsize_bitmap = iommu_table->domain.pgsize_bitmap;
struct pt_common *common = common_from_iommu(iommu_table);
struct iommu_iotlb_gather iotlb_gather;
- pt_vaddr_t len = pgsize * pgcount;
struct pt_iommu_map_args map = {
.iotlb_gather = &iotlb_gather,
.oa = paddr,
- .leaf_pgsize_lg2 = vaffs(pgsize),
};
bool single_page = false;
struct pt_range range;
return ret;
/* Calculate target page size and level for the leaves */
- if (pt_has_system_page_size(common) && pgsize == PAGE_SIZE &&
- pgcount == 1) {
+ if (pt_has_system_page_size(common) && len == PAGE_SIZE) {
PT_WARN_ON(!(pgsize_bitmap & PAGE_SIZE));
if (log2_mod(iova | paddr, PAGE_SHIFT))
return -ENXIO;
map.leaf_pgsize_lg2 = PAGE_SHIFT;
map.leaf_level = 0;
+ map.num_leaves = 1;
single_page = true;
} else {
map.leaf_pgsize_lg2 = pt_compute_best_pgsize(
return -ENXIO;
map.leaf_level =
pt_pgsz_lg2_to_level(common, map.leaf_pgsize_lg2);
+ map.num_leaves = pt_pgsz_count(pgsize_bitmap, range.va,
+ range.last_va, paddr,
+ map.leaf_pgsize_lg2);
}
ret = check_map_range(iommu_table, &range, &map);
*mapped += map.oa - paddr;
return ret;
}
-EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(map_pages), "GENERIC_PT_IOMMU");
struct pt_unmap_args {
struct iommu_pages_list free_list;
}
static const struct pt_iommu_ops NS(ops) = {
+ .map_range = NS(map_range),
.unmap_range = NS(unmap_range),
#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER) && defined(pt_entry_is_write_dirty) && \
IS_ENABLED(CONFIG_IOMMUFD_TEST) && defined(pt_entry_make_write_dirty)
return pgsize;
}
-int iommu_map_nosync(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int __iommu_map_domain_pgtbl(struct iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot, gfp_t gfp)
{
const struct iommu_domain_ops *ops = domain->ops;
unsigned long orig_iova = iova;
unsigned int min_pagesz;
size_t orig_size = size;
- phys_addr_t orig_paddr = paddr;
int ret = 0;
might_sleep_if(gfpflags_allow_blocking(gfp));
/* unroll mapping in case something went wrong */
if (ret) {
iommu_unmap(domain, orig_iova, orig_size - size);
- } else {
- trace_map(orig_iova, orig_paddr, orig_size);
- iommu_debug_map(domain, orig_paddr, orig_size);
+ return ret;
}
-
- return ret;
+ return 0;
}
int iommu_sync_map(struct iommu_domain *domain, unsigned long iova, size_t size)
return ops->iotlb_sync_map(domain, iova, size);
}
+int iommu_map_nosync(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+{
+ struct pt_iommu *pt = iommupt_from_domain(domain);
+ int ret;
+
+ if (pt) {
+ size_t mapped = 0;
+
+ ret = pt->ops->map_range(pt, iova, paddr, size, prot, gfp,
+ &mapped);
+ if (ret) {
+ iommu_unmap(domain, iova, mapped);
+ return ret;
+ }
+ return 0;
+ }
+ ret = __iommu_map_domain_pgtbl(domain, iova, paddr, size, prot, gfp);
+ if (!ret)
+ return ret;
+
+ trace_map(iova, paddr, size);
+ iommu_debug_map(domain, paddr, size);
+ return 0;
+}
+
int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
};
struct pt_iommu_ops {
+ /**
+ * @map_range: Install translation for an IOVA range
+ * @iommu_table: Table to manipulate
+ * @iova: IO virtual address to start
+ * @paddr: Physical/Output address to start
+ * @len: Length of the range starting from @iova
+ * @prot: A bitmap of IOMMU_READ/WRITE/CACHE/NOEXEC/MMIO
+ * @gfp: GFP flags for any memory allocations
+ * @mapped: Total bytes successfully mapped
+ *
+ * The range starting at IOVA will have paddr installed into it. The
+ * range is automatically segmented into optimally sized table entries,
+ * and can have any valid alignment.
+ *
+ * On error the caller will probably want to invoke unmap on the range
+ * from iova up to the amount indicated by @mapped to return the table
+ * back to an unchanged state.
+ *
+ * Context: The caller must hold a write range lock that includes
+ * the whole range.
+ *
+ * Returns: -ERRNO on failure, 0 on success. The number of bytes of VA
+ * that were mapped are added to @mapped, @mapped is not zeroed first.
+ */
+ int (*map_range)(struct pt_iommu *iommu_table, dma_addr_t iova,
+ phys_addr_t paddr, dma_addr_t len, unsigned int prot,
+ gfp_t gfp, size_t *mapped);
+
/**
* @unmap_range: Make a range of IOVA empty/not present
* @iommu_table: Table to manipulate
#define IOMMU_PROTOTYPES(fmt) \
phys_addr_t pt_iommu_##fmt##_iova_to_phys(struct iommu_domain *domain, \
dma_addr_t iova); \
- int pt_iommu_##fmt##_map_pages(struct iommu_domain *domain, \
- unsigned long iova, phys_addr_t paddr, \
- size_t pgsize, size_t pgcount, \
- int prot, gfp_t gfp, size_t *mapped); \
int pt_iommu_##fmt##_read_and_clear_dirty( \
struct iommu_domain *domain, unsigned long iova, size_t size, \
unsigned long flags, struct iommu_dirty_bitmap *dirty); \
* iommu_pt
*/
#define IOMMU_PT_DOMAIN_OPS(fmt) \
- .iova_to_phys = &pt_iommu_##fmt##_iova_to_phys, \
- .map_pages = &pt_iommu_##fmt##_map_pages
+ .iova_to_phys = &pt_iommu_##fmt##_iova_to_phys
#define IOMMU_PT_DIRTY_OPS(fmt) \
.read_and_clear_dirty = &pt_iommu_##fmt##_read_and_clear_dirty