.need = need_iommu_debug,
};
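+
+/* Returns the page_ext for @phys; caller releases it with page_ext_put(). */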
+static struct page_ext *get_iommu_page_ext(phys_addr_t phys)
+{
+	return page_ext_get(phys_to_page(phys));
+}
+
+static struct iommu_debug_metadata *get_iommu_data(struct page_ext *page_ext)
+{
+	return page_ext_data(page_ext, &page_iommu_debug_ops);
+}
+
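+/*
+ * Each page's counter tracks how many live IOMMU mappings cover it; the
+ * WARN_ONs catch the counter going negative (unbalanced accounting).
+ */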
+static void iommu_debug_inc_page(phys_addr_t phys)
+{
+	struct page_ext *page_ext = get_iommu_page_ext(phys);
+	struct iommu_debug_metadata *d = get_iommu_data(page_ext);
+
+	WARN_ON(atomic_inc_return_relaxed(&d->ref) <= 0);
+	page_ext_put(page_ext);
+}
+
+static void iommu_debug_dec_page(phys_addr_t phys)
+{
+	struct page_ext *page_ext = get_iommu_page_ext(phys);
+	struct iommu_debug_metadata *d = get_iommu_data(page_ext);
+
+	WARN_ON(atomic_dec_return_relaxed(&d->ref) < 0);
+	page_ext_put(page_ext);
+}
+
+/*
+ * The IOMMU page size doesn't have to match the CPU page size, so the
+ * smallest supported IOMMU page size is used to refcount pages in the
+ * vmemmap. That matters because map and unmap must update the refcount
+ * at the same granularity, or the same page would be double counted.
+ * As iommu_unmap() can't know which page size the mapping was originally
+ * created with, use the minimum supported size for both. E.g. with a
+ * pgsize_bitmap of (SZ_4K | SZ_2M | SZ_1G), pages are counted in 4K units.
+ */
+static size_t iommu_debug_page_size(struct iommu_domain *domain)
+{
+	return 1UL << __ffs(domain->pgsize_bitmap);
+}
+
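+/* Take a refcount on every page backing the new mapping. */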
void __iommu_debug_map(struct iommu_domain *domain, phys_addr_t phys, size_t size)
{
+	size_t page_size = iommu_debug_page_size(domain);
+	phys_addr_t end;
+	size_t off;
+
+	if (WARN_ON(!phys || check_add_overflow(phys, size, &end)))
+		return;
+
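+	/*
+	 * Count each minimum-sized IOMMU page in the mapping. Addresses
+	 * without a struct page (e.g. MMIO) have no page_ext; skip them.
+	 */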
+	for (off = 0; off < size; off += page_size) {
+		if (!pfn_valid(__phys_to_pfn(phys + off)))
+			continue;
+		iommu_debug_inc_page(phys + off);
+	}
+}
+
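+/*
+ * Walk an IOVA range at the minimum IOMMU page size, translating each
+ * page to its physical address and adjusting that page's refcount.
+ */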
+static void __iommu_debug_update_iova(struct iommu_domain *domain,
+				      unsigned long iova, size_t size, bool inc)
+{
+	size_t page_size = iommu_debug_page_size(domain);
+	unsigned long end;
+	size_t off;
+
+	if (WARN_ON(check_add_overflow(iova, size, &end)))
+		return;
+
+	for (off = 0; off < size; off += page_size) {
+		phys_addr_t phys = iommu_iova_to_phys(domain, iova + off);
+
+		if (!phys || !pfn_valid(__phys_to_pfn(phys)))
+			continue;
+
+		if (inc)
+			iommu_debug_inc_page(phys);
+		else
+			iommu_debug_dec_page(phys);
+	}
}
+
void __iommu_debug_unmap_begin(struct iommu_domain *domain,
			       unsigned long iova, size_t size)
{
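+	/* Runs before the actual unmap, while the IOVAs still translate. */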
+	__iommu_debug_update_iova(domain, iova, size, false);
}
+
void __iommu_debug_unmap_end(struct iommu_domain *domain,
			     unsigned long iova, size_t size,
			     size_t unmapped)
{
+	if (unmapped == size || WARN_ON_ONCE(unmapped > size))
+		return;
+
+	/*
+	 * Partial unmap: re-take the refcounts that unmap_begin dropped
+	 * for the tail of the range that is still mapped.
+	 */
+	__iommu_debug_update_iova(domain, iova + unmapped,
+				  size - unmapped, true);
}
+
void iommu_debug_init(void)