* IOMMU API debug page alloc sanitizer
*/
#include <linux/atomic.h>
+#include <linux/iommu.h>
#include <linux/iommu-debug-pagealloc.h>
#include <linux/kernel.h>
#include <linux/page_ext.h>
+#include "iommu-priv.h"
+
static bool needed;
+/*
+ * Flipped on in iommu_debug_init() when the "iommu.debug_pagealloc" boot
+ * parameter was parsed true; gates every debug hook below at ~zero cost.
+ */
+DEFINE_STATIC_KEY_FALSE(iommu_debug_initialized);
struct iommu_debug_metadata {
atomic_t ref;
.need = need_iommu_debug,
};
+/* NOTE(review): intentionally empty stub — tracking of mapped ranges is
+ * presumably filled in by a follow-up patch; confirm in the series. */
+void __iommu_debug_map(struct iommu_domain *domain, phys_addr_t phys, size_t size)
+{
+}
+
+/* NOTE(review): empty stub — unmap-side bookkeeping expected in a later
+ * patch of the series. */
+void __iommu_debug_unmap_begin(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+}
+
+/* NOTE(review): empty stub — receives the actual unmapped byte count so a
+ * later patch can reconcile it against the size requested at _begin. */
+void __iommu_debug_unmap_end(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ size_t unmapped)
+{
+}
+
+/*
+ * Enable the sanitizer once the IOMMU core is up, but only if the
+ * "iommu.debug_pagealloc" boot parameter asked for it. After the static
+ * branch is enabled, the iommu_debug_* inline hooks start forwarding to
+ * the __iommu_debug_* slow paths.
+ */
+void iommu_debug_init(void)
+{
+	if (!needed)
+		return;
+
+	/* printk messages must be newline-terminated, otherwise the next
+	 * printk on this CPU may be appended as a continuation line. */
+	pr_info("iommu: Debugging page allocations, expect overhead or disable iommu.debug_pagealloc\n");
+	static_branch_enable(&iommu_debug_initialized);
+}
+
static int __init iommu_debug_pagealloc(char *str)
{
return kstrtobool(str, &needed);
#define __LINUX_IOMMU_PRIV_H
#include <linux/iommu.h>
+#include <linux/iommu-debug-pagealloc.h>
#include <linux/msi.h>
static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
int iommu_replace_device_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid,
struct iommu_attach_handle *handle);
+
+#ifdef CONFIG_IOMMU_DEBUG_PAGEALLOC
+
+/* Out-of-line slow paths; reached only while iommu_debug_initialized is on. */
+void __iommu_debug_map(struct iommu_domain *domain, phys_addr_t phys,
+ size_t size);
+void __iommu_debug_unmap_begin(struct iommu_domain *domain,
+ unsigned long iova, size_t size);
+void __iommu_debug_unmap_end(struct iommu_domain *domain,
+ unsigned long iova, size_t size, size_t unmapped);
+
+/* Map hook: a single static-key test on the fast path, so the cost is a
+ * patched-out branch when the sanitizer is disabled. */
+static inline void iommu_debug_map(struct iommu_domain *domain,
+ phys_addr_t phys, size_t size)
+{
+	if (static_branch_unlikely(&iommu_debug_initialized))
+		__iommu_debug_map(domain, phys, size);
+}
+
+/* Unmap-begin hook: called with the requested iova/size before the core
+ * walks the range; no-op unless the sanitizer was enabled at boot. */
+static inline void iommu_debug_unmap_begin(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+	if (static_branch_unlikely(&iommu_debug_initialized))
+		__iommu_debug_unmap_begin(domain, iova, size);
+}
+
+/* Unmap-end hook: additionally passes how many bytes were actually
+ * unmapped (may be less than @size if an unmapped hole was hit). */
+static inline void iommu_debug_unmap_end(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ size_t unmapped)
+{
+	if (static_branch_unlikely(&iommu_debug_initialized))
+		__iommu_debug_unmap_end(domain, iova, size, unmapped);
+}
+
+void iommu_debug_init(void);
+
+#else
+/* CONFIG_IOMMU_DEBUG_PAGEALLOC=n: every hook compiles to an empty inline,
+ * so callers in the map/unmap paths need no #ifdefs of their own. */
+static inline void iommu_debug_map(struct iommu_domain *domain,
+ phys_addr_t phys, size_t size)
+{
+}
+
+static inline void iommu_debug_unmap_begin(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+}
+
+static inline void iommu_debug_unmap_end(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ size_t unmapped)
+{
+}
+
+static inline void iommu_debug_init(void)
+{
+}
+
+#endif /* CONFIG_IOMMU_DEBUG_PAGEALLOC */
+
#endif /* __LINUX_IOMMU_PRIV_H */
if (!nb)
return -ENOMEM;
+ iommu_debug_init();
+
for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
nb[i].notifier_call = iommu_bus_notifier;
bus_register_notifier(iommu_buses[i], &nb[i]);
}
/* unroll mapping in case something went wrong */
- if (ret)
+ if (ret) {
iommu_unmap(domain, orig_iova, orig_size - size);
- else
+ } else {
trace_map(orig_iova, orig_paddr, orig_size);
+ iommu_debug_map(domain, orig_paddr, orig_size);
+ }
return ret;
}
pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
+ iommu_debug_unmap_begin(domain, iova, size);
+
/*
* Keep iterating until we either unmap 'size' bytes (or more)
* or we hit an area that isn't mapped.
}
trace_unmap(orig_iova, size, unmapped);
+ iommu_debug_unmap_end(domain, orig_iova, size, unmapped);
return unmapped;
}