}
EXPORT_SYMBOL_NS_GPL(cpu_cache_has_invalidate_memregion, "DEVMEM");
-int cpu_cache_invalidate_memregion(void)
+int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len)
{
if (WARN_ON_ONCE(!cpu_cache_has_invalidate_memregion()))
return -ENXIO;
	wbinvd_on_all_cpus();
	return 0;
}
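A sketch, not part of this patch, of how a backend could use the new @start/@len pair: treat len == -1 (the value passed by cpu_cache_invalidate_all() further below) as a request to flush everything, and hand a bounded range to a ranged flush primitive where the architecture has one. arch_flush_phys_range() is a hypothetical name used only for illustration; the x86 backend above can simply keep calling wbinvd_on_all_cpus() for any range.

int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len)
{
	if (WARN_ON_ONCE(!cpu_cache_has_invalidate_memregion()))
		return -ENXIO;

	if (len == (size_t)-1) {
		/* "Everything" request, e.g. from cpu_cache_invalidate_all() */
		wbinvd_on_all_cpus();
		return 0;
	}

	/* Hypothetical ranged primitive; assumed for this sketch only. */
	arch_flush_phys_range(start, len);
	return 0;
}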
- cpu_cache_invalidate_memregion();
+ if (!cxlr->params.res)
+ return -ENXIO;
+ cpu_cache_invalidate_memregion(cxlr->params.res->start,
+ resource_size(cxlr->params.res));
return 0;
}
* here is ok.
*/
if (cpu_cache_has_invalidate_memregion())
- cpu_cache_invalidate_memregion();
+ cpu_cache_invalidate_all();
}
static int child_notify(struct device *dev, void *data)
}
}
- cpu_cache_invalidate_memregion();
+ cpu_cache_invalidate_all();
out:
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
/**
* cpu_cache_invalidate_memregion - drop any CPU cached data for
* memregion
+ * @start: start physical address of the target memory region.
+ * @len: length of the target memory region, or -1 to invalidate the
+ * caches for all memory regions.
*
* Perform cache maintenance after a memory event / operation that
* changes the contents of physical memory in a cache-incoherent manner.
* the cache maintenance.
*/
#ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
-int cpu_cache_invalidate_memregion(void);
+int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len);
bool cpu_cache_has_invalidate_memregion(void);
#else
static inline bool cpu_cache_has_invalidate_memregion(void)
{
	return false;
}
-static inline int cpu_cache_invalidate_memregion(void)
+static inline int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len)
{
WARN_ON_ONCE("CPU cache invalidation required");
return -ENXIO;
}
#endif
+
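+/* Drop any CPU cached data for all memory regions. */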
+static inline int cpu_cache_invalidate_all(void)
+{
+ return cpu_cache_invalidate_memregion(0, -1);
+}
+
#endif /* _MEMREGION_H_ */
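As a usage illustration only (an assumption, not taken from this patch): a caller that has changed the contents of a device-backed memory region in a cache-incoherent way could invalidate just the affected physical range, and fall back to the all-regions helper when no range is known. example_invalidate() and its struct resource argument are hypothetical names for this sketch.

#include <linux/ioport.h>
#include <linux/memregion.h>

/* Hypothetical caller, for illustration only. */
static int example_invalidate(struct resource *res)
{
	if (!cpu_cache_has_invalidate_memregion())
		return -ENXIO;

	/* Invalidate just the range backing this region when it is known... */
	if (res)
		return cpu_cache_invalidate_memregion(res->start,
						      resource_size(res));

	/* ...otherwise fall back to dropping caches for every memory region. */
	return cpu_cache_invalidate_all();
}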