	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;
-	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
+	/* Don't mess with any tables not fully in 1:1 mapping, vmemmap & kasan area */
+#ifdef CONFIG_KASAN
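+	/* Allow changes that lie entirely within the KASAN shadow area */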
+	if (WARN_ON_ONCE(!(start >= KASAN_SHADOW_START && end <= KASAN_SHADOW_END) &&
+			 end > __abs_lowcore))
+		return -EINVAL;
+#else
	if (WARN_ON_ONCE(end > __abs_lowcore))
		return -EINVAL;
+#endif
	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);
	unsigned int id;
	unsigned int memmap_on_memory;
	unsigned int config;
+#ifdef CONFIG_KASAN
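+	/* Set when the block's KASAN shadow was mapped during early boot */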
+	unsigned int early_shadow_mapped;
+#endif
};
struct sclp_mem_arg {
		put_device(&mem->dev);
		sclp_mem_change_state(addr, block_size, 0);
		__remove_memory(addr, block_size);
+#ifdef CONFIG_KASAN
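+		/*
+		 * Free the KASAN shadow mapped for this block during early
+		 * boot and clear the flag so the shadow is not freed twice.
+		 */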
+		if (sclp_mem->early_shadow_mapped) {
+			unsigned long start, end;
+
+			start = (unsigned long)kasan_mem_to_shadow(__va(addr));
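+			/* One shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT bytes of memory */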
+			end = start + (block_size >> KASAN_SHADOW_SCALE_SHIFT);
+			vmemmap_free(start, end, NULL);
+			sclp_mem->early_shadow_mapped = 0;
+		}
+#endif
		WRITE_ONCE(sclp_mem->config, 0);
	}
out_unlock:
	sclp_mem->memmap_on_memory = memmap_on_memory;
	sclp_mem->config = config;
+#ifdef CONFIG_KASAN
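+	/* Initially configured blocks had their shadow mapped during early boot */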
+	sclp_mem->early_shadow_mapped = config;
+#endif
	sclp_mem->id = id;
	kobject_init(&sclp_mem->kobj, &ktype);
	rc = kobject_add(&sclp_mem->kobj, &kset->kobj, "memory%d", id);