__remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size);
}
+
+static bool addr_splits_kernel_leaf(unsigned long addr)
+{
+ pgd_t *pgdp, pgd;
+ p4d_t *p4dp, p4d;
+ pud_t *pudp, pud;
+ pmd_t *pmdp, pmd;
+ pte_t *ptep, pte;
+
+ /*
+ * If the given address points at the start address of a
+ * possible leaf, we certainly won't split one. Otherwise,
+ * check whether we would actually split a leaf by walking
+ * the page tables further.
+ */
+ if (IS_ALIGNED(addr, PGDIR_SIZE))
+ return false;
+
+ pgdp = pgd_offset_k(addr);
+ pgd = pgdp_get(pgdp);
+ if (!pgd_present(pgd))
+ return false;
+
+ if (IS_ALIGNED(addr, P4D_SIZE))
+ return false;
+
+ p4dp = p4d_offset(pgdp, addr);
+ p4d = p4dp_get(p4dp);
+ if (!p4d_present(p4d))
+ return false;
+
+ if (IS_ALIGNED(addr, PUD_SIZE))
+ return false;
+
+ pudp = pud_offset(p4dp, addr);
+ pud = pudp_get(pudp);
+ if (!pud_present(pud))
+ return false;
+
+ if (pud_leaf(pud))
+ return true;
+
+ if (IS_ALIGNED(addr, CONT_PMD_SIZE))
+ return false;
+
+ pmdp = pmd_offset(pudp, addr);
+ pmd = pmdp_get(pmdp);
+ if (!pmd_present(pmd))
+ return false;
+
+ if (pmd_cont(pmd))
+ return true;
+
+ if (IS_ALIGNED(addr, PMD_SIZE))
+ return false;
+
+ if (pmd_leaf(pmd))
+ return true;
+
+ if (IS_ALIGNED(addr, CONT_PTE_SIZE))
+ return false;
+
+ ptep = pte_offset_kernel(pmdp, addr);
+ pte = __ptep_get(ptep);
+ if (!pte_present(pte))
+ return false;
+
+ if (pte_cont(pte))
+ return true;
+
+ return !IS_ALIGNED(addr, PAGE_SIZE);
+}
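
For reference, a sketch of the leaf and contiguous block sizes the walk above steps through, assuming a 4K translation granule with 48-bit VAs. These constants are illustrative only, not part of the patch; 16K/64K granules use different values:

/* Illustrative only: block sizes checked above, 4K granule, 48-bit VA */
#define EX_PAGE_SIZE		SZ_4K		/* PTE leaf		*/
#define EX_CONT_PTE_SIZE	(16 * SZ_4K)	/* contiguous PTEs: 64K	*/
#define EX_PMD_SIZE		SZ_2M		/* PMD leaf		*/
#define EX_CONT_PMD_SIZE	(16 * SZ_2M)	/* contiguous PMDs: 32M	*/
#define EX_PUD_SIZE		SZ_1G		/* PUD leaf		*/
#define EX_PGDIR_SIZE		(512 * SZ_1G)	/* PGD entry span: 512G	*/
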
+
+static bool can_unmap_without_split(unsigned long pfn, unsigned long nr_pages)
+{
+ unsigned long phys_start, phys_end, start, end;
+
+ phys_start = PFN_PHYS(pfn);
+ phys_end = phys_start + nr_pages * PAGE_SIZE;
+
+ /* Ensure the PFN range's linear map edges don't split a leaf entry */
+ start = __phys_to_virt(phys_start);
+ end = __phys_to_virt(phys_end);
+ if (addr_splits_kernel_leaf(start) || addr_splits_kernel_leaf(end)) {
+ pr_warn("[%lx %lx] would split a leaf entry in the linear map\n",
+ phys_start, phys_end);
+ return false;
+ }
+
+ /* Ensure the PFN range's vmemmap edges don't split a leaf entry */
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP));
+ start = (unsigned long)pfn_to_page(pfn);
+ end = (unsigned long)pfn_to_page(pfn + nr_pages);
+ if (addr_splits_kernel_leaf(start) || addr_splits_kernel_leaf(end)) {
+ pr_warn("[%lx %lx] would split a leaf entry in the vmemmap\n",
+ phys_start, phys_end);
+ return false;
+ }
+ return true;
+}
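
For intuition, a worked example of the vmemmap edge arithmetic, assuming 4K pages and a 64-byte struct page (both configuration dependent, neither guaranteed by the patch):

/*
 * A 128M memory block: nr_pages = SZ_128M / SZ_4K = 32768, and
 * vmemmap span = 32768 * sizeof(struct page) = 32768 * 64 = SZ_2M,
 * i.e. exactly one 2M PMD leaf. A naturally aligned block therefore
 * has leaf-aligned vmemmap edges; a misaligned one may split a leaf
 * and gets rejected above.
 */
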
+
/*
* This memory hotplug notifier helps prevent boot memory from being
* inadvertently removed as it blocks pfn range offlining process in
* In future if and when boot memory could be removed, this notifier
* should be dropped and free_hotplug_page_range() should handle any
* reserved pages allocated during boot.
+ *
+ * This also blocks any memory removal that would split a leaf entry
+ * in the kernel linear map or the vmemmap mapping.
*/
-static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
+static int prevent_memory_remove_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct mem_section *ms;
return NOTIFY_DONE;
}
}
+
+ if (!can_unmap_without_split(pfn, arg->nr_pages))
+ return NOTIFY_BAD;
+
return NOTIFY_OK;
}
-static struct notifier_block prevent_bootmem_remove_nb = {
- .notifier_call = prevent_bootmem_remove_notifier,
+static struct notifier_block prevent_memory_remove_nb = {
+ .notifier_call = prevent_memory_remove_notifier,
};
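
For context, a simplified sketch (core hotplug code in mm/memory_hotplug.c, not part of this patch) of how the NOTIFY_BAD veto from the callback above is consumed by the offlining path:

/*
 * offline_pages(), abridged: the notifier chain is consulted before
 * any unmapping, so a vetoing callback aborts the offline.
 */
ret = memory_notify(MEM_GOING_OFFLINE, &arg);
ret = notifier_to_errno(ret);		/* NOTIFY_BAD -> -EPERM */
if (ret)
	goto failed_removal_isolated;	/* offlining is aborted */
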
/*
}
}
-static int __init prevent_bootmem_remove_init(void)
+static int __init prevent_memory_remove_init(void)
{
int ret = 0;
return ret;
validate_bootmem_online();
- ret = register_memory_notifier(&prevent_bootmem_remove_nb);
+ ret = register_memory_notifier(&prevent_memory_remove_nb);
if (ret)
pr_err("%s: Notifier registration failed %d\n", __func__, ret);
return ret;
}
-early_initcall(prevent_bootmem_remove_init);
+early_initcall(prevent_memory_remove_init);
#endif
pte_t modify_prot_start_ptes(struct vm_area_struct *vma, unsigned long addr,