Export split_pud_page() so it can be used from the vmem code and teach
modify_pud_table() to split PUD-sized mappings when only a subrange
needs to be removed.
If the range to be removed covers a full PUD-sized mapping, clear the
PUD entry as before and, for non-direct mappings, also free the backing
large page. Otherwise, split the PUD-mapped page into PMD mappings and
let the walker handle the smaller ranges.
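
The split itself follows the usual large-page split pattern: allocate a
PMD table, populate all of its entries so that together they cover the
same 2G range with the same protection, then replace the PUD leaf with
a pointer to the new table. A minimal sketch of that pattern, with
hypothetical helpers alloc_pmd_table() and leaf_prot(), not the actual
s390 implementation:

	static int split_pud_sketch(pud_t *pudp, unsigned long addr)
	{
		/* physical base of the 2G page backing the PUD leaf */
		unsigned long pa = pud_pfn(*pudp) << PAGE_SHIFT;
		pmd_t *table, *pmdp;
		int i;

		table = alloc_pmd_table();	/* hypothetical: returns a zeroed PMD table */
		if (!table)
			return -ENOMEM;
		pmdp = table;
		for (i = 0; i < PTRS_PER_PMD; i++) {
			/* one segment-sized leaf per entry; leaf_prot() is
			 * hypothetical and carries the leaf + protection bits
			 */
			set_pmd(pmdp, __pmd(pa | leaf_prot(*pudp)));
			pa += PMD_SIZE;
			pmdp++;
		}
		/* publish the fully populated table in place of the 2G leaf */
		pud_populate(&init_mm, pudp, table);
		return 0;
	}

The table is filled completely before it is hooked up, so a concurrent
walker sees either the old 2G leaf or a complete set of PMD mappings,
never a half-initialized level. TLB maintenance is omitted in the
sketch; real code must also flush the old 2G translation.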
This is needed to support KASAN early shadow removal: memory hotplug
freeing the KASAN early shadow is the only expected caller that will
try to free 2G PUD-mapped regions of non-direct mappings.
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
 	return CC_TRANSFORM(cc);
 }
+int split_pud_page(pud_t *pudp, unsigned long addr);
+
 /* Bits in the storage key */
 #define _PAGE_CHANGED		0x02	/* HW changed bit */
 #define _PAGE_REFERENCED	0x04	/* HW referenced bit */
 	return rc;
 }
-static int split_pud_page(pud_t *pudp, unsigned long addr)
+int split_pud_page(pud_t *pudp, unsigned long addr)
 {
 	unsigned long pmd_addr, prot;
 	pmd_t *pm_dir, *pmdp;
 			if (pud_leaf(*pud)) {
 				if (IS_ALIGNED(addr, PUD_SIZE) &&
 				    IS_ALIGNED(next, PUD_SIZE)) {
+					if (!direct)
+						vmem_free_pages(pud_deref(*pud), get_order(PUD_SIZE), altmap);
 					pud_clear(pud);
 					pages++;
+					continue;
+				} else {
+					split_pud_page(pud, addr & PUD_MASK);
 				}
-				continue;
 			}
 		} else if (pud_none(*pud)) {
 			if (IS_ALIGNED(addr, PUD_SIZE) &&