 static inline pgd_t *_pgd_alloc(struct mm_struct *mm)
 {
 	/*
-	 * If no SHARED_KERNEL_PMD, PAE kernel is running as a Xen domain.
-	 * We allocate one page for pgd.
-	 */
-	if (!SHARED_KERNEL_PMD)
-		return __pgd_alloc(mm, pgd_allocation_order());
-
-	/*
-	 * Now PAE kernel is not running as a Xen domain. We can allocate
-	 * a 32-byte slab for pgd to save memory space.
+	 * PTI and Xen need a whole page for the PAE PGD
+	 * even though the hardware only needs 32 bytes.
+	 *
+	 * For simplicity, allocate a page for all users.
 	 */
-	return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER);
-}
-
-static inline void _pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-	if (!SHARED_KERNEL_PMD)
-		__pgd_free(mm, pgd);
-	else
-		kmem_cache_free(pgd_cache, pgd);
-}
-#else
-
-static inline pgd_t *_pgd_alloc(struct mm_struct *mm)
-{
-	return __pgd_alloc(mm, PGD_ALLOCATION_ORDER);
+	return __pgd_alloc(mm, pgd_allocation_order());
 }
 
 static inline void _pgd_free(struct mm_struct *mm, pgd_t *pgd)
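
For context on the numbers in the new comment: under PAE the hardware top level is just four 8-byte PDPTEs, hence 32 bytes, while PTI needs two full PGD pages (kernel and user) and Xen tracks page-table pages at page granularity. Below is a minimal sketch of what pgd_allocation_order() boils down to under those constraints; it illustrates the idea and is not a verbatim copy of the upstream helper:

/* Sketch only, not the exact upstream helper. */
static inline unsigned int pgd_allocation_order(void)
{
	/* PTI keeps a user PGD in the page adjacent to the kernel PGD. */
	if (cpu_feature_enabled(X86_FEATURE_PTI))
		return 1;	/* order-1: two pages */
	return 0;		/* order-0: one page */
}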
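
The deleted branch depended on a boot-time slab sized to those 32 bytes. Assuming the cache was set up roughly along these lines (paraphrased from the removed PAE path; the literal 32s stand in for the old size/alignment macros):

/* Sketch of the boot-time setup the removed path relied on. */
static struct kmem_cache *pgd_cache;

void __init pgd_cache_init(void)
{
	/* Xen domains (!SHARED_KERNEL_PMD) already used whole pages. */
	if (!SHARED_KERNEL_PMD)
		return;

	/* 32-byte objects, 32-byte aligned: 4 PDPTEs * 8 bytes. */
	pgd_cache = kmem_cache_create("pgd_cache", 32, 32,
				      SLAB_PANIC, NULL);
}

With that slab gone, _pgd_alloc() and _pgd_free() collapse to the page-based __pgd_alloc()/__pgd_free() pair on every configuration, which is what lets the patch drop the #else duplicate of _pgd_alloc() as well.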