powerpc/8xx: Add a function to early map kernel via huge pages
author Christophe Leroy <christophe.leroy@csgroup.eu>
Tue, 19 May 2020 05:49:22 +0000 (05:49 +0000)
committer Michael Ellerman <mpe@ellerman.id.au>
Tue, 26 May 2020 12:22:22 +0000 (22:22 +1000)
Add a function to early map kernel memory using huge pages.

For 512k pages, just use standard page table and map in using 512k
pages.

For 8M pages, create a hugepd table and populate the two PGD
entries with it: on the 8xx each level-1 (PGD) entry covers 4M, so a
single 8M page spans two consecutive entries, both pointing at the
same hugepd table.

This function can only be used to create page tables at startup. Once
the regular SLAB allocation functions replace the memblock functions,
this function can no longer allocate new pages. However, it can still
update existing mappings with new protections.
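
As an illustration, a hypothetical calling sequence (no caller is
added by this patch, and the protections used here are only plausible
examples):

	/* At boot, while memblock is the allocator: create the mapping */
	__early_map_kernel_hugepage(PAGE_OFFSET, 0, PAGE_KERNEL_TEXT,
				    MMU_PAGE_8M, true);

	/* After SLAB is up: only update the existing mapping's protection */
	__early_map_kernel_hugepage(PAGE_OFFSET, 0, PAGE_KERNEL_ROX,
				    MMU_PAGE_8M, false);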

The hugepd_none() macro is moved into asm/hugetlb.h so it can be used
outside of mm/hugetlbpage.c.
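
For reference, since that hunk is not reproduced below, the moved
macro is assumed to be simply:

	#define hugepd_none(hpd)	(hpd_val(hpd) == 0)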

early_pte_alloc_kernel() is made visible.

_PAGE_HUGE flag is now displayed by ptdump.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
[mpe: Change ptdump display to use "huge"]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/68325bcd3b6f93127f7810418a2352c3519066d6.1589866984.git.christophe.leroy@csgroup.eu
arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
arch/powerpc/include/asm/pgtable.h
arch/powerpc/mm/nohash/8xx.c
arch/powerpc/mm/pgtable_32.c
arch/powerpc/mm/ptdump/8xx.c
arch/powerpc/platforms/Kconfig.cputype

index 1c7d4693a78e1d2fc0f706558f898b0db12811aa..e752a5807a596ae50c819c63157fef5e77fd095b 100644 (file)
@@ -35,6 +35,11 @@ static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
        *hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT | _PMD_PAGE_8M);
 }
 
+static inline void hugepd_populate_kernel(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
+{
+       *hpdp = __hugepd(__pa(new) | _PMD_PRESENT | _PMD_PAGE_8M);
+}
+
 static inline int check_and_get_huge_psize(int shift)
 {
        return shift_to_mmu_psize(shift);
index b1f1d53397355951b27214cbe116f40592cad7bb..961895be932afcb13a36207332d06ea438a7bb0b 100644 (file)
@@ -107,6 +107,8 @@ unsigned long vmalloc_to_phys(void *vmalloc_addr);
 
 void pgtable_cache_add(unsigned int shift);
 
+pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);
+
 #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
 void mark_initmem_nx(void);
 #else
index b735482e1529691533dd80d8a0ef6b8cd8bbfbdf..72fb75f2a5f146bf0d0ab6bf5993ff8de8e0121a 100644 (file)
@@ -9,9 +9,11 @@
 
 #include <linux/memblock.h>
 #include <linux/mmu_context.h>
+#include <linux/hugetlb.h>
 #include <asm/fixmap.h>
 #include <asm/code-patching.h>
 #include <asm/inst.h>
+#include <asm/pgalloc.h>
 
 #include <mm/mmu_decl.h>
 
@@ -55,6 +57,56 @@ unsigned long p_block_mapped(phys_addr_t pa)
        return 0;
 }
 
+static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
+{
+       if (hpd_val(*pmdp) == 0) {
+               pte_t *ptep = memblock_alloc(sizeof(pte_basic_t), SZ_4K);
+
+               if (!ptep)
+                       return NULL;
+
+               hugepd_populate_kernel((hugepd_t *)pmdp, ptep, PAGE_SHIFT_8M);
+               hugepd_populate_kernel((hugepd_t *)pmdp + 1, ptep, PAGE_SHIFT_8M);
+       }
+       return hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
+}
+
+static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
+                                            pgprot_t prot, int psize, bool new)
+{
+       pmd_t *pmdp = pmd_ptr_k(va);
+       pte_t *ptep;
+
+       if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
+               return -EINVAL;
+
+       if (new) {
+               if (WARN_ON(slab_is_available()))
+                       return -EINVAL;
+
+               if (psize == MMU_PAGE_512K)
+                       ptep = early_pte_alloc_kernel(pmdp, va);
+               else
+                       ptep = early_hugepd_alloc_kernel((hugepd_t *)pmdp, va);
+       } else {
+               if (psize == MMU_PAGE_512K)
+                       ptep = pte_offset_kernel(pmdp, va);
+               else
+                       ptep = hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
+       }
+
+       if (WARN_ON(!ptep))
+               return -ENOMEM;
+
+       /* The PTE should never be already present */
+       if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
+               return -EINVAL;
+
+       set_huge_pte_at(&init_mm, va, ptep, pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)));
+
+       return 0;
+}
+
 /*
  * MMU_init_hw does the chip-specific initialization of the MMU hardware.
  */
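
For context, a sketch of how a later caller inside 8xx.c might use
this to map a linear range with 8M pages (a hypothetical helper, not
part of this commit; SZ_8M comes from linux/sizes.h):

	static int __init map_range_8m(unsigned long va, phys_addr_t pa,
				       unsigned long size, pgprot_t prot)
	{
		unsigned long end = va + size;

		/* One call per 8M block; the hugepd is allocated on first use */
		for (; va < end; va += SZ_8M, pa += SZ_8M) {
			int err = __early_map_kernel_hugepage(va, pa, prot,
							      MMU_PAGE_8M, true);
			if (err)
				return err;
		}
		return 0;
	}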
index bd0cb6e3573ea91c35156a5e581e5a637310a5cf..05902bbff8d6cef5e7273891c27e9a78a83d784a 100644 (file)
@@ -61,7 +61,7 @@ static void __init *early_alloc_pgtable(unsigned long size)
        return ptr;
 }
 
-static pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
+pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
 {
        if (pmd_none(*pmdp)) {
                pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);
index 9e2d8e847d6e874a9b8d3d16fb5220ff326afd07..4bc350736c1debb109847640c04edd86a22d0310 100644 (file)
 
 static const struct flag_info flag_array[] = {
        {
+               .mask   = _PAGE_HUGE,
+               .val    = _PAGE_HUGE,
+               .set    = "huge",
+               .clear  = "    ",
+       }, {
                .mask   = _PAGE_SH,
                .val    = 0,
                .set    = "user",
index b0587b83351766e60f9a9c0cec313a4c095f22ba..404f26917da7d293cf7bc2d9325936fb186adb56 100644 (file)
@@ -56,6 +56,7 @@ config PPC_8xx
        select PPC_HAVE_KUEP
        select PPC_HAVE_KUAP
        select HAVE_ARCH_VMAP_STACK
+       select HUGETLBFS
 
 config 40x
        bool "AMCC 40x"