m68k: mm: Improve kernel_page_table()
author    Peter Zijlstra <peterz@infradead.org>
          Fri, 31 Jan 2020 12:45:37 +0000 (13:45 +0100)
committer Geert Uytterhoeven <geert@linux-m68k.org>
          Mon, 10 Feb 2020 09:57:48 +0000 (10:57 +0100)
With the PTE-tables now only being 256 bytes, allocating a full page
for them is a giant waste. Start by improving the boot time allocator
such that init_mm initialization will at least have optimal memory
density.
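
For illustration, a minimal user-space sketch of the bump-allocation
pattern the patch introduces (the DEMO_* names and the use of
aligned_alloc() in place of memblock_alloc_low() are stand-ins, not
kernel code; the real version is in the arch/m68k/mm/motorola.c hunk
below):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_PAGE_SIZE	4096u	/* stand-in for the m68k PAGE_SIZE */
#define DEMO_PTE_TABLE	256u	/* 64 entries of 4 bytes each */

static uint8_t *last_pte_table;	/* bump pointer into the current page */

/* Hand out 256-byte pte-tables; grab a fresh page only every 16th call. */
static void *demo_pte_alloc(void)
{
	uint8_t *table = last_pte_table;

	if (((uintptr_t)last_pte_table & (DEMO_PAGE_SIZE - 1)) == 0) {
		/* NULL or page-aligned: the previous page is exhausted */
		table = aligned_alloc(DEMO_PAGE_SIZE, DEMO_PAGE_SIZE);
		if (!table)
			abort();
		memset(table, 0, DEMO_PAGE_SIZE);
		last_pte_table = table;
	}

	last_pte_table += DEMO_PTE_TABLE;
	return table;
}

Each 4 KiB page now yields 16 pte-tables instead of one, which is the
density win described above.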

Many thanks to Will Deacon for his help with debugging and with
ferreting out lost information on these dusty MMUs.

Notes:

 - _TABLE_MASK now masks off fewer low bits, matching the shorter
   (256 byte) alignment of pte-tables. Per the manual, table entries
   should only ever have state in the low 4 bits (Used, WrProt, Desc1,
   Desc0), so the mask is still wider than strictly required. (Thanks
   Will!!!) See the illustration after these notes.

 - Also use kernel_page_table() for the 020/030 zero_pgtable case and
   consequently remove the zero_pgtable init hack (will fix up later).
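
A hedged illustration of the first note: with pte-tables aligned to
256 bytes, the new mask cleanly separates the table pointer from the
state bits. The descriptor value and DEMO_* names below are made up
for the example; the bit positions follow the note above.

#include <stdint.h>
#include <stdio.h>

#define DEMO_TABLE_MASK	0xffffff00u	/* new _TABLE_MASK: keep bits 8..31 */
#define DEMO_DESC_USED	0x008u		/* bit 3: Used */
#define DEMO_DESC_WP	0x004u		/* bit 2: Write Protected */
#define DEMO_DESC_TYPE	0x003u		/* bits 0-1: Descriptor Type */

int main(void)
{
	/* made-up descriptor: table at 0x00123400, Used set, type 2 */
	uint32_t desc = 0x00123400u | DEMO_DESC_USED | 0x2u;

	printf("table at %#010x, used=%u, wp=%u, type=%u\n",
	       (unsigned)(desc & DEMO_TABLE_MASK),
	       (unsigned)!!(desc & DEMO_DESC_USED),
	       (unsigned)!!(desc & DEMO_DESC_WP),
	       (unsigned)(desc & DEMO_DESC_TYPE));
	return 0;
}

Note that the old 0xfffffe00 mask would have dropped bit 8 of the
pointer, so a table at an odd 256-byte boundary could not have been
addressed; hence the mask change accompanies the new allocator.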

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Greg Ungerer <gerg@linux-m68k.org>
Tested-by: Michael Schmitz <schmitzmic@gmail.com>
Tested-by: Greg Ungerer <gerg@linux-m68k.org>
Link: https://lore.kernel.org/r/20200131125403.768263973@infradead.org
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
arch/m68k/include/asm/motorola_pgtable.h
arch/m68k/mm/init.c
arch/m68k/mm/motorola.c

diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 4d94e462bb2b9c039ad691a7c78b22495744c358..2ad0f416684159ea9f8b860c486b4b1b256f57b2 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
 #define _DESCTYPE_MASK 0x003
 
 #define _CACHEMASK040  (~0x060)
-#define _TABLE_MASK    (0xfffffe00)
+
+/*
+ * Currently set to the minimum alignment of table pointers (256 bytes).
+ * The hardware only uses the low 4 bits for state:
+ *
+ *    3 - Used
+ *    2 - Write Protected
+ *  0,1 - Descriptor Type
+ *
+ * and has the rest of the bits reserved.
+ */
+#define _TABLE_MASK    (0xffffff00)
 
 #define _PAGE_TABLE    (_PAGE_SHORT)
 #define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 27c453f4fffe1d92b508d03bcf9f8ef9df72be0e..a9b6d26cff8a151865d85aa978a02f95165ff492 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -42,7 +42,6 @@ EXPORT_SYMBOL(empty_zero_page);
 
 #if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
 extern void init_pointer_table(unsigned long ptable);
-extern pmd_t *zero_pgtable;
 #endif
 
 #ifdef CONFIG_MMU
@@ -135,10 +134,6 @@ static inline void init_pointer_tables(void)
                if (pud_present(*pud))
                        init_pointer_table(pgd_page_vaddr(kernel_pg_dir[i]));
        }
-
-       /* insert also pointer table that we used to unmap the zero page */
-       if (zero_pgtable)
-               init_pointer_table((unsigned long)zero_pgtable);
 #endif
 }
 
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index c888ef46da3e14d799f883edebcd8b892ef33378..600f9c1d96f8492d3edc256e2c1d4efbb4f7557c 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -174,27 +174,35 @@ extern __initdata unsigned long m68k_init_mapped_size;
 
 extern unsigned long availmem;
 
+static pte_t *last_pte_table __initdata = NULL;
+
 static pte_t * __init kernel_page_table(void)
 {
-       pte_t *ptablep;
+       pte_t *pte_table = last_pte_table;
 
-       ptablep = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-       if (!ptablep)
-               panic("%s: Failed to allocate %lu bytes align=%lx\n",
-                     __func__, PAGE_SIZE, PAGE_SIZE);
+       if (((unsigned long)last_pte_table & ~PAGE_MASK) == 0) {
+               pte_table = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+               if (!pte_table) {
+                       panic("%s: Failed to allocate %lu bytes align=%lx\n",
+                                       __func__, PAGE_SIZE, PAGE_SIZE);
+               }
 
-       clear_page(ptablep);
-       mmu_page_ctor(ptablep);
+               clear_page(pte_table);
+               mmu_page_ctor(pte_table);
 
-       return ptablep;
+               last_pte_table = pte_table;
+       }
+
+       last_pte_table += PTRS_PER_PTE;
+
+       return pte_table;
 }
 
-static pmd_t *last_pgtable __initdata = NULL;
-pmd_t *zero_pgtable __initdata = NULL;
+static pmd_t *last_pmd_table __initdata = NULL;
 
 static pmd_t * __init kernel_ptr_table(void)
 {
-       if (!last_pgtable) {
+       if (!last_pmd_table) {
                unsigned long pmd, last;
                int i;
 
@@ -213,25 +221,25 @@ static pmd_t * __init kernel_ptr_table(void)
                                last = pmd;
                }
 
-               last_pgtable = (pmd_t *)last;
+               last_pmd_table = (pmd_t *)last;
 #ifdef DEBUG
-               printk("kernel_ptr_init: %p\n", last_pgtable);
+               printk("kernel_ptr_init: %p\n", last_pmd_table);
 #endif
        }
 
-       last_pgtable += PTRS_PER_PMD;
-       if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
-               last_pgtable = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
+       last_pmd_table += PTRS_PER_PMD;
+       if (((unsigned long)last_pmd_table & ~PAGE_MASK) == 0) {
+               last_pmd_table = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
                                                           PAGE_SIZE);
-               if (!last_pgtable)
+               if (!last_pmd_table)
                        panic("%s: Failed to allocate %lu bytes align=%lx\n",
                              __func__, PAGE_SIZE, PAGE_SIZE);
 
-               clear_page(last_pgtable);
-               mmu_page_ctor(last_pgtable);
+               clear_page(last_pmd_table);
+               mmu_page_ctor(last_pmd_table);
        }
 
-       return last_pgtable;
+       return last_pmd_table;
 }
 
 static void __init map_node(int node)
@@ -294,8 +302,7 @@ static void __init map_node(int node)
 #ifdef DEBUG
                                printk ("[zero map]");
 #endif
-                               zero_pgtable = kernel_ptr_table();
-                               pte_dir = (pte_t *)zero_pgtable;
+                               pte_dir = kernel_page_table();
                                pmd_set(pmd_dir, pte_dir);
 
                                pte_val(*pte_dir++) = 0;