mm/x86: replace p4d_large() with p4d_leaf()
author     Peter Xu <peterx@redhat.com>
           Tue, 5 Mar 2024 04:37:43 +0000 (12:37 +0800)
committer  Andrew Morton <akpm@linux-foundation.org>
           Wed, 6 Mar 2024 21:04:19 +0000 (13:04 -0800)
p4d_large() is always defined as p4d_leaf().  Merge their usages.  p4d_leaf()
was chosen because it is a global API, while p4d_large() is not.

Only x86 has p4d_leaf() defined as of now, so after this patch no p4d_large()
usages remain.  The converted call sites all follow the same page-table walk
pattern, sketched just before the hunks below.

Link: https://lkml.kernel.org/r/20240305043750.93762-4-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/x86/mm/fault.c
arch/x86/mm/init_64.c
arch/x86/mm/pat/set_memory.c
arch/x86/mm/pti.c
arch/x86/power/hibernate.c
arch/x86/xen/mmu_pv.c

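Every hunk below makes the same substitution inside a familiar page-table
walk: descend from the PGD and stop early when an entry is a leaf (a single
huge mapping) rather than a pointer to the next-level table.  A minimal
sketch of that pattern, kept close to lookup_address_in_pgd() in the
set_memory.c hunk; the walk_to_pte() helper name is hypothetical and is not
part of this patch:

/*
 * Illustrative sketch only (assumed helper name walk_to_pte(); not part of
 * this patch).  It mirrors the shape of lookup_address_in_pgd() below: a
 * present leaf entry at the P4D level maps the whole region, so the walk
 * returns that entry instead of descending to the PUD/PMD/PTE tables.
 */
static pte_t *walk_to_pte(pgd_t *pgd, unsigned long address)
{
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return NULL;

        p4d = p4d_offset(pgd, address);
        if (p4d_none(*p4d))
                return NULL;
        if (p4d_leaf(*p4d) || !p4d_present(*p4d))
                return (pte_t *)p4d;    /* leaf (or not present): stop here */

        pud = pud_offset(p4d, address);
        if (pud_none(*pud))
                return NULL;
        if (pud_leaf(*pud) || !pud_present(*pud))
                return (pte_t *)pud;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;
        if (pmd_leaf(*pmd) || !pmd_present(*pmd))
                return (pte_t *)pmd;

        return pte_offset_kernel(pmd, address);
}

After the conversion, the P4D-level check reads p4d_leaf(), matching the
pud_leaf()/pmd_leaf() spellings used at the lower levels.
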
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 679b09cfe241c72e7f85bd7bbd406d59a259bf2a..8b69ce3f411542f0f3080d78fed97c17afd51bdc 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -368,7 +368,7 @@ static void dump_pagetable(unsigned long address)
                goto bad;
 
        pr_cont("P4D %lx ", p4d_val(*p4d));
-       if (!p4d_present(*p4d) || p4d_large(*p4d))
+       if (!p4d_present(*p4d) || p4d_leaf(*p4d))
                goto out;
 
        pud = pud_offset(p4d, address);
@@ -1039,7 +1039,7 @@ spurious_kernel_fault(unsigned long error_code, unsigned long address)
        if (!p4d_present(*p4d))
                return 0;
 
-       if (p4d_large(*p4d))
+       if (p4d_leaf(*p4d))
                return spurious_kernel_fault_check(error_code, (pte_t *) p4d);
 
        pud = pud_offset(p4d, address);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index ebdbcae48011d4cfe6851906a675f7fa05d1892d..d691e7992a9ab795568811bb9c0f0ade084e1d9b 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1197,7 +1197,7 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
                if (!p4d_present(*p4d))
                        continue;
 
-               BUILD_BUG_ON(p4d_large(*p4d));
+               BUILD_BUG_ON(p4d_leaf(*p4d));
 
                pud_base = pud_offset(p4d, 0);
                remove_pud_table(pud_base, addr, next, altmap, direct);
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index e9b448d1b1b70f08dae6216250f02e783091a83a..5359a9c8809976297321b54fafc50572a5374a8f 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -676,7 +676,7 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
                return NULL;
 
        *level = PG_LEVEL_512G;
-       if (p4d_large(*p4d) || !p4d_present(*p4d))
+       if (p4d_leaf(*p4d) || !p4d_present(*p4d))
                return (pte_t *)p4d;
 
        pud = pud_offset(p4d, address);
@@ -739,7 +739,7 @@ pmd_t *lookup_pmd_address(unsigned long address)
                return NULL;
 
        p4d = p4d_offset(pgd, address);
-       if (p4d_none(*p4d) || p4d_large(*p4d) || !p4d_present(*p4d))
+       if (p4d_none(*p4d) || p4d_leaf(*p4d) || !p4d_present(*p4d))
                return NULL;
 
        pud = pud_offset(p4d, address);
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 669ba1c345b3898326657f35b7033bb412fe8898..dc0a81f5f60e7c97063f8e0a100e770dab641047 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -206,7 +206,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
        if (!p4d)
                return NULL;
 
-       BUILD_BUG_ON(p4d_large(*p4d) != 0);
+       BUILD_BUG_ON(p4d_leaf(*p4d) != 0);
        if (p4d_none(*p4d)) {
                unsigned long new_pud_page = __get_free_page(gfp);
                if (WARN_ON_ONCE(!new_pud_page))
diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
index 6f955eb1e1631a04df52708ea9792bb0328d1ca8..28153789f87396302e525e5b77a7a60066e2bf4f 100644
--- a/arch/x86/power/hibernate.c
+++ b/arch/x86/power/hibernate.c
@@ -165,7 +165,7 @@ int relocate_restore_code(void)
        pgd = (pgd_t *)__va(read_cr3_pa()) +
                pgd_index(relocated_restore_code);
        p4d = p4d_offset(pgd, relocated_restore_code);
-       if (p4d_large(*p4d)) {
+       if (p4d_leaf(*p4d)) {
                set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
                goto out;
        }
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index e21974f2cf2d7d33efb19641426e292fe7af0ee9..12a43a4abebfb5f733127de60379d3eb9ad5e3d1 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1104,7 +1104,7 @@ static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
        pud_t *pud_tbl;
        int i;
 
-       if (p4d_large(*p4d)) {
+       if (p4d_leaf(*p4d)) {
                pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
                xen_free_ro_pages(pa, P4D_SIZE);
                return;