git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
mm/treewide: replace pud_large() with pud_leaf()
author: Peter Xu <peterx@redhat.com>
Tue, 5 Mar 2024 04:37:48 +0000 (12:37 +0800)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 10 Apr 2024 14:35:46 +0000 (16:35 +0200)
[ Upstream commit 0a845e0f6348ccfa2dcc8c450ffd1c9ffe8c4add ]

pud_large() is always defined as pud_leaf().  Merge their usages.  Chose
pud_leaf() because pud_leaf() is a global API, while pud_large() is not.

Link: https://lkml.kernel.org/r/20240305043750.93762-9-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Stable-dep-of: c567f2948f57 ("Revert "x86/mm/ident_map: Use gbpages only where full GB page should be mapped."")
Signed-off-by: Sasha Levin <sashal@kernel.org>
20 files changed:
arch/powerpc/mm/book3s64/pgtable.c
arch/s390/boot/vmem.c
arch/s390/include/asm/pgtable.h
arch/s390/mm/gmap.c
arch/s390/mm/hugetlbpage.c
arch/s390/mm/pageattr.c
arch/s390/mm/pgtable.c
arch/s390/mm/vmem.c
arch/sparc/mm/init_64.c
arch/x86/kvm/mmu/mmu.c
arch/x86/mm/fault.c
arch/x86/mm/ident_map.c
arch/x86/mm/init_64.c
arch/x86/mm/kasan_init_64.c
arch/x86/mm/mem_encrypt_identity.c
arch/x86/mm/pat/set_memory.c
arch/x86/mm/pgtable.c
arch/x86/mm/pti.c
arch/x86/power/hibernate.c
arch/x86/xen/mmu_pv.c

index 926bec775f41cc5b0457e1f0656bb2da92d08a70..9822366dc186e3f7f974a394a185f70997303c74 100644 (file)
@@ -130,7 +130,7 @@ void set_pud_at(struct mm_struct *mm, unsigned long addr,
 
        WARN_ON(pte_hw_valid(pud_pte(*pudp)));
        assert_spin_locked(pud_lockptr(mm, pudp));
-       WARN_ON(!(pud_large(pud)));
+       WARN_ON(!(pud_leaf(pud)));
 #endif
        trace_hugepage_set_pud(addr, pud_val(pud));
        return set_pte_at(mm, addr, pudp_ptep(pudp), pud_pte(pud));
index 442a74f113cbfdbafdff3bb98ee24c7dbef20ed7..14e1a73ffcfe63be4d99449d99ebef369d07e108 100644 (file)
@@ -360,7 +360,7 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
                        }
                        pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);
                        pud_populate(&init_mm, pud, pmd);
-               } else if (pud_large(*pud)) {
+               } else if (pud_leaf(*pud)) {
                        continue;
                }
                pgtable_pmd_populate(pud, addr, next, mode);
index fb3ee7758b76509e70d50c89fa42d047640d2727..38290b0078c562dfb3404344df07a9081f5b0990 100644 (file)
@@ -729,7 +729,7 @@ static inline int pud_bad(pud_t pud)
 {
        unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;
 
-       if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
+       if (type > _REGION_ENTRY_TYPE_R3 || pud_leaf(pud))
                return 1;
        if (type < _REGION_ENTRY_TYPE_R3)
                return 0;
@@ -1396,7 +1396,7 @@ static inline unsigned long pud_deref(pud_t pud)
        unsigned long origin_mask;
 
        origin_mask = _REGION_ENTRY_ORIGIN;
-       if (pud_large(pud))
+       if (pud_leaf(pud))
                origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
        return (unsigned long)__va(pud_val(pud) & origin_mask);
 }
index 157e0a8d5157dc40979eb0804c2c5be9cdd23e04..d17bb1ef63f41d4d2255830edf01e0fa2b41df86 100644 (file)
@@ -596,7 +596,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
        pud = pud_offset(p4d, vmaddr);
        VM_BUG_ON(pud_none(*pud));
        /* large puds cannot yet be handled */
-       if (pud_large(*pud))
+       if (pud_leaf(*pud))
                return -EFAULT;
        pmd = pmd_offset(pud, vmaddr);
        VM_BUG_ON(pmd_none(*pmd));
index 297a6d897d5a0c0e2e00f271ae23d918c4c6862a..5f64f3d0fafbb4c350ba5c082cf086906d8d7214 100644 (file)
@@ -224,7 +224,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
                if (p4d_present(*p4dp)) {
                        pudp = pud_offset(p4dp, addr);
                        if (pud_present(*pudp)) {
-                               if (pud_large(*pudp))
+                               if (pud_leaf(*pudp))
                                        return (pte_t *) pudp;
                                pmdp = pmd_offset(pudp, addr);
                        }
@@ -240,7 +240,7 @@ int pmd_huge(pmd_t pmd)
 
 int pud_huge(pud_t pud)
 {
-       return pud_large(pud);
+       return pud_leaf(pud);
 }
 
 bool __init arch_hugetlb_valid_size(unsigned long size)
index b87e96c64b61d218e2a05570892ec30537d9985f..441f654d048d2018a219f47a3c4adef38c35531b 100644 (file)
@@ -274,7 +274,7 @@ static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
                if (pud_none(*pudp))
                        return -EINVAL;
                next = pud_addr_end(addr, end);
-               if (pud_large(*pudp)) {
+               if (pud_leaf(*pudp)) {
                        need_split  = !!(flags & SET_MEMORY_4K);
                        need_split |= !!(addr & ~PUD_MASK);
                        need_split |= !!(addr + PUD_SIZE > next);
index 5cb92941540b32bea8e908863c3364a0f61230c5..5e349869590a83cdb2b00c5c2ea589d49c1570d1 100644 (file)
@@ -479,7 +479,7 @@ static int pmd_lookup(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp)
                return -ENOENT;
 
        /* Large PUDs are not supported yet. */
-       if (pud_large(*pud))
+       if (pud_leaf(*pud))
                return -EFAULT;
 
        *pmdp = pmd_offset(pud, addr);
index 6d276103c6d58e924550f715d666e99132573343..2d3f65da56eeaaa3c96a391875936f7c166bc77e 100644 (file)
@@ -322,7 +322,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
                if (!add) {
                        if (pud_none(*pud))
                                continue;
-                       if (pud_large(*pud)) {
+                       if (pud_leaf(*pud)) {
                                if (IS_ALIGNED(addr, PUD_SIZE) &&
                                    IS_ALIGNED(next, PUD_SIZE)) {
                                        pud_clear(pud);
@@ -343,7 +343,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
                        if (!pmd)
                                goto out;
                        pud_populate(&init_mm, pud, pmd);
-               } else if (pud_large(*pud)) {
+               } else if (pud_leaf(*pud)) {
                        continue;
                }
                ret = modify_pmd_table(pud, addr, next, add, direct);
@@ -586,7 +586,7 @@ pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
                if (!pmd)
                        goto out;
                pud_populate(&init_mm, pud, pmd);
-       } else if (WARN_ON_ONCE(pud_large(*pud))) {
+       } else if (WARN_ON_ONCE(pud_leaf(*pud))) {
                goto out;
        }
        pmd = pmd_offset(pud, addr);
index f83017992eaaeb79e757adebf7cb2923e4859b0b..d7db4e737218c20a7f1a5e8f78601f659c49af6b 100644 (file)
@@ -1665,7 +1665,7 @@ bool kern_addr_valid(unsigned long addr)
        if (pud_none(*pud))
                return false;
 
-       if (pud_large(*pud))
+       if (pud_leaf(*pud))
                return pfn_valid(pud_pfn(*pud));
 
        pmd = pmd_offset(pud, addr);
index f7901cb4d2fa4b67a4dae93648a169fd2931585b..11c484d72eab2f97256a7afa33fd461db6ec2334 100644 (file)
@@ -3120,7 +3120,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
        if (pud_none(pud) || !pud_present(pud))
                goto out;
 
-       if (pud_large(pud)) {
+       if (pud_leaf(pud)) {
                level = PG_LEVEL_1G;
                goto out;
        }
index a9d69ec994b75d8d666ddcef165db287e69c040d..e238517968836ea1c60475662825cf923d938fdd 100644 (file)
@@ -376,7 +376,7 @@ static void dump_pagetable(unsigned long address)
                goto bad;
 
        pr_cont("PUD %lx ", pud_val(*pud));
-       if (!pud_present(*pud) || pud_large(*pud))
+       if (!pud_present(*pud) || pud_leaf(*pud))
                goto out;
 
        pmd = pmd_offset(pud, address);
@@ -1037,7 +1037,7 @@ spurious_kernel_fault(unsigned long error_code, unsigned long address)
        if (!pud_present(*pud))
                return 0;
 
-       if (pud_large(*pud))
+       if (pud_leaf(*pud))
                return spurious_kernel_fault_check(error_code, (pte_t *) pud);
 
        pmd = pmd_offset(pud, address);
index f50cc210a981886e7d3a265b4d43ca16f47f6825..a204a332c71fc50948c884251cc15cc89afeaf1f 100644 (file)
@@ -33,7 +33,7 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
                        next = end;
 
                /* if this is already a gbpage, this portion is already mapped */
-               if (pud_large(*pud))
+               if (pud_leaf(*pud))
                        continue;
 
                /* Is using a gbpage allowed? */
index a190aae8ceaf70c4070c216348a307a2d28535c9..19d209b412d7acbe7b42b075fec1164120e51c19 100644 (file)
@@ -617,7 +617,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
                }
 
                if (!pud_none(*pud)) {
-                       if (!pud_large(*pud)) {
+                       if (!pud_leaf(*pud)) {
                                pmd = pmd_offset(pud, 0);
                                paddr_last = phys_pmd_init(pmd, paddr,
                                                           paddr_end,
@@ -1163,7 +1163,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
                if (!pud_present(*pud))
                        continue;
 
-               if (pud_large(*pud) &&
+               if (pud_leaf(*pud) &&
                    IS_ALIGNED(addr, PUD_SIZE) &&
                    IS_ALIGNED(next, PUD_SIZE)) {
                        spin_lock(&init_mm.page_table_lock);
index 0302491d799d1b2227826eab5a01f76403e75edc..fcf508c52bdc5c1c6606c96a9a4a42dfce191e0a 100644 (file)
@@ -115,7 +115,7 @@ static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
        pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
-               if (!pud_large(*pud))
+               if (!pud_leaf(*pud))
                        kasan_populate_pud(pud, addr, next, nid);
        } while (pud++, addr = next, addr != end);
 }
index 0166ab1780ccb115c75fde3795e5800474078557..ead3561359242d9c2ed559a44ff3adfb3cd7fcf0 100644 (file)
@@ -144,7 +144,7 @@ static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
                set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
        }
 
-       if (pud_large(*pud))
+       if (pud_leaf(*pud))
                return NULL;
 
        return pud;
index bda9f129835e956fc4db50d3e67cf96618e76be0..f3c4c756fe1ee24aed74acfb330f708f24d4af70 100644 (file)
@@ -684,7 +684,7 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
                return NULL;
 
        *level = PG_LEVEL_1G;
-       if (pud_large(*pud) || !pud_present(*pud))
+       if (pud_leaf(*pud) || !pud_present(*pud))
                return (pte_t *)pud;
 
        pmd = pmd_offset(pud, address);
@@ -743,7 +743,7 @@ pmd_t *lookup_pmd_address(unsigned long address)
                return NULL;
 
        pud = pud_offset(p4d, address);
-       if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
+       if (pud_none(*pud) || pud_leaf(*pud) || !pud_present(*pud))
                return NULL;
 
        return pmd_offset(pud, address);
@@ -1274,7 +1274,7 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
         */
        while (end - start >= PUD_SIZE) {
 
-               if (pud_large(*pud))
+               if (pud_leaf(*pud))
                        pud_clear(pud);
                else
                        unmap_pmd_range(pud, start, start + PUD_SIZE);
index 9deadf517f14a94c75f1509ba40678cc544b8a5b..8e1ef5345b7a88c8f43a472de0686d415ac3d082 100644 (file)
@@ -774,7 +774,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
  */
 int pud_clear_huge(pud_t *pud)
 {
-       if (pud_large(*pud)) {
+       if (pud_leaf(*pud)) {
                pud_clear(pud);
                return 1;
        }
index 78414c6d1b5ed1c245d13767907649b40f88b993..51b6b78e6b1751b20b20323703ed4219a44d8ec7 100644 (file)
@@ -217,7 +217,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
 
        pud = pud_offset(p4d, address);
        /* The user page tables do not use large mappings: */
-       if (pud_large(*pud)) {
+       if (pud_leaf(*pud)) {
                WARN_ON(1);
                return NULL;
        }
index 6f955eb1e1631a04df52708ea9792bb0328d1ca8..d8af46e6775034a49211c07431f3d35433c5ff29 100644 (file)
@@ -170,7 +170,7 @@ int relocate_restore_code(void)
                goto out;
        }
        pud = pud_offset(p4d, relocated_restore_code);
-       if (pud_large(*pud)) {
+       if (pud_leaf(*pud)) {
                set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
                goto out;
        }
index b6830554ff6905633fc543fb39e1b5b4a024bb07..9d4a9311e819bb349157be4d7660b9931b155624 100644 (file)
@@ -1082,7 +1082,7 @@ static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
        pmd_t *pmd_tbl;
        int i;
 
-       if (pud_large(*pud)) {
+       if (pud_leaf(*pud)) {
                pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
                xen_free_ro_pages(pa, PUD_SIZE);
                return;
@@ -1863,7 +1863,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
        if (!pud_present(pud))
                return 0;
        pa = pud_val(pud) & PTE_PFN_MASK;
-       if (pud_large(pud))
+       if (pud_leaf(pud))
                return pa + (vaddr & ~PUD_MASK);
 
        pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *