git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
s390/hugetlb: Convert PG_arch_1 code to work on folio->flags
author	David Hildenbrand <david@redhat.com>
Wed, 8 May 2024 18:29:55 +0000 (20:29 +0200)
committer	Alexander Gordeev <agordeev@linux.ibm.com>
Wed, 5 Jun 2024 15:17:26 +0000 (17:17 +0200)
Let's make it clearer that we are always working on folio flags and
never page flags of tail pages by converting remaining PG_arch_1 users
that modify page->flags to modify folio->flags instead.

No functional change intended, because we would always have worked with
the head page (where page->flags corresponds to folio->flags) and never
with tail pages.
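
As a minimal sketch of the idiom this relies on (illustration only, not part
of the patch; the helper name below is made up), the conversion in gmap.c
boils down to:

	/*
	 * pmd_page() returns the head page of the hugetlb page, and the folio
	 * overlays that head page, so &folio->flags names the same flags word
	 * that &page->flags named before this change.
	 */
	static void mark_skeys_initialized(pmd_t *pmd)	/* hypothetical helper */
	{
		struct folio *folio = page_folio(pmd_page(*pmd));

		set_bit(PG_arch_1, &folio->flags);
	}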

Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Link: https://lore.kernel.org/r/20240508182955.358628-11-david@redhat.com
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
arch/s390/mm/gmap.c
arch/s390/mm/hugetlbpage.c

index 7537e7b4be3930c52e69aaa849dd6dfbbe11bb07..d5a5756dd69f20ded3db0ef36ee8982a8deb5092 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2733,7 +2733,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
 {
        pmd_t *pmd = (pmd_t *)pte;
        unsigned long start, end;
-       struct page *page = pmd_page(*pmd);
+       struct folio *folio = page_folio(pmd_page(*pmd));
 
        /*
         * The write check makes sure we do not set a key on shared
@@ -2748,7 +2748,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
        start = pmd_val(*pmd) & HPAGE_MASK;
        end = start + HPAGE_SIZE;
        __storage_key_init_range(start, end);
-       set_bit(PG_arch_1, &page->flags);
+       set_bit(PG_arch_1, &folio->flags);
        cond_resched();
        return 0;
 }
index 2675aab4acc7008d830fdf6938554e8d00415374..34d558164f0d6978a2cdbd1ce4968f7b06c5f1cd 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -121,7 +121,7 @@ static inline pte_t __rste_to_pte(unsigned long rste)
 
 static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
 {
-       struct page *page;
+       struct folio *folio;
        unsigned long size, paddr;
 
        if (!mm_uses_skeys(mm) ||
@@ -129,16 +129,16 @@ static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
                return;
 
        if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
-               page = pud_page(__pud(rste));
+               folio = page_folio(pud_page(__pud(rste)));
                size = PUD_SIZE;
                paddr = rste & PUD_MASK;
        } else {
-               page = pmd_page(__pmd(rste));
+               folio = page_folio(pmd_page(__pmd(rste)));
                size = PMD_SIZE;
                paddr = rste & PMD_MASK;
        }
 
-       if (!test_and_set_bit(PG_arch_1, &page->flags))
+       if (!test_and_set_bit(PG_arch_1, &folio->flags))
                __storage_key_init_range(paddr, paddr + size);
 }