git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
s390/uv: Implement HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
author: David Hildenbrand <david@redhat.com>
Wed, 8 May 2024 18:29:54 +0000 (20:29 +0200)
committer: Alexander Gordeev <agordeev@linux.ibm.com>
Wed, 5 Jun 2024 15:17:25 +0000 (17:17 +0200)
Let's also implement HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE, so we can convert
arch_make_page_accessible() to be a simple wrapper around
arch_make_folio_accessible(). Unfortunately, we cannot do that in the
header.

There are only two arch_make_page_accessible() calls remaining in gup.c.
We can now drop HAVE_ARCH_MAKE_PAGE_ACCESSIBLE completely from core-MM.
We'll handle that separately, once the s390x part has landed.

Suggested-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Link: https://lore.kernel.org/r/20240508182955.358628-10-david@redhat.com
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
arch/s390/include/asm/page.h
arch/s390/kernel/uv.c
arch/s390/mm/fault.c

index ecbf4b626f467c23caf80d5e5feed2b2a8562091..5ec41ec3d761e3fa6b603da90c35280ab34d8a50 100644 (file)
@@ -162,6 +162,7 @@ static inline int page_reset_referenced(unsigned long addr)
 #define _PAGE_ACC_BITS         0xf0    /* HW access control bits       */
 
 struct page;
+struct folio;
 void arch_free_page(struct page *page, int order);
 void arch_alloc_page(struct page *page, int order);
 
@@ -174,6 +175,8 @@ static inline int devmem_is_allowed(unsigned long pfn)
 #define HAVE_ARCH_ALLOC_PAGE
 
 #if IS_ENABLED(CONFIG_PGSTE)
+int arch_make_folio_accessible(struct folio *folio);
+#define HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
 int arch_make_page_accessible(struct page *page);
 #define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
 #endif
index b456066d72da96b1658a12dcd5409b506c44e31d..fa62fa0e369f2736216d3509f51c35d50c32a1c0 100644 (file)
@@ -498,14 +498,13 @@ out:
 EXPORT_SYMBOL_GPL(gmap_destroy_page);
 
 /*
- * To be called with the page locked or with an extra reference! This will
- * prevent gmap_make_secure from touching the page concurrently. Having 2
- * parallel make_page_accessible is fine, as the UV calls will become a
- * no-op if the page is already exported.
+ * To be called with the folio locked or with an extra reference! This will
+ * prevent gmap_make_secure from touching the folio concurrently. Having 2
+ * parallel arch_make_folio_accessible is fine, as the UV calls will become a
+ * no-op if the folio is already exported.
  */
-int arch_make_page_accessible(struct page *page)
+int arch_make_folio_accessible(struct folio *folio)
 {
-       struct folio *folio = page_folio(page);
        int rc = 0;
 
        /* See gmap_make_secure(): large folios cannot be secure */
@@ -537,8 +536,13 @@ int arch_make_page_accessible(struct page *page)
 
        return rc;
 }
-EXPORT_SYMBOL_GPL(arch_make_page_accessible);
+EXPORT_SYMBOL_GPL(arch_make_folio_accessible);
 
+int arch_make_page_accessible(struct page *page)
+{
+       return arch_make_folio_accessible(page_folio(page));
+}
+EXPORT_SYMBOL_GPL(arch_make_page_accessible);
 #endif
 
 #if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
index 65747f15dbec4baf37cc05b7f064eb57dd05cd5d..7cd50ad3b4ade17a35d96bd2e8010c55af956681 100644 (file)
@@ -492,6 +492,7 @@ void do_secure_storage_access(struct pt_regs *regs)
        unsigned long addr = get_fault_address(regs);
        struct vm_area_struct *vma;
        struct mm_struct *mm;
+       struct folio *folio;
        struct page *page;
        struct gmap *gmap;
        int rc;
@@ -539,17 +540,18 @@ void do_secure_storage_access(struct pt_regs *regs)
                        mmap_read_unlock(mm);
                        break;
                }
-               if (arch_make_page_accessible(page))
+               folio = page_folio(page);
+               if (arch_make_folio_accessible(folio))
                        send_sig(SIGSEGV, current, 0);
-               put_page(page);
+               folio_put(folio);
                mmap_read_unlock(mm);
                break;
        case KERNEL_FAULT:
-               page = phys_to_page(addr);
-               if (unlikely(!try_get_page(page)))
+               folio = phys_to_folio(addr);
+               if (unlikely(!folio_try_get(folio)))
                        break;
-               rc = arch_make_page_accessible(page);
-               put_page(page);
+               rc = arch_make_folio_accessible(folio);
+               folio_put(folio);
                if (rc)
                        BUG();
                break;