mm: simplify thp_vma_allowable_order
author    Matthew Wilcox <willy@infradead.org>
          Thu, 25 Apr 2024 04:00:55 +0000 (05:00 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
          Mon, 6 May 2024 00:53:53 +0000 (17:53 -0700)
Combine the three boolean arguments into one flags argument for
readability.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
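
For readers skimming the diff below, here is the refactor pattern in isolation: a minimal standalone sketch, not kernel code. The TVA_* values mirror the flags this commit introduces; the allowable() helper and its return logic are illustrative scaffolding only, standing in for __thp_vma_allowable_orders().

    /*
     * Sketch of the bool-to-flags refactor: three positional bools
     * collapse into one bitmask, so call sites name the behaviour
     * they want instead of passing an opaque (false, true, true).
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define TVA_SMAPS           (1 << 0)  /* answer is for /proc smaps */
    #define TVA_IN_PF           (1 << 1)  /* called from page fault path */
    #define TVA_ENFORCE_SYSFS   (1 << 2)  /* honour sysfs THP settings */

    /* After: one flags word, decoded once at the callee. */
    static bool allowable(unsigned long tva_flags)
    {
            bool smaps = tva_flags & TVA_SMAPS;
            bool in_pf = tva_flags & TVA_IN_PF;
            bool enforce_sysfs = tva_flags & TVA_ENFORCE_SYSFS;

            /* Placeholder logic; the real checks live in mm/huge_memory.c. */
            return in_pf || enforce_sysfs || smaps;
    }

    int main(void)
    {
            /* Before: allowable(false, true, true) -- unreadable at the call site. */
            printf("%d\n", allowable(TVA_IN_PF | TVA_ENFORCE_SYSFS));
            return 0;
    }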
fs/proc/task_mmu.c
include/linux/huge_mm.h
mm/huge_memory.c
mm/khugepaged.c
mm/memory.c

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f4259b7edfded80d25aad0020739fd606e0ee561..81fbecfe5ff6c70c9ef645ea6c52285c5222f5e4 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -871,8 +871,8 @@ static int show_smap(struct seq_file *m, void *v)
        __show_smap(m, &mss, false);
 
        seq_printf(m, "THPeligible:    %8u\n",
-                  !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
-                                             true, THP_ORDERS_ALL));
+                  !!thp_vma_allowable_orders(vma, vma->vm_flags,
+                          TVA_SMAPS | TVA_ENFORCE_SYSFS, THP_ORDERS_ALL));
 
        if (arch_pkeys_enabled())
                seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 7cd07b83a3d0aaacb9a7195d9004b351c7133262..c8d3ec116e291ba432b4b5a58a664b791712de09 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -81,8 +81,12 @@ extern struct kobj_attribute shmem_enabled_attr;
  */
 #define THP_ORDERS_ALL         (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
 
-#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
-       (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
+#define TVA_SMAPS              (1 << 0)        /* Will be used for procfs */
+#define TVA_IN_PF              (1 << 1)        /* Page fault handler */
+#define TVA_ENFORCE_SYSFS      (1 << 2)        /* Obey sysfs configuration */
+
+#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
+       (!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
 
 #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
 #define HPAGE_PMD_SHIFT PMD_SHIFT
@@ -218,17 +222,15 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma)
 }
 
 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
-                                        unsigned long vm_flags, bool smaps,
-                                        bool in_pf, bool enforce_sysfs,
+                                        unsigned long vm_flags,
+                                        unsigned long tva_flags,
                                         unsigned long orders);
 
 /**
  * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
  * @vma:  the vm area to check
  * @vm_flags: use these vm_flags instead of vma->vm_flags
- * @smaps: whether answer will be used for smaps file
- * @in_pf: whether answer will be used by page fault handler
- * @enforce_sysfs: whether sysfs config should be taken into account
+ * @tva_flags: Which TVA flags to honour
  * @orders: bitfield of all orders to consider
  *
  * Calculates the intersection of the requested hugepage orders and the allowed
@@ -241,12 +243,12 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
  */
 static inline
 unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
-                                      unsigned long vm_flags, bool smaps,
-                                      bool in_pf, bool enforce_sysfs,
+                                      unsigned long vm_flags,
+                                      unsigned long tva_flags,
                                       unsigned long orders)
 {
        /* Optimization to check if required orders are enabled early. */
-       if (enforce_sysfs && vma_is_anonymous(vma)) {
+       if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
                unsigned long mask = READ_ONCE(huge_anon_orders_always);
 
                if (vm_flags & VM_HUGEPAGE)
@@ -260,8 +262,7 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
                        return 0;
        }
 
-       return __thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf,
-                                         enforce_sysfs, orders);
+       return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
 }
 
 enum mthp_stat_item {
@@ -428,8 +429,8 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
 }
 
 static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
-                                       unsigned long vm_flags, bool smaps,
-                                       bool in_pf, bool enforce_sysfs,
+                                       unsigned long vm_flags,
+                                       unsigned long tva_flags,
                                        unsigned long orders)
 {
        return 0;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index eabf088cb444b06c48b2a60a57a6f076900ef4af..aaa327bd5155bc1b177aaf92e2617bab9535cf12 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -81,10 +81,13 @@ unsigned long huge_anon_orders_madvise __read_mostly;
 unsigned long huge_anon_orders_inherit __read_mostly;
 
 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
-                                        unsigned long vm_flags, bool smaps,
-                                        bool in_pf, bool enforce_sysfs,
+                                        unsigned long vm_flags,
+                                        unsigned long tva_flags,
                                         unsigned long orders)
 {
+       bool smaps = tva_flags & TVA_SMAPS;
+       bool in_pf = tva_flags & TVA_IN_PF;
+       bool enforce_sysfs = tva_flags & TVA_ENFORCE_SYSFS;
        /* Check the intersection of requested and supported orders. */
        orders &= vma_is_anonymous(vma) ?
                        THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index cf518fc4409822027a1ec60aac150afca9639bc6..774a97e6e2da39f0c174b885f2ea2297a8df4c61 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -453,7 +453,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
 {
        if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
            hugepage_flags_enabled()) {
-               if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
+               if (thp_vma_allowable_order(vma, vm_flags, TVA_ENFORCE_SYSFS,
                                            PMD_ORDER))
                        __khugepaged_enter(vma->vm_mm);
        }
@@ -900,6 +900,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
                                   struct collapse_control *cc)
 {
        struct vm_area_struct *vma;
+       unsigned long tva_flags = cc->is_khugepaged ? TVA_ENFORCE_SYSFS : 0;
 
        if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
                return SCAN_ANY_PROCESS;
@@ -910,8 +911,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 
        if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
                return SCAN_ADDRESS_RANGE;
-       if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
-                                    cc->is_khugepaged, PMD_ORDER))
+       if (!thp_vma_allowable_order(vma, vma->vm_flags, tva_flags, PMD_ORDER))
                return SCAN_VMA_CHECK;
        /*
         * Anon VMA expected, the address may be unmapped then
@@ -1501,8 +1501,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
         * and map it by a PMD, regardless of sysfs THP settings. As such, let's
         * analogously elide sysfs THP settings here.
         */
-       if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
-                                    PMD_ORDER))
+       if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
                return SCAN_VMA_CHECK;
 
        /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
@@ -2363,8 +2362,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
                        progress++;
                        break;
                }
-               if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
-                                            true, PMD_ORDER)) {
+               if (!thp_vma_allowable_order(vma, vma->vm_flags,
+                                       TVA_ENFORCE_SYSFS, PMD_ORDER)) {
 skip:
                        progress++;
                        continue;
@@ -2701,8 +2700,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
 
        *prev = vma;
 
-       if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
-                                    PMD_ORDER))
+       if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
                return -EINVAL;
 
        cc = kmalloc(sizeof(*cc), GFP_KERNEL);
diff --git a/mm/memory.c b/mm/memory.c
index c0bd4e0d5e7a7ccddd58438be245b0de37d5e8a1..79d851be8ab25c5d407fc53a8b2dee2a1a6701a7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4334,8 +4334,8 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
         * for this vma. Then filter out the orders that can't be allocated over
         * the faulting address and still be fully contained in the vma.
         */
-       orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
-                                         BIT(PMD_ORDER) - 1);
+       orders = thp_vma_allowable_orders(vma, vma->vm_flags,
+                       TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);
        orders = thp_vma_suitable_orders(vma, vmf->address, orders);
 
        if (!orders)
@@ -5438,7 +5438,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
                return VM_FAULT_OOM;
 retry_pud:
        if (pud_none(*vmf.pud) &&
-           thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER)) {
+           thp_vma_allowable_order(vma, vm_flags,
+                               TVA_IN_PF | TVA_ENFORCE_SYSFS, PUD_ORDER)) {
                ret = create_huge_pud(&vmf);
                if (!(ret & VM_FAULT_FALLBACK))
                        return ret;
@@ -5472,7 +5473,8 @@ retry_pud:
                goto retry_pud;
 
        if (pmd_none(*vmf.pmd) &&
-           thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)) {
+           thp_vma_allowable_order(vma, vm_flags,
+                               TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER)) {
                ret = create_huge_pmd(&vmf);
                if (!(ret & VM_FAULT_FALLBACK))
                        return ret;