4.9-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 4 Mar 2021 14:58:15 +0000 (15:58 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 4 Mar 2021 14:58:15 +0000 (15:58 +0100)
added patches:
mm-hugetlb.c-fix-unnecessary-address-expansion-of-pmd-sharing.patch

queue-4.9/mm-hugetlb.c-fix-unnecessary-address-expansion-of-pmd-sharing.patch [new file with mode: 0644]
queue-4.9/series

diff --git a/queue-4.9/mm-hugetlb.c-fix-unnecessary-address-expansion-of-pmd-sharing.patch b/queue-4.9/mm-hugetlb.c-fix-unnecessary-address-expansion-of-pmd-sharing.patch
new file mode 100644 (file)
index 0000000..ca3b816
--- /dev/null
+++ b/queue-4.9/mm-hugetlb.c-fix-unnecessary-address-expansion-of-pmd-sharing.patch
@@ -0,0 +1,83 @@
+From a1ba9da8f0f9a37d900ff7eff66482cf7de8015e Mon Sep 17 00:00:00 2001
+From: Li Xinhai <lixinhai.lxh@gmail.com>
+Date: Wed, 24 Feb 2021 12:06:54 -0800
+Subject: mm/hugetlb.c: fix unnecessary address expansion of pmd sharing
+
+From: Li Xinhai <lixinhai.lxh@gmail.com>
+
+commit a1ba9da8f0f9a37d900ff7eff66482cf7de8015e upstream.
+
+The current code would unnecessarily expand the address range.  Consider
+one example: with (start, end) = (1G-2M, 3G+2M) and (vm_start, vm_end) =
+(1G-4M, 3G+4M), the expected adjustment is to keep (1G-2M, 3G+2M)
+unexpanded, but the current result is (1G-4M, 3G+4M).  In fact, the
+ranges (1G-4M, 1G) and (3G, 3G+4M) can never be involved in pmd
+sharing.
+
+After this patch, we check that the vma spans at least one PUD-aligned
+size and that the (start, end) range overlaps the aligned range of the vma.
+
+With the above example, the aligned vma range is (1G, 3G), so if the
+(start, end) range lies within (1G-4M, 1G) or within (3G, 3G+4M), neither
+start nor end is adjusted.  Otherwise, start may be adjusted downwards or
+end upwards without exceeding (vm_start, vm_end).
+
+Mike:
+
+: The 'adjusted range' is used for calls to mmu notifiers and cache(tlb)
+: flushing.  Since the current code unnecessarily expands the range in some
+: cases, more entries than necessary would be flushed.  This would/could
+: result in performance degradation.  However, this is highly dependent on
+: the user runtime.  Is there a combination of vma layout and calls to
+: actually hit this issue?  If the issue is hit, will those entries
+: unnecessarily flushed be used again and need to be unnecessarily reloaded?
+
+Link: https://lkml.kernel.org/r/20210104081631.2921415-1-lixinhai.lxh@gmail.com
+Fixes: 75802ca66354 ("mm/hugetlb: fix calculation of adjust_range_if_pmd_sharing_possible")
+Signed-off-by: Li Xinhai <lixinhai.lxh@gmail.com>
+Suggested-by: Mike Kravetz <mike.kravetz@oracle.com>
+Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Peter Xu <peterx@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/hugetlb.c |   22 ++++++++++++----------
+ 1 file changed, 12 insertions(+), 10 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -4436,21 +4436,23 @@ static bool vma_shareable(struct vm_area
+ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+                               unsigned long *start, unsigned long *end)
+ {
+-      unsigned long a_start, a_end;
++      unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
++              v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
+ 
+-      if (!(vma->vm_flags & VM_MAYSHARE))
++      /*
++       * vma needs to span at least one aligned PUD size and the start,end
++       * range must be at least partially within it.
++       */
++      if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
++              (*end <= v_start) || (*start >= v_end))
+               return;
+ 
+       /* Extend the range to be PUD aligned for a worst case scenario */
+-      a_start = ALIGN_DOWN(*start, PUD_SIZE);
+-      a_end = ALIGN(*end, PUD_SIZE);
++      if (*start > v_start)
++              *start = ALIGN_DOWN(*start, PUD_SIZE);
+ 
+-      /*
+-       * Intersect the range with the vma range, since pmd sharing won't be
+-       * across vma after all
+-       */
+-      *start = max(vma->vm_start, a_start);
+-      *end = min(vma->vm_end, a_end);
++      if (*end < v_end)
++              *end = ALIGN(*end, PUD_SIZE);
+ }
+ 
+ /*
diff --git a/queue-4.9/series b/queue-4.9/series
index 0f76e8198abc4763be784480029cee567991e41e..d1b8ad5e5eb5f7d162fd4972815bc32057e3837b 100644 (file)
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -19,3 +19,4 @@ jfs-more-checks-for-invalid-superblock.patch
 xfs-fix-assert-failure-in-xfs_setattr_size.patch
 smackfs-restrict-bytes-count-in-smackfs-write-functions.patch
 net-fix-up-truesize-of-cloned-skb-in-skb_prepare_for_shift.patch
+mm-hugetlb.c-fix-unnecessary-address-expansion-of-pmd-sharing.patch
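For readers who want to check the arithmetic from the commit message outside the kernel, the following is a minimal standalone C sketch, not part of the patch or the stable queue, that models the old and new adjustment logic of adjust_range_if_pmd_sharing_possible().  It assumes PUD_SIZE is 1GB as on x86_64, reduces the vma to a plain (vm_start, vm_end) pair, omits the VM_MAYSHARE check, and re-implements the alignment helpers locally for illustration.

/*
 * Standalone userspace model (not kernel code) of the range adjustment
 * in adjust_range_if_pmd_sharing_possible(), assuming PUD_SIZE = 1GB.
 */
#include <stdio.h>

#define SZ_1M      (1UL << 20)
#define SZ_1G      (1UL << 30)
#define PUD_SIZE   SZ_1G

/* Power-of-two alignment helpers with the same semantics as the kernel's. */
#define ALIGN_DOWN(x, a)  ((x) & ~((a) - 1))
#define ALIGN(x, a)       ALIGN_DOWN((x) + (a) - 1, (a))
#define max(a, b)         ((a) > (b) ? (a) : (b))
#define min(a, b)         ((a) < (b) ? (a) : (b))

/* Old behaviour: PUD-align unconditionally, then clamp to the vma. */
static void adjust_old(unsigned long vm_start, unsigned long vm_end,
		       unsigned long *start, unsigned long *end)
{
	unsigned long a_start = ALIGN_DOWN(*start, PUD_SIZE);
	unsigned long a_end = ALIGN(*end, PUD_SIZE);

	*start = max(vm_start, a_start);
	*end = min(vm_end, a_end);
}

/* New behaviour: only widen an endpoint that lies strictly inside the
 * PUD-aligned portion (v_start, v_end) of the vma. */
static void adjust_new(unsigned long vm_start, unsigned long vm_end,
		       unsigned long *start, unsigned long *end)
{
	unsigned long v_start = ALIGN(vm_start, PUD_SIZE);
	unsigned long v_end = ALIGN_DOWN(vm_end, PUD_SIZE);

	if (!(v_end > v_start) || *end <= v_start || *start >= v_end)
		return;

	if (*start > v_start)
		*start = ALIGN_DOWN(*start, PUD_SIZE);
	if (*end < v_end)
		*end = ALIGN(*end, PUD_SIZE);
}

int main(void)
{
	/* Example from the commit message:
	 * vma = (1G-4M, 3G+4M), requested range = (1G-2M, 3G+2M). */
	unsigned long vm_start = 1 * SZ_1G - 4 * SZ_1M;
	unsigned long vm_end   = 3 * SZ_1G + 4 * SZ_1M;
	unsigned long s_old = 1 * SZ_1G - 2 * SZ_1M, e_old = 3 * SZ_1G + 2 * SZ_1M;
	unsigned long s_new = s_old, e_new = e_old;

	adjust_old(vm_start, vm_end, &s_old, &e_old);
	adjust_new(vm_start, vm_end, &s_new, &e_new);

	/* Prints 1020M..3076M (1G-4M..3G+4M) for the old code and
	 * 1022M..3074M (1G-2M..3G+2M, i.e. unchanged) for the new code. */
	printf("old: %luM..%luM\n", s_old / SZ_1M, e_old / SZ_1M);
	printf("new: %luM..%luM\n", s_new / SZ_1M, e_new / SZ_1M);
	return 0;
}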