6.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 5 Apr 2024 09:23:55 +0000 (11:23 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 5 Apr 2024 09:23:55 +0000 (11:23 +0200)
added patches:
revert-x86-mm-ident_map-use-gbpages-only-where-full-gb-page-should-be-mapped.patch

queue-6.1/revert-x86-mm-ident_map-use-gbpages-only-where-full-gb-page-should-be-mapped.patch [new file with mode: 0644]
queue-6.1/series

diff --git a/queue-6.1/revert-x86-mm-ident_map-use-gbpages-only-where-full-gb-page-should-be-mapped.patch b/queue-6.1/revert-x86-mm-ident_map-use-gbpages-only-where-full-gb-page-should-be-mapped.patch
new file mode 100644
index 0000000..191d032
--- /dev/null
+++ b/queue-6.1/revert-x86-mm-ident_map-use-gbpages-only-where-full-gb-page-should-be-mapped.patch
@@ -0,0 +1,75 @@
+From c567f2948f57bdc03ed03403ae0234085f376b7d Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@kernel.org>
+Date: Mon, 25 Mar 2024 11:47:51 +0100
+Subject: Revert "x86/mm/ident_map: Use gbpages only where full GB page should be mapped."
+
+From: Ingo Molnar <mingo@kernel.org>
+
+commit c567f2948f57bdc03ed03403ae0234085f376b7d upstream.
+
+This reverts commit d794734c9bbfe22f86686dc2909c25f5ffe1a572.
+
+While the original change tried to fix a bug, it also unintentionally broke
+existing systems; see the regressions reported at:
+
+  https://lore.kernel.org/all/3a1b9909-45ac-4f97-ad68-d16ef1ce99db@pavinjoseph.com/
+
+Since d794734c9bbf was also marked for -stable, let's back it out before
+causing more damage.
+
+Note that due to another upstream change the revert was not 100% automatic:
+
+  0a845e0f6348 mm/treewide: replace pud_large() with pud_leaf()
+
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: <stable@vger.kernel.org>
+Cc: Russ Anderson <rja@hpe.com>
+Cc: Steve Wahl <steve.wahl@hpe.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Link: https://lore.kernel.org/all/3a1b9909-45ac-4f97-ad68-d16ef1ce99db@pavinjoseph.com/
+Fixes: d794734c9bbf ("x86/mm/ident_map: Use gbpages only where full GB page should be mapped.")
+Signed-off-by: Steve Wahl <steve.wahl@hpe.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/ident_map.c |   23 +++++------------------
+ 1 file changed, 5 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/mm/ident_map.c
++++ b/arch/x86/mm/ident_map.c
+@@ -26,31 +26,18 @@ static int ident_pud_init(struct x86_map
+       for (; addr < end; addr = next) {
+               pud_t *pud = pud_page + pud_index(addr);
+               pmd_t *pmd;
+-              bool use_gbpage;
+ 
+               next = (addr & PUD_MASK) + PUD_SIZE;
+               if (next > end)
+                       next = end;
+ 
+-              /* if this is already a gbpage, this portion is already mapped */
+-              if (pud_large(*pud))
+-                      continue;
+-
+-              /* Is using a gbpage allowed? */
+-              use_gbpage = info->direct_gbpages;
+-
+-              /* Don't use gbpage if it maps more than the requested region. */
+-              /* at the begining: */
+-              use_gbpage &= ((addr & ~PUD_MASK) == 0);
+-              /* ... or at the end: */
+-              use_gbpage &= ((next & ~PUD_MASK) == 0);
+-
+-              /* Never overwrite existing mappings */
+-              use_gbpage &= !pud_present(*pud);
+-
+-              if (use_gbpage) {
++              if (info->direct_gbpages) {
+                       pud_t pudval;
+ 
++                      if (pud_present(*pud))
++                              continue;
++
++                      addr &= PUD_MASK;
+                       pudval = __pud((addr - info->offset) | info->page_flag);
+                       set_pud(pud, pudval);
+                       continue;
diff --git a/queue-6.1/series b/queue-6.1/series
index ccd1e22496a2702eb75b3add510567f3fd521518..3b846debae322c510ed1ba338ac8ccf30a76556e 100644
--- a/queue-6.1/series
+++ b/queue-6.1/series
@@ -36,3 +36,4 @@ xen-netfront-add-missing-skb_mark_for_recycle.patch
 net-rds-fix-possible-cp-null-dereference.patch
 net-usb-ax88179_178a-avoid-the-interface-always-configured-as-random-address.patch
 vsock-virtio-fix-packet-delivery-to-tap-device.patch
+revert-x86-mm-ident_map-use-gbpages-only-where-full-gb-page-should-be-mapped.patch