4.19-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 1 Sep 2022 10:31:47 +0000 (12:31 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 1 Sep 2022 10:31:47 +0000 (12:31 +0200)
added patches:
bluetooth-l2cap-fix-build-errors-in-some-archs.patch
bpf-fix-the-off-by-two-error-in-range-markings.patch
kbuild-fix-include-path-in-scripts-makefile.modpost.patch
s390-mm-do-not-trigger-write-fault-when-vma-does-not-allow-vm_write.patch
selftests-bpf-fix-test_align-verifier-log-patterns.patch
x86-bugs-add-unknown-reporting-for-mmio-stale-data.patch

queue-4.19/bluetooth-l2cap-fix-build-errors-in-some-archs.patch [new file with mode: 0644]
queue-4.19/bpf-fix-the-off-by-two-error-in-range-markings.patch [new file with mode: 0644]
queue-4.19/kbuild-fix-include-path-in-scripts-makefile.modpost.patch [new file with mode: 0644]
queue-4.19/s390-mm-do-not-trigger-write-fault-when-vma-does-not-allow-vm_write.patch [new file with mode: 0644]
queue-4.19/selftests-bpf-fix-test_align-verifier-log-patterns.patch [new file with mode: 0644]
queue-4.19/series
queue-4.19/x86-bugs-add-unknown-reporting-for-mmio-stale-data.patch [new file with mode: 0644]

diff --git a/queue-4.19/bluetooth-l2cap-fix-build-errors-in-some-archs.patch b/queue-4.19/bluetooth-l2cap-fix-build-errors-in-some-archs.patch
new file mode 100644
index 0000000..979c15f
--- /dev/null
@@ -0,0 +1,63 @@
+From b840304fb46cdf7012722f456bce06f151b3e81b Mon Sep 17 00:00:00 2001
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Date: Fri, 12 Aug 2022 15:33:57 -0700
+Subject: Bluetooth: L2CAP: Fix build errors in some archs
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+commit b840304fb46cdf7012722f456bce06f151b3e81b upstream.
+
+This attempts to fix the following errors:
+
+In function 'memcmp',
+    inlined from 'bacmp' at ./include/net/bluetooth/bluetooth.h:347:9,
+    inlined from 'l2cap_global_chan_by_psm' at
+    net/bluetooth/l2cap_core.c:2003:15:
+./include/linux/fortify-string.h:44:33: error: '__builtin_memcmp'
+specified bound 6 exceeds source size 0 [-Werror=stringop-overread]
+   44 | #define __underlying_memcmp     __builtin_memcmp
+      |                                 ^
+./include/linux/fortify-string.h:420:16: note: in expansion of macro
+'__underlying_memcmp'
+  420 |         return __underlying_memcmp(p, q, size);
+      |                ^~~~~~~~~~~~~~~~~~~
+In function 'memcmp',
+    inlined from 'bacmp' at ./include/net/bluetooth/bluetooth.h:347:9,
+    inlined from 'l2cap_global_chan_by_psm' at
+    net/bluetooth/l2cap_core.c:2004:15:
+./include/linux/fortify-string.h:44:33: error: '__builtin_memcmp'
+specified bound 6 exceeds source size 0 [-Werror=stringop-overread]
+   44 | #define __underlying_memcmp     __builtin_memcmp
+      |                                 ^
+./include/linux/fortify-string.h:420:16: note: in expansion of macro
+'__underlying_memcmp'
+  420 |         return __underlying_memcmp(p, q, size);
+      |                ^~~~~~~~~~~~~~~~~~~
+
+Fixes: 332f1795ca20 ("Bluetooth: L2CAP: Fix l2cap_global_chan_by_psm regression")
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Cc: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bluetooth/l2cap_core.c |   10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -1826,11 +1826,11 @@ static struct l2cap_chan *l2cap_global_c
+                       src_match = !bacmp(&c->src, src);
+                       dst_match = !bacmp(&c->dst, dst);
+                       if (src_match && dst_match) {
+-                              c = l2cap_chan_hold_unless_zero(c);
+-                              if (c) {
+-                                      read_unlock(&chan_list_lock);
+-                                      return c;
+-                              }
++                              if (!l2cap_chan_hold_unless_zero(c))
++                                      continue;
++
++                              read_unlock(&chan_list_lock);
++                              return c;
+                       }
+                       /* Closest match */
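
A note on the change itself: the loop now takes a reference with
l2cap_chan_hold_unless_zero() and, when a matching channel's refcount has
already dropped to zero, continues scanning instead of returning. A minimal
standalone sketch of that lookup pattern, using simplified hypothetical
types in place of the kernel's l2cap_chan, list macros and locking:

  #include <stdbool.h>
  #include <stddef.h>

  struct chan {
          int refcnt;                  /* simplified stand-in for a kref */
          struct chan *next;
  };

  /* Take a reference only if the count is not already zero. */
  static bool chan_hold_unless_zero(struct chan *c)
  {
          if (c->refcnt == 0)
                  return false;        /* being destroyed; skip it */
          c->refcnt++;
          return true;
  }

  /* Scan the list, skipping dying entries rather than bailing out. */
  static struct chan *lookup(struct chan *head)
  {
          struct chan *c;

          for (c = head; c; c = c->next) {
                  if (!chan_hold_unless_zero(c))
                          continue;    /* keep searching, as the patch does */
                  return c;            /* caller now owns a reference */
          }
          return NULL;
  }

The sketch is single-threaded; the real code runs under chan_list_lock
with atomic reference counts.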
diff --git a/queue-4.19/bpf-fix-the-off-by-two-error-in-range-markings.patch b/queue-4.19/bpf-fix-the-off-by-two-error-in-range-markings.patch
new file mode 100644
index 0000000..eb6ef9b
--- /dev/null
@@ -0,0 +1,159 @@
+From foo@baz Thu Sep  1 12:09:22 PM CEST 2022
+From: Ovidiu Panait <ovidiu.panait@windriver.com>
+Date: Mon, 29 Aug 2022 14:50:53 +0300
+Subject: bpf: Fix the off-by-two error in range markings
+To: stable@vger.kernel.org
+Cc: raajeshdasari@gmail.com, jean-philippe@linaro.org, Maxim Mikityanskiy <maximmi@nvidia.com>, Daniel Borkmann <daniel@iogearbox.net>, Ovidiu Panait <ovidiu.panait@windriver.com>
+Message-ID: <20220829115054.1714528-2-ovidiu.panait@windriver.com>
+
+From: Maxim Mikityanskiy <maximmi@nvidia.com>
+
+commit 2fa7d94afc1afbb4d702760c058dc2d7ed30f226 upstream.
+
+The first commit cited below attempts to fix the off-by-one error that
+appeared in some comparisons with an open range. Due to this error,
+arithmetically equivalent pieces of code could get different verdicts
+from the verifier, for example (pseudocode):
+
+  // 1. Passes the verifier:
+  if (data + 8 > data_end)
+      return early
+  read *(u64 *)data, i.e. [data; data+7]
+
+  // 2. Rejected by the verifier (should still pass):
+  if (data + 7 >= data_end)
+      return early
+  read *(u64 *)data, i.e. [data; data+7]
+
+The attempted fix, however, shifts the range by one in the wrong
+direction, so the bug not only remains, but such pieces of code also
+start failing in the verifier:
+
+  // 3. Rejected by the verifier, but the check is stricter than in #1.
+  if (data + 8 >= data_end)
+      return early
+  read *(u64 *)data, i.e. [data; data+7]
+
+The change performed by that fix converted an off-by-one bug into an
+off-by-two one. The second commit cited below added BPF selftests
+written to ensure that code chunks like #3 are rejected; however,
+they should be accepted.
+
+This commit fixes the off-by-two error by adjusting new_range in the
+right direction and fixes the tests by changing the range into the
+one that should actually fail.
+
+Fixes: fb2a311a31d3 ("bpf: fix off by one for range markings with L{T, E} patterns")
+Fixes: b37242c773b2 ("bpf: add test cases to bpf selftests to cover all access tests")
+Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20211130181607.593149-1-maximmi@nvidia.com
+[OP: cherry-pick selftest changes only]
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/bpf/test_verifier.c |   32 ++++++++++++++--------------
+ 1 file changed, 16 insertions(+), 16 deletions(-)
+
+--- a/tools/testing/selftests/bpf/test_verifier.c
++++ b/tools/testing/selftests/bpf/test_verifier.c
+@@ -9108,10 +9108,10 @@ static struct bpf_test tests[] = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+-                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+-                      BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++                      BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+@@ -9166,10 +9166,10 @@ static struct bpf_test tests[] = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+-                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+-                      BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++                      BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+@@ -9279,9 +9279,9 @@ static struct bpf_test tests[] = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+-                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+-                      BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++                      BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+@@ -9451,9 +9451,9 @@ static struct bpf_test tests[] = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+-                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+                       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+-                      BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++                      BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+@@ -9564,10 +9564,10 @@ static struct bpf_test tests[] = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+-                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+-                      BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++                      BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+@@ -9622,10 +9622,10 @@ static struct bpf_test tests[] = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+-                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+-                      BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++                      BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+@@ -9735,9 +9735,9 @@ static struct bpf_test tests[] = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+-                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+-                      BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++                      BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+@@ -9907,9 +9907,9 @@ static struct bpf_test tests[] = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+-                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+                       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+-                      BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++                      BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
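
The pseudocode in the commit message corresponds to ordinary XDP bounds
checks. A minimal restricted-C sketch, assuming a libbpf-style build
environment (program and section names here are illustrative):

  #include <linux/bpf.h>
  #include <linux/types.h>
  #include <bpf/bpf_helpers.h>

  SEC("xdp")
  int bounds_demo(struct xdp_md *ctx)
  {
          void *data     = (void *)(long)ctx->data;
          void *data_end = (void *)(long)ctx->data_end;

          /* Guard an 8-byte read of [data, data+7]. With the off-by-two
           * error fixed, the arithmetically equivalent closed-range form
           * "if (data + 7 >= data_end)" is accepted as well.
           */
          if (data + 8 > data_end)
                  return XDP_DROP;

          __u64 v = *(__u64 *)data;    /* reads bytes [data, data+7] */

          return v ? XDP_PASS : XDP_DROP;
  }

  char _license[] SEC("license") = "GPL";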
diff --git a/queue-4.19/kbuild-fix-include-path-in-scripts-makefile.modpost.patch b/queue-4.19/kbuild-fix-include-path-in-scripts-makefile.modpost.patch
new file mode 100644
index 0000000..eec1a5b
--- /dev/null
@@ -0,0 +1,44 @@
+From 23a0cb8e3225122496bfa79172005c587c2d64bf Mon Sep 17 00:00:00 2001
+From: Jing Leng <jleng@ambarella.com>
+Date: Tue, 17 May 2022 18:51:28 +0800
+Subject: kbuild: Fix include path in scripts/Makefile.modpost
+
+From: Jing Leng <jleng@ambarella.com>
+
+commit 23a0cb8e3225122496bfa79172005c587c2d64bf upstream.
+
+When building an external module, if users don't need to separate the
+compilation output and source code, they run the following command:
+"make -C $(LINUX_SRC_DIR) M=$(PWD)". At this point, "$(KBUILD_EXTMOD)"
+and "$(src)" are the same.
+
+If they need to separate them, they run "make -C $(KERNEL_SRC_DIR)
+O=$(KERNEL_OUT_DIR) M=$(OUT_DIR) src=$(PWD)". Before running the
+command, they need to copy "Kbuild" or "Makefile" to "$(OUT_DIR)" to
+prevent compilation failure.
+
+So the kernel should change the include path to avoid the copy operation.
+
+Signed-off-by: Jing Leng <jleng@ambarella.com>
+[masahiro: I do not think "M=$(OUT_DIR) src=$(PWD)" is the official way,
+but this patch is a nice clean up anyway.]
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+[nsc: updated context for v4.19]
+Signed-off-by: Nicolas Schier <n.schier@avm.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ scripts/Makefile.modpost |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/scripts/Makefile.modpost
++++ b/scripts/Makefile.modpost
+@@ -51,8 +51,7 @@ obj := $(KBUILD_EXTMOD)
+ src := $(obj)
+ # Include the module's Makefile to find KBUILD_EXTRA_SYMBOLS
+-include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \
+-             $(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile)
++include $(if $(wildcard $(src)/Kbuild), $(src)/Kbuild, $(src)/Makefile)
+ endif
+ include scripts/Makefile.lib
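
For context, the two build modes described in the message look like this
in practice; the module name and paths below are hypothetical:

  # Kbuild (or Makefile) in the module source directory
  obj-m := hello.o

  # In-place build, where $(KBUILD_EXTMOD) and $(src) are the same:
  #   make -C /lib/modules/$(uname -r)/build M=$(PWD)
  #
  # Separated build (unofficial, per Masahiro's note above):
  #   make -C $(KERNEL_SRC_DIR) O=$(KERNEL_OUT_DIR) M=$(OUT_DIR) src=$(PWD)
  #
  # With this patch, Makefile.modpost includes $(src)/Kbuild directly, so
  # Kbuild no longer needs to be copied into $(OUT_DIR) first.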
diff --git a/queue-4.19/s390-mm-do-not-trigger-write-fault-when-vma-does-not-allow-vm_write.patch b/queue-4.19/s390-mm-do-not-trigger-write-fault-when-vma-does-not-allow-vm_write.patch
new file mode 100644
index 0000000..2c9cd0c
--- /dev/null
@@ -0,0 +1,50 @@
+From 41ac42f137080bc230b5882e3c88c392ab7f2d32 Mon Sep 17 00:00:00 2001
+From: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
+Date: Wed, 17 Aug 2022 15:26:03 +0200
+Subject: s390/mm: do not trigger write fault when vma does not allow VM_WRITE
+
+From: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
+
+commit 41ac42f137080bc230b5882e3c88c392ab7f2d32 upstream.
+
+For non-protection pXd_none() page faults in do_dat_exception(), we
+call do_exception() with access == (VM_READ | VM_WRITE | VM_EXEC).
+In do_exception(), vma->vm_flags is checked against that before
+calling handle_mm_fault().
+
+Since commit 92f842eac7ee3 ("[S390] store indication fault optimization"),
+we call handle_mm_fault() with FAULT_FLAG_WRITE, when recognizing that
+it was a write access. However, the vma flags check is still only
+checking against (VM_READ | VM_WRITE | VM_EXEC), and therefore also
+calling handle_mm_fault() with FAULT_FLAG_WRITE in cases where the vma
+does not allow VM_WRITE.
+
+Fix this by changing access check in do_exception() to VM_WRITE only,
+when recognizing write access.
+
+Link: https://lkml.kernel.org/r/20220811103435.188481-3-david@redhat.com
+Fixes: 92f842eac7ee3 ("[S390] store indication fault optimization")
+Cc: <stable@vger.kernel.org>
+Reported-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/mm/fault.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -455,7 +455,9 @@ static inline vm_fault_t do_exception(st
+       flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+-      if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
++      if ((trans_exc_code & store_indication) == 0x400)
++              access = VM_WRITE;
++      if (access == VM_WRITE)
+               flags |= FAULT_FLAG_WRITE;
+       down_read(&mm->mmap_sem);
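
The invariant the fix restores is that a store into a vma without
VM_WRITE must fail instead of being handled as a write fault. A hedged,
portable userspace sketch of that expectation (plain C, not an
s390-specific reproducer):

  #include <signal.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <sys/mman.h>
  #include <unistd.h>

  static void on_segv(int sig)
  {
          (void)sig;
          /* A store into a PROT_READ mapping must raise SIGSEGV. */
          write(STDOUT_FILENO, "SIGSEGV, as expected\n", 21);
          _exit(0);
  }

  int main(void)
  {
          char *p;

          signal(SIGSEGV, on_segv);
          p = mmap(NULL, 4096, PROT_READ,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
          if (p == MAP_FAILED)
                  return 1;
          p[0] = 1;        /* write to a read-only vma */
          puts("store unexpectedly succeeded");
          return 1;
  }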
diff --git a/queue-4.19/selftests-bpf-fix-test_align-verifier-log-patterns.patch b/queue-4.19/selftests-bpf-fix-test_align-verifier-log-patterns.patch
new file mode 100644
index 0000000..a4f2d91
--- /dev/null
@@ -0,0 +1,120 @@
+From foo@baz Thu Sep  1 12:09:22 PM CEST 2022
+From: Ovidiu Panait <ovidiu.panait@windriver.com>
+Date: Mon, 29 Aug 2022 14:50:54 +0300
+Subject: selftests/bpf: Fix test_align verifier log patterns
+To: stable@vger.kernel.org
+Cc: raajeshdasari@gmail.com, jean-philippe@linaro.org, Stanislav Fomichev <sdf@google.com>, Daniel Borkmann <daniel@iogearbox.net>, Ovidiu Panait <ovidiu.panait@windriver.com>
+Message-ID: <20220829115054.1714528-3-ovidiu.panait@windriver.com>
+
+From: Stanislav Fomichev <sdf@google.com>
+
+commit 5366d2269139ba8eb6a906d73a0819947e3e4e0a upstream.
+
+Commit 294f2fc6da27 ("bpf: Verifer, adjust_scalar_min_max_vals to always
+call update_reg_bounds()") changed the way the verifier logs some of its
+state; adjust test_align accordingly. Where possible, I tried not to
+copy-paste the entire log line and resorted to dropping the last closing
+brace instead.
+
+Fixes: 294f2fc6da27 ("bpf: Verifer, adjust_scalar_min_max_vals to always call update_reg_bounds()")
+Signed-off-by: Stanislav Fomichev <sdf@google.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20200515194904.229296-1-sdf@google.com
+[OP: adjust for 4.19 selftests, apply only the relevant diffs]
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/bpf/test_align.c |   27 ++++++++++++++-------------
+ 1 file changed, 14 insertions(+), 13 deletions(-)
+
+--- a/tools/testing/selftests/bpf/test_align.c
++++ b/tools/testing/selftests/bpf/test_align.c
+@@ -359,15 +359,15 @@ static struct bpf_align_test tests[] = {
+                        * is still (4n), fixed offset is not changed.
+                        * Also, we create a new reg->id.
+                        */
+-                      {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
++                      {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
+                       /* At the time the word size load is performed from R5,
+                        * its total fixed offset is NET_IP_ALIGN + reg->off (18)
+                        * which is 20.  Then the variable offset is (4n), so
+                        * the total offset is 4-byte aligned and meets the
+                        * load's requirements.
+                        */
+-                      {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
+-                      {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
++                      {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
++                      {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
+               },
+       },
+       {
+@@ -410,15 +410,15 @@ static struct bpf_align_test tests[] = {
+                       /* Adding 14 makes R6 be (4n+2) */
+                       {9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+                       /* Packet pointer has (4n+2) offset */
+-                      {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+-                      {13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
++                      {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
++                      {13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
+                       /* At the time the word size load is performed from R5,
+                        * its total fixed offset is NET_IP_ALIGN + reg->off (0)
+                        * which is 2.  Then the variable offset is (4n+2), so
+                        * the total offset is 4-byte aligned and meets the
+                        * load's requirements.
+                        */
+-                      {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
++                      {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
+                       /* Newly read value in R6 was shifted left by 2, so has
+                        * known alignment of 4.
+                        */
+@@ -426,15 +426,15 @@ static struct bpf_align_test tests[] = {
+                       /* Added (4n) to packet pointer's (4n+2) var_off, giving
+                        * another (4n+2).
+                        */
+-                      {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
+-                      {21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
++                      {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
++                      {21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
+                       /* At the time the word size load is performed from R5,
+                        * its total fixed offset is NET_IP_ALIGN + reg->off (0)
+                        * which is 2.  Then the variable offset is (4n+2), so
+                        * the total offset is 4-byte aligned and meets the
+                        * load's requirements.
+                        */
+-                      {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
++                      {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
+               },
+       },
+       {
+@@ -469,11 +469,11 @@ static struct bpf_align_test tests[] = {
+               .matches = {
+                       {4, "R5_w=pkt_end(id=0,off=0,imm=0)"},
+                       /* (ptr - ptr) << 2 == unknown, (4n) */
+-                      {6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
++                      {6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
+                       /* (4n) + 14 == (4n+2).  We blow our bounds, because
+                        * the add could overflow.
+                        */
+-                      {7, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
++                      {7, "R5=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
+                       /* Checked s>=0 */
+                       {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+                       /* packet pointer + nonnegative (4n+2) */
+@@ -528,7 +528,7 @@ static struct bpf_align_test tests[] = {
+                       /* New unknown value in R7 is (4n) */
+                       {11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       /* Subtracting it from R6 blows our unsigned bounds */
+-                      {12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"},
++                      {12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
+                       /* Checked s>= 0 */
+                       {14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
+                       /* At the time the word size load is performed from R5,
+@@ -537,7 +537,8 @@ static struct bpf_align_test tests[] = {
+                        * the total offset is 4-byte aligned and meets the
+                        * load's requirements.
+                        */
+-                      {20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
++                      {20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"},
++
+               },
+       },
+       {
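
The pattern changes above work because test_align matches each expected
string as a substring of the verifier log, so dropping the trailing
closing brace turns an exact field list into a prefix that tolerates
extra fields. A standalone illustration of that idea (not the harness's
actual code):

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          /* Log line as a newer verifier might print it. */
          const char *log =
              "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))";
          /* Expected pattern with the closing brace dropped: it still
           * matches even if more state is appended after var_off. */
          const char *expect =
              "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)";

          puts(strstr(log, expect) ? "match" : "no match");
          return 0;
  }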
diff --git a/queue-4.19/series b/queue-4.19/series
index f332d9a16a9adac6ccabae7e4f7c37980db7aa06..506594447cfbfdd4a611e9aeb4f18d66b91df553 100644
@@ -36,3 +36,9 @@ md-call-__md_stop_writes-in-md_stop.patch
 scsi-storvsc-remove-wq_mem_reclaim-from-storvsc_error_wq.patch
 mm-force-tlb-flush-for-pfnmap-mappings-before-unlink_file_vma.patch
 arm64-map-fdt-as-rw-for-early_init_dt_scan.patch
+bpf-fix-the-off-by-two-error-in-range-markings.patch
+selftests-bpf-fix-test_align-verifier-log-patterns.patch
+s390-mm-do-not-trigger-write-fault-when-vma-does-not-allow-vm_write.patch
+x86-bugs-add-unknown-reporting-for-mmio-stale-data.patch
+kbuild-fix-include-path-in-scripts-makefile.modpost.patch
+bluetooth-l2cap-fix-build-errors-in-some-archs.patch
diff --git a/queue-4.19/x86-bugs-add-unknown-reporting-for-mmio-stale-data.patch b/queue-4.19/x86-bugs-add-unknown-reporting-for-mmio-stale-data.patch
new file mode 100644
index 0000000..61b11ef
--- /dev/null
@@ -0,0 +1,196 @@
+From 7df548840c496b0141fb2404b889c346380c2b22 Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Wed, 3 Aug 2022 14:41:32 -0700
+Subject: x86/bugs: Add "unknown" reporting for MMIO Stale Data
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit 7df548840c496b0141fb2404b889c346380c2b22 upstream.
+
+Older Intel CPUs that are not in the affected processor list for MMIO
+Stale Data vulnerabilities currently report "Not affected" in sysfs,
+which may not be correct. Vulnerability status for these older CPUs is
+unknown.
+
+Add known-not-affected CPUs to the whitelist. Report "unknown"
+mitigation status for CPUs that are in neither the blacklist nor the
+whitelist and that also don't enumerate the MSR ARCH_CAPABILITIES bits
+reflecting hardware immunity to MMIO Stale Data vulnerabilities.
+
+Mitigation is not deployed when the status is unknown.
+
+  [ bp: Massage, fixup. ]
+
+Fixes: 8d50cdf8b834 ("x86/speculation/mmio: Add sysfs reporting for Processor MMIO Stale Data")
+Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Suggested-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/a932c154772f2121794a5f2eded1a11013114711.1657846269.git.pawan.kumar.gupta@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst |   14 ++++
+ arch/x86/include/asm/cpufeatures.h                              |    3 ++-
+ arch/x86/kernel/cpu/bugs.c                                      |   14 +++-
+ arch/x86/kernel/cpu/common.c                                    |   34 ++++++----
+ 4 files changed, 51 insertions(+), 14 deletions(-)
+
+--- a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
++++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
+@@ -230,6 +230,20 @@ The possible values in this file are:
+      * - 'Mitigation: Clear CPU buffers'
+        - The processor is vulnerable and the CPU buffer clearing mitigation is
+          enabled.
++     * - 'Unknown: No mitigations'
++       - The processor vulnerability status is unknown because it is
++       out of Servicing period. Mitigation is not attempted.
++
++Definitions:
++------------
++
++Servicing period: The process of providing functional and security updates to
++Intel processors or platforms, utilizing the Intel Platform Update (IPU)
++process or other similar mechanisms.
++
++End of Servicing Updates (ESU): ESU is the date at which Intel will no
++longer provide Servicing, such as through IPU or other similar update
++processes. ESU dates will typically be aligned to end of quarter.
+ If the processor is vulnerable then the following information is appended to
+ the above information:
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -396,6 +396,7 @@
+ #define X86_BUG_ITLB_MULTIHIT         X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
+ #define X86_BUG_SRBDS                 X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
+ #define X86_BUG_MMIO_STALE_DATA               X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
+-#define X86_BUG_EIBRS_PBRSB           X86_BUG(26) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
++#define X86_BUG_MMIO_UNKNOWN          X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
++#define X86_BUG_EIBRS_PBRSB           X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -396,7 +396,8 @@ static void __init mmio_select_mitigatio
+       u64 ia32_cap;
+       if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
+-          cpu_mitigations_off()) {
++           boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
++           cpu_mitigations_off()) {
+               mmio_mitigation = MMIO_MITIGATION_OFF;
+               return;
+       }
+@@ -501,6 +502,8 @@ out:
+               pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
+       if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+               pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
++      else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
++              pr_info("MMIO Stale Data: Unknown: No mitigations\n");
+ }
+ static void __init md_clear_select_mitigation(void)
+@@ -1868,6 +1871,9 @@ static ssize_t tsx_async_abort_show_stat
+ static ssize_t mmio_stale_data_show_state(char *buf)
+ {
++      if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
++              return sysfs_emit(buf, "Unknown: No mitigations\n");
++
+       if (mmio_mitigation == MMIO_MITIGATION_OFF)
+               return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
+@@ -1995,6 +2001,7 @@ static ssize_t cpu_show_common(struct de
+               return srbds_show_state(buf);
+       case X86_BUG_MMIO_STALE_DATA:
++      case X86_BUG_MMIO_UNKNOWN:
+               return mmio_stale_data_show_state(buf);
+       default:
+@@ -2051,6 +2058,9 @@ ssize_t cpu_show_srbds(struct device *de
+ ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+-      return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
++      if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
++              return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
++      else
++              return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
+ }
+ #endif
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -955,6 +955,7 @@ static void identify_cpu_without_cpuid(s
+ #define NO_SWAPGS             BIT(6)
+ #define NO_ITLB_MULTIHIT      BIT(7)
+ #define NO_EIBRS_PBRSB                BIT(8)
++#define NO_MMIO                       BIT(9)
+ #define VULNWL(_vendor, _family, _model, _whitelist)  \
+       { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
+@@ -972,6 +973,11 @@ static const __initconst struct x86_cpu_
+       VULNWL(NSC,     5, X86_MODEL_ANY,       NO_SPECULATION),
+       /* Intel Family 6 */
++      VULNWL_INTEL(TIGERLAKE,                 NO_MMIO),
++      VULNWL_INTEL(TIGERLAKE_L,               NO_MMIO),
++      VULNWL_INTEL(ALDERLAKE,                 NO_MMIO),
++      VULNWL_INTEL(ALDERLAKE_L,               NO_MMIO),
++
+       VULNWL_INTEL(ATOM_SALTWELL,             NO_SPECULATION | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_SALTWELL_TABLET,      NO_SPECULATION | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_SALTWELL_MID,         NO_SPECULATION | NO_ITLB_MULTIHIT),
+@@ -989,9 +995,9 @@ static const __initconst struct x86_cpu_
+       VULNWL_INTEL(ATOM_AIRMONT_MID,          NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+-      VULNWL_INTEL(ATOM_GOLDMONT,             NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+-      VULNWL_INTEL(ATOM_GOLDMONT_X,           NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+-      VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
++      VULNWL_INTEL(ATOM_GOLDMONT,             NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++      VULNWL_INTEL(ATOM_GOLDMONT_X,           NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++      VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
+       /*
+        * Technically, swapgs isn't serializing on AMD (despite it previously
+@@ -1006,13 +1012,13 @@ static const __initconst struct x86_cpu_
+       VULNWL_INTEL(ATOM_TREMONT_X,            NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
+       /* AMD Family 0xf - 0x12 */
+-      VULNWL_AMD(0x0f,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+-      VULNWL_AMD(0x10,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+-      VULNWL_AMD(0x11,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+-      VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
++      VULNWL_AMD(0x0f,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++      VULNWL_AMD(0x10,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++      VULNWL_AMD(0x11,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++      VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+       /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+-      VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
++      VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+       {}
+ };
+@@ -1152,10 +1158,16 @@ static void __init cpu_set_bug_bits(stru
+        * Affected CPU list is generally enough to enumerate the vulnerability,
+        * but for virtualization case check for ARCH_CAP MSR bits also, VMM may
+        * not want the guest to enumerate the bug.
++       *
++       * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
++       * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
+        */
+-      if (cpu_matches(cpu_vuln_blacklist, MMIO) &&
+-          !arch_cap_mmio_immune(ia32_cap))
+-              setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
++      if (!arch_cap_mmio_immune(ia32_cap)) {
++              if (cpu_matches(cpu_vuln_blacklist, MMIO))
++                      setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
++              else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
++                      setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
++      }
+       if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
+           !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&