git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.10
author Sasha Levin <sashal@kernel.org>
Sun, 12 Nov 2023 19:19:20 +0000 (14:19 -0500)
committer Sasha Levin <sashal@kernel.org>
Sun, 12 Nov 2023 19:19:20 +0000 (14:19 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.10/series
queue-5.10/x86-sev-es-allow-copy_from_kernel_nofault-in-earlier.patch [new file with mode: 0644]
queue-5.10/x86-share-definition-of-__is_canonical_address.patch [new file with mode: 0644]

diff --git a/queue-5.10/series b/queue-5.10/series
index 91c0bc28bb6e7fbcd19d0cef0d97c1d532df54b5..3c5cfb6b3b8fd431a0a18c86de87b552a7e01303 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -195,3 +195,5 @@ r8169-respect-userspace-disabling-iff_multicast.patch
 netfilter-xt_recent-fix-increase-ipv6-literal-buffer.patch
 netfilter-nft_redir-use-struct-nf_nat_range2-through.patch
 netfilter-nat-fix-ipv6-nat-redirect-with-mapped-and-.patch
+x86-share-definition-of-__is_canonical_address.patch
+x86-sev-es-allow-copy_from_kernel_nofault-in-earlier.patch
diff --git a/queue-5.10/x86-sev-es-allow-copy_from_kernel_nofault-in-earlier.patch b/queue-5.10/x86-sev-es-allow-copy_from_kernel_nofault-in-earlier.patch
new file mode 100644
index 0000000..33f81e3
--- /dev/null
+++ b/queue-5.10/x86-sev-es-allow-copy_from_kernel_nofault-in-earlier.patch
@@ -0,0 +1,64 @@
+From 7b67affe27b56e14adb83200c4b3b14768bc6c7b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Sep 2023 17:27:02 -0700
+Subject: x86/sev-es: Allow copy_from_kernel_nofault() in earlier boot
+
+From: Adam Dunlap <acdunlap@google.com>
+
+[ Upstream commit f79936545fb122856bd78b189d3c7ee59928c751 ]
+
+Previously, if copy_from_kernel_nofault() was called before
+boot_cpu_data.x86_virt_bits was set up, then it would trigger undefined
+behavior due to a shift by 64.
+
+This ended up causing boot failures with the latest Ubuntu 22.04 images
+on GCP when using SEV-SNP.
+
+Specifically, this function is called during an early #VC handler which
+is triggered by a CPUID to check if NX is implemented.
+
+Fixes: 1aa9aa8ee517 ("x86/sev-es: Setup GHCB-based boot #VC handler")
+Suggested-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Adam Dunlap <acdunlap@google.com>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Tested-by: Jacob Xu <jacobhxu@google.com>
+Link: https://lore.kernel.org/r/20230912002703.3924521-2-acdunlap@google.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/mm/maccess.c | 19 ++++++++++++++-----
+ 1 file changed, 14 insertions(+), 5 deletions(-)
+
+diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c
+index 5a53c2cc169cc..6993f026adec9 100644
+--- a/arch/x86/mm/maccess.c
++++ b/arch/x86/mm/maccess.c
+@@ -9,12 +9,21 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+       unsigned long vaddr = (unsigned long)unsafe_src;
+       /*
+-       * Range covering the highest possible canonical userspace address
+-       * as well as non-canonical address range. For the canonical range
+-       * we also need to include the userspace guard page.
++       * Do not allow userspace addresses.  This disallows
++       * normal userspace and the userspace guard page:
+        */
+-      return vaddr >= TASK_SIZE_MAX + PAGE_SIZE &&
+-             __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
++      if (vaddr < TASK_SIZE_MAX + PAGE_SIZE)
++              return false;
++
++      /*
++       * Allow everything during early boot before 'x86_virt_bits'
++       * is initialized.  Needed for instruction decoding in early
++       * exception handlers.
++       */
++      if (!boot_cpu_data.x86_virt_bits)
++              return true;
++
++      return __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
+ }
+ #else
+ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+-- 
+2.42.0
+
diff --git a/queue-5.10/x86-share-definition-of-__is_canonical_address.patch b/queue-5.10/x86-share-definition-of-__is_canonical_address.patch
new file mode 100644
index 0000000..40c6e4d
--- /dev/null
+++ b/queue-5.10/x86-share-definition-of-__is_canonical_address.patch
@@ -0,0 +1,165 @@
+From 24e5ea69289774ab392564ccf47dab38fc48f865 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 31 Jan 2022 09:24:50 +0200
+Subject: x86: Share definition of __is_canonical_address()
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+[ Upstream commit 1fb85d06ad6754796cd1b920639ca9d8840abefd ]
+
+Reduce code duplication by moving canonical address code to a common header
+file.
+
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20220131072453.2839535-3-adrian.hunter@intel.com
+Stable-dep-of: f79936545fb1 ("x86/sev-es: Allow copy_from_kernel_nofault() in earlier boot")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/events/intel/pt.c  | 14 ++------------
+ arch/x86/include/asm/page.h | 10 ++++++++++
+ arch/x86/kvm/emulate.c      |  4 ++--
+ arch/x86/kvm/x86.c          |  2 +-
+ arch/x86/kvm/x86.h          |  7 +------
+ arch/x86/mm/maccess.c       |  7 +------
+ 6 files changed, 17 insertions(+), 27 deletions(-)
+
+diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
+index d87421acddc39..5667b8b994e34 100644
+--- a/arch/x86/events/intel/pt.c
++++ b/arch/x86/events/intel/pt.c
+@@ -1360,20 +1360,10 @@ static void pt_addr_filters_fini(struct perf_event *event)
+ }
+ #ifdef CONFIG_X86_64
+-static u64 canonical_address(u64 vaddr, u8 vaddr_bits)
+-{
+-      return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
+-}
+-
+-static u64 is_canonical_address(u64 vaddr, u8 vaddr_bits)
+-{
+-      return canonical_address(vaddr, vaddr_bits) == vaddr;
+-}
+-
+ /* Clamp to a canonical address greater-than-or-equal-to the address given */
+ static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
+ {
+-      return is_canonical_address(vaddr, vaddr_bits) ?
++      return __is_canonical_address(vaddr, vaddr_bits) ?
+              vaddr :
+              -BIT_ULL(vaddr_bits - 1);
+ }
+@@ -1381,7 +1371,7 @@ static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
+ /* Clamp to a canonical address less-than-or-equal-to the address given */
+ static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits)
+ {
+-      return is_canonical_address(vaddr, vaddr_bits) ?
++      return __is_canonical_address(vaddr, vaddr_bits) ?
+              vaddr :
+              BIT_ULL(vaddr_bits - 1) - 1;
+ }
+diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
+index 7555b48803a8c..ffae5ea9fd4e1 100644
+--- a/arch/x86/include/asm/page.h
++++ b/arch/x86/include/asm/page.h
+@@ -71,6 +71,16 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
+ extern bool __virt_addr_valid(unsigned long kaddr);
+ #define virt_addr_valid(kaddr)        __virt_addr_valid((unsigned long) (kaddr))
++static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
++{
++      return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
++}
++
++static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)
++{
++      return __canonical_address(vaddr, vaddr_bits) == vaddr;
++}
++
+ #endif        /* __ASSEMBLY__ */
+ #include <asm-generic/memory_model.h>
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 63efccc8f4292..56750febf4604 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -688,7 +688,7 @@ static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
+ static inline bool emul_is_noncanonical_address(u64 la,
+                                               struct x86_emulate_ctxt *ctxt)
+ {
+-      return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
++      return !__is_canonical_address(la, ctxt_virt_addr_bits(ctxt));
+ }
+ /*
+@@ -738,7 +738,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
+       case X86EMUL_MODE_PROT64:
+               *linear = la;
+               va_bits = ctxt_virt_addr_bits(ctxt);
+-              if (get_canonical(la, va_bits) != la)
++              if (!__is_canonical_address(la, va_bits))
+                       goto bad;
+               *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 9d3015863e581..c2899ff31a068 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1640,7 +1640,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
+                * value, and that something deterministic happens if the guest
+                * invokes 64-bit SYSENTER.
+                */
+-              data = get_canonical(data, vcpu_virt_addr_bits(vcpu));
++              data = __canonical_address(data, vcpu_virt_addr_bits(vcpu));
+       }
+       msr.data = data;
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
+index 2bff44f1efec8..4037b3cc704e8 100644
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -156,14 +156,9 @@ static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
+       return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
+ }
+-static inline u64 get_canonical(u64 la, u8 vaddr_bits)
+-{
+-      return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
+-}
+-
+ static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
+ {
+-      return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
++      return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
+ }
+ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
+diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c
+index 92ec176a72937..5a53c2cc169cc 100644
+--- a/arch/x86/mm/maccess.c
++++ b/arch/x86/mm/maccess.c
+@@ -4,11 +4,6 @@
+ #include <linux/kernel.h>
+ #ifdef CONFIG_X86_64
+-static __always_inline u64 canonical_address(u64 vaddr, u8 vaddr_bits)
+-{
+-      return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
+-}
+-
+ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+ {
+       unsigned long vaddr = (unsigned long)unsafe_src;
+@@ -19,7 +14,7 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+        * we also need to include the userspace guard page.
+        */
+       return vaddr >= TASK_SIZE_MAX + PAGE_SIZE &&
+-             canonical_address(vaddr, boot_cpu_data.x86_virt_bits) == vaddr;
++             __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
+ }
+ #else
+ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+-- 
+2.42.0
+