git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.10
author Sasha Levin <sashal@kernel.org>
Sun, 14 Feb 2021 18:31:54 +0000 (13:31 -0500)
committer Sasha Levin <sashal@kernel.org>
Sun, 14 Feb 2021 18:32:38 +0000 (13:32 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.10/arm64-mte-allow-ptrace_peekmtetags-access-to-the-zer.patch [new file with mode: 0644]
queue-5.10/kvm-x86-cleanup-cr3-reserved-bits-checks.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/arm64-mte-allow-ptrace_peekmtetags-access-to-the-zer.patch b/queue-5.10/arm64-mte-allow-ptrace_peekmtetags-access-to-the-zer.patch
new file mode 100644
index 0000000..bc425e8
--- /dev/null
+++ b/queue-5.10/arm64-mte-allow-ptrace_peekmtetags-access-to-the-zer.patch
@@ -0,0 +1,80 @@
+From 42138508d8089c9ba8258c37dc3988a451bbc72e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Feb 2021 18:03:16 +0000
+Subject: arm64: mte: Allow PTRACE_PEEKMTETAGS access to the zero page
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+[ Upstream commit 68d54ceeec0e5fee4fb8048e6a04c193f32525ca ]
+
+The ptrace(PTRACE_PEEKMTETAGS) implementation checks whether the user
+page has valid tags (mapped with PROT_MTE) by testing the PG_mte_tagged
+page flag. If this bit is cleared, ptrace(PTRACE_PEEKMTETAGS) returns
+-EIO.
+
+A newly created (PROT_MTE) mapping points to the zero page which had its
+tags zeroed during cpu_enable_mte(). If there were no prior writes to
+this mapping, ptrace(PTRACE_PEEKMTETAGS) fails with -EIO since the zero
+page does not have the PG_mte_tagged flag set.
+
+Set PG_mte_tagged on the zero page when its tags are cleared during
+boot. In addition, to avoid ptrace(PTRACE_PEEKMTETAGS) succeeding on
+!PROT_MTE mappings pointing to the zero page, change the
+__access_remote_tags() check to (vm_flags & VM_MTE) instead of
+PG_mte_tagged.
+
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Fixes: 34bfeea4a9e9 ("arm64: mte: Clear the tags when a page is mapped in user-space with PROT_MTE")
+Cc: <stable@vger.kernel.org> # 5.10.x
+Cc: Will Deacon <will@kernel.org>
+Reported-by: Luis Machado <luis.machado@linaro.org>
+Tested-by: Luis Machado <luis.machado@linaro.org>
+Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Link: https://lore.kernel.org/r/20210210180316.23654-1-catalin.marinas@arm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kernel/cpufeature.c | 5 +----
+ arch/arm64/kernel/mte.c        | 3 ++-
+ 2 files changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 0a52e076153bb..e01ad6aa9674e 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1696,14 +1696,11 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused)
+ #ifdef CONFIG_ARM64_MTE
+ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
+ {
+-      static bool cleared_zero_page = false;
+-
+       /*
+        * Clear the tags in the zero page. This needs to be done via the
+        * linear map which has the Tagged attribute.
+        */
+-      if (!cleared_zero_page) {
+-              cleared_zero_page = true;
++      if (!test_and_set_bit(PG_mte_tagged, &ZERO_PAGE(0)->flags)) {
+               mte_clear_page_tags(lm_alias(empty_zero_page));
+       }
+ }
+diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
+index ef15c8a2a49dc..7a66a7d9c1ffc 100644
+--- a/arch/arm64/kernel/mte.c
++++ b/arch/arm64/kernel/mte.c
+@@ -239,11 +239,12 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
+                * would cause the existing tags to be cleared if the page
+                * was never mapped with PROT_MTE.
+                */
+-              if (!test_bit(PG_mte_tagged, &page->flags)) {
++              if (!(vma->vm_flags & VM_MTE)) {
+                       ret = -EOPNOTSUPP;
+                       put_page(page);
+                       break;
+               }
++              WARN_ON_ONCE(!test_bit(PG_mte_tagged, &page->flags));
+
+               /* limit access to the end of the page */
+               offset = offset_in_page(addr);
+-- 
+2.27.0
+
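For context, the failure mode fixed by the patch above can be reproduced from a tracer. The C sketch below is an illustration only, not part of the queued patch: it assumes an MTE-capable arm64 kernel, a child process that is already ptrace-attached and stopped, and an addr inside a PROT_MTE mapping that the child has never written to, so it is still backed by the zero page. The PTRACE_PEEKMTETAGS request takes a struct iovec, as described in Documentation/arm64/memory-tagging-extension.rst.

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef PTRACE_PEEKMTETAGS
#define PTRACE_PEEKMTETAGS 33	/* from the arm64 uapi asm/ptrace.h */
#endif

/*
 * Read the MTE allocation tags backing 'addr' in a stopped, traced
 * child. One tag covers each 16-byte granule; the kernel stores one
 * tag per byte of the iovec buffer and updates iov_len.
 */
static int peek_tags(pid_t child, void *addr)
{
	unsigned char tags[16];
	struct iovec iov = { .iov_base = tags, .iov_len = sizeof(tags) };

	/*
	 * On kernels without the fix, this fails with EIO for an
	 * unwritten PROT_MTE mapping: the backing zero page never had
	 * PG_mte_tagged set. With the fix, the zero page is tagged once
	 * at boot and the call returns zeroed tags.
	 */
	if (ptrace(PTRACE_PEEKMTETAGS, child, addr, &iov) < 0) {
		perror("PTRACE_PEEKMTETAGS");
		return -1;
	}
	printf("read %zu tags, tag[0]=%u\n", iov.iov_len, tags[0]);
	return 0;
}

The same sketch also shows why the second hunk tightens __access_remote_tags(): a !PROT_MTE mapping of the zero page would otherwise start passing the PG_mte_tagged test once the zero page is tagged, so the check moves to the VMA's VM_MTE flag.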
diff --git a/queue-5.10/kvm-x86-cleanup-cr3-reserved-bits-checks.patch b/queue-5.10/kvm-x86-cleanup-cr3-reserved-bits-checks.patch
new file mode 100644
index 0000000..1ed091f
--- /dev/null
+++ b/queue-5.10/kvm-x86-cleanup-cr3-reserved-bits-checks.patch
@@ -0,0 +1,92 @@
+From fe089974dac685cf29a335f28ddc37abcc8885c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Nov 2020 08:30:38 -0500
+Subject: KVM: x86: cleanup CR3 reserved bits checks
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+[ Upstream commit c1c35cf78bfab31b8cb455259524395c9e4c7cd6 ]
+
+If not in long mode, the low bits of CR3 are reserved but not enforced to
+be zero, so remove those checks.  If in long mode, however, the MBZ bits
+extend down to the highest physical address bit of the guest, excluding
+the encryption bit.
+
+Make the checks consistent with the above, and match them between
+nested_vmcb_checks and KVM_SET_SREGS.
+
+Cc: stable@vger.kernel.org
+Fixes: 761e41693465 ("KVM: nSVM: Check that MBZ bits in CR3 and CR4 are not set on vmrun of nested guests")
+Fixes: a780a3ea6282 ("KVM: X86: Fix reserved bits check for MOV to CR3")
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/svm/nested.c | 13 +++----------
+ arch/x86/kvm/svm/svm.h    |  3 ---
+ arch/x86/kvm/x86.c        |  2 ++
+ 3 files changed, 5 insertions(+), 13 deletions(-)
+
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index 65e40acde71aa..4fbe190c79159 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -231,6 +231,7 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
+
+ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+ {
++      struct kvm_vcpu *vcpu = &svm->vcpu;
+       bool vmcb12_lma;
+
+       if ((vmcb12->save.efer & EFER_SVME) == 0)
+@@ -244,18 +245,10 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+
+       vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
+
+-      if (!vmcb12_lma) {
+-              if (vmcb12->save.cr4 & X86_CR4_PAE) {
+-                      if (vmcb12->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
+-                              return false;
+-              } else {
+-                      if (vmcb12->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
+-                              return false;
+-              }
+-      } else {
++      if (vmcb12_lma) {
+               if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
+                   !(vmcb12->save.cr0 & X86_CR0_PE) ||
+-                  (vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
++                  (vmcb12->save.cr3 & vcpu->arch.cr3_lm_rsvd_bits))
+                       return false;
+       }
+       if (kvm_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index 1d853fe4c778b..be74e22b82ea7 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -346,9 +346,6 @@ static inline bool gif_set(struct vcpu_svm *svm)
+ }
+
+ /* svm.c */
+-#define MSR_CR3_LEGACY_RESERVED_MASK          0xfe7U
+-#define MSR_CR3_LEGACY_PAE_RESERVED_MASK      0x7U
+-#define MSR_CR3_LONG_MBZ_MASK                 0xfff0000000000000U
+ #define MSR_INVALID                           0xffffffffU
+
+ u32 svm_msrpm_offset(u32 msr);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 18a315bbcb79e..3bcde449938e6 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -9558,6 +9558,8 @@ static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+               if (!(sregs->cr4 & X86_CR4_PAE)
+                   || !(sregs->efer & EFER_LMA))
+                       return -EINVAL;
++              if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits)
++                      return false;
+       } else {
+               /*
+                * Not in 64-bit mode: EFER.LMA is clear and the code
+-- 
+2.27.0
+
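The long-mode check above replaces the fixed MSR_CR3_LONG_MBZ_MASK with vcpu->arch.cr3_lm_rsvd_bits, which is derived from the guest's MAXPHYADDR. The C sketch below shows that derivation; it is an illustration only, not the kernel's exact code, and the SEV C-bit handling is an assumption based on the commit message's "excluding the encryption bit".

#include <stdint.h>

/*
 * Illustration only -- not the kernel's exact code. Computes the
 * long-mode CR3 must-be-zero mask for a guest with the given
 * MAXPHYADDR (assumed < 64, as it always is on x86); sev_cbit is the
 * SEV page-encryption bit position, or -1 when memory encryption is
 * not exposed to the guest.
 */
static uint64_t cr3_long_mode_mbz(unsigned int maxphyaddr, int sev_cbit)
{
	uint64_t mask = ~0ULL << maxphyaddr;	/* bits [63:MAXPHYADDR] */

	if (sev_cbit >= 0)
		mask &= ~(1ULL << sev_cbit);	/* the guest may set the C-bit */

	return mask;
}

Outside long mode the patch drops the legacy masks entirely: the low CR3 bits are reserved there but the architecture does not enforce them to be zero, so checking them was wrong.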
diff --git a/queue-5.10/series b/queue-5.10/series
index a243bc71b82e1e4099ab7e10ce0107227b24710e..5ea6299bd1a81a891626bc28a88cc84bc7ac70b0 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -45,3 +45,5 @@ ubsan-implement-__ubsan_handle_alignment_assumption.patch
 revert-lib-restrict-cpumask_local_spread-to-houskeep.patch
 x86-efi-remove-efi-pgd-build-time-checks.patch
 lkdtm-don-t-move-ctors-to-.rodata.patch
+arm64-mte-allow-ptrace_peekmtetags-access-to-the-zer.patch
+kvm-x86-cleanup-cr3-reserved-bits-checks.patch