]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.4-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 8 Jul 2021 18:33:09 +0000 (20:33 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 8 Jul 2021 18:33:09 +0000 (20:33 +0200)
added patches:
kvm-svm-call-sev-guest-decommission-if-asid-binding-fails.patch
kvm-svm-periodically-schedule-when-unregistering-regions-on-destroy.patch
s390-stack-fix-possible-register-corruption-with-stack-switch-helper.patch
xen-events-reset-active-flag-for-lateeoi-events-later.patch

queue-5.4/kvm-svm-call-sev-guest-decommission-if-asid-binding-fails.patch [new file with mode: 0644]
queue-5.4/kvm-svm-periodically-schedule-when-unregistering-regions-on-destroy.patch [new file with mode: 0644]
queue-5.4/s390-stack-fix-possible-register-corruption-with-stack-switch-helper.patch [new file with mode: 0644]
queue-5.4/series [new file with mode: 0644]
queue-5.4/xen-events-reset-active-flag-for-lateeoi-events-later.patch [new file with mode: 0644]

diff --git a/queue-5.4/kvm-svm-call-sev-guest-decommission-if-asid-binding-fails.patch b/queue-5.4/kvm-svm-call-sev-guest-decommission-if-asid-binding-fails.patch
new file mode 100644 (file)
index 0000000..872f6c3
--- /dev/null
@@ -0,0 +1,92 @@
+From 934002cd660b035b926438244b4294e647507e13 Mon Sep 17 00:00:00 2001
+From: Alper Gun <alpergun@google.com>
+Date: Thu, 10 Jun 2021 17:46:04 +0000
+Subject: KVM: SVM: Call SEV Guest Decommission if ASID binding fails
+
+From: Alper Gun <alpergun@google.com>
+
+commit 934002cd660b035b926438244b4294e647507e13 upstream.
+
+Send SEV_CMD_DECOMMISSION command to PSP firmware if ASID binding
+fails. If a failure happens after a successful LAUNCH_START command,
+a decommission command should be executed. Otherwise, guest context
+will be unfreed inside the AMD SP. After the firmware will not have
+memory to allocate more SEV guest context, LAUNCH_START command will
+begin to fail with SEV_RET_RESOURCE_LIMIT error.
+
+The existing code calls decommission inside sev_unbind_asid, but it is
+not called if a failure happens before guest activation succeeds. If
+sev_bind_asid fails, decommission is never called. PSP firmware has a
+limit for the number of guests. If sev_asid_binding fails many times,
+PSP firmware will not have resources to create another guest context.
+
+Cc: stable@vger.kernel.org
+Fixes: 59414c989220 ("KVM: SVM: Add support for KVM_SEV_LAUNCH_START command")
+Reported-by: Peter Gonda <pgonda@google.com>
+Signed-off-by: Alper Gun <alpergun@google.com>
+Reviewed-by: Marc Orr <marcorr@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Message-Id: <20210610174604.2554090-1-alpergun@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm.c |   32 +++++++++++++++++++++-----------
+ 1 file changed, 21 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1794,9 +1794,25 @@ static void sev_asid_free(struct kvm *kv
+       __sev_asid_free(sev->asid);
+ }
+-static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
++static void sev_decommission(unsigned int handle)
+ {
+       struct sev_data_decommission *decommission;
++
++      if (!handle)
++              return;
++
++      decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
++      if (!decommission)
++              return;
++
++      decommission->handle = handle;
++      sev_guest_decommission(decommission, NULL);
++
++      kfree(decommission);
++}
++
++static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
++{
+       struct sev_data_deactivate *data;
+       if (!handle)
+@@ -1814,15 +1830,7 @@ static void sev_unbind_asid(struct kvm *
+       sev_guest_df_flush(NULL);
+       kfree(data);
+-      decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
+-      if (!decommission)
+-              return;
+-
+-      /* decommission handle */
+-      decommission->handle = handle;
+-      sev_guest_decommission(decommission, NULL);
+-
+-      kfree(decommission);
++      sev_decommission(handle);
+ }
+ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
+@@ -6476,8 +6484,10 @@ static int sev_launch_start(struct kvm *
+       /* Bind ASID to this guest */
+       ret = sev_bind_asid(kvm, start->handle, error);
+-      if (ret)
++      if (ret) {
++              sev_decommission(start->handle);
+               goto e_free_session;
++      }
+       /* return handle to userspace */
+       params.handle = start->handle;
diff --git a/queue-5.4/kvm-svm-periodically-schedule-when-unregistering-regions-on-destroy.patch b/queue-5.4/kvm-svm-periodically-schedule-when-unregistering-regions-on-destroy.patch
new file mode 100644 (file)
index 0000000..b526c6a
--- /dev/null
@@ -0,0 +1,59 @@
+From 7be74942f184fdfba34ddd19a0d995deb34d4a03 Mon Sep 17 00:00:00 2001
+From: David Rientjes <rientjes@google.com>
+Date: Tue, 25 Aug 2020 12:56:28 -0700
+Subject: KVM: SVM: Periodically schedule when unregistering regions on destroy
+
+From: David Rientjes <rientjes@google.com>
+
+commit 7be74942f184fdfba34ddd19a0d995deb34d4a03 upstream.
+
+There may be many encrypted regions that need to be unregistered when a
+SEV VM is destroyed.  This can lead to soft lockups.  For example, on a
+host running 4.15:
+
+watchdog: BUG: soft lockup - CPU#206 stuck for 11s! [t_virtual_machi:194348]
+CPU: 206 PID: 194348 Comm: t_virtual_machi
+RIP: 0010:free_unref_page_list+0x105/0x170
+...
+Call Trace:
+ [<0>] release_pages+0x159/0x3d0
+ [<0>] sev_unpin_memory+0x2c/0x50 [kvm_amd]
+ [<0>] __unregister_enc_region_locked+0x2f/0x70 [kvm_amd]
+ [<0>] svm_vm_destroy+0xa9/0x200 [kvm_amd]
+ [<0>] kvm_arch_destroy_vm+0x47/0x200
+ [<0>] kvm_put_kvm+0x1a8/0x2f0
+ [<0>] kvm_vm_release+0x25/0x30
+ [<0>] do_exit+0x335/0xc10
+ [<0>] do_group_exit+0x3f/0xa0
+ [<0>] get_signal+0x1bc/0x670
+ [<0>] do_signal+0x31/0x130
+
+Although the CLFLUSH is no longer issued on every encrypted region to be
+unregistered, there are no other changes that can prevent soft lockups for
+very large SEV VMs in the latest kernel.
+
+Periodically schedule if necessary.  This still holds kvm->lock across the
+resched, but since this only happens when the VM is destroyed this is
+assumed to be acceptable.
+
+Signed-off-by: David Rientjes <rientjes@google.com>
+Message-Id: <alpine.DEB.2.23.453.2008251255240.2987727@chino.kir.corp.google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+[iwamatsu: adjust filename.]
+Reference: CVE-2020-36311
+Signed-off-by: Nobuhiro Iwamatsu (CIP) <nobuhiro1.iwamatsu@toshiba.co.jp>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1960,6 +1960,7 @@ static void sev_vm_destroy(struct kvm *k
+               list_for_each_safe(pos, q, head) {
+                       __unregister_enc_region_locked(kvm,
+                               list_entry(pos, struct enc_region, list));
++                      cond_resched();
+               }
+       }
diff --git a/queue-5.4/s390-stack-fix-possible-register-corruption-with-stack-switch-helper.patch b/queue-5.4/s390-stack-fix-possible-register-corruption-with-stack-switch-helper.patch
new file mode 100644 (file)
index 0000000..52b831f
--- /dev/null
@@ -0,0 +1,68 @@
+From 67147e96a332b56c7206238162771d82467f86c0 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <hca@linux.ibm.com>
+Date: Fri, 18 Jun 2021 16:58:47 +0200
+Subject: s390/stack: fix possible register corruption with stack switch helper
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+commit 67147e96a332b56c7206238162771d82467f86c0 upstream.
+
+The CALL_ON_STACK macro is used to call a C function from inline
+assembly, and therefore must consider the C ABI, which says that only
+registers 6-13, and 15 are non-volatile (restored by the called
+function).
+
+The inline assembly incorrectly marks all registers used to pass
+parameters to the called function as read-only input operands, instead
+of operands that are read and written to. This might result in
+register corruption depending on usage, compiler, and compile options.
+
+Fix this by marking all operands used to pass parameters as read/write
+operands. To keep the code simple even register 6, if used, is marked
+as read-write operand.
+
+Fixes: ff340d2472ec ("s390: add stack switch helper")
+Cc: <stable@kernel.org> # 4.20
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/stacktrace.h |   18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+--- a/arch/s390/include/asm/stacktrace.h
++++ b/arch/s390/include/asm/stacktrace.h
+@@ -79,12 +79,16 @@ struct stack_frame {
+       CALL_ARGS_4(arg1, arg2, arg3, arg4);                            \
+       register unsigned long r4 asm("6") = (unsigned long)(arg5)
+-#define CALL_FMT_0 "=&d" (r2) :
+-#define CALL_FMT_1 "+&d" (r2) :
+-#define CALL_FMT_2 CALL_FMT_1 "d" (r3),
+-#define CALL_FMT_3 CALL_FMT_2 "d" (r4),
+-#define CALL_FMT_4 CALL_FMT_3 "d" (r5),
+-#define CALL_FMT_5 CALL_FMT_4 "d" (r6),
++/*
++ * To keep this simple mark register 2-6 as being changed (volatile)
++ * by the called function, even though register 6 is saved/nonvolatile.
++ */
++#define CALL_FMT_0 "=&d" (r2)
++#define CALL_FMT_1 "+&d" (r2)
++#define CALL_FMT_2 CALL_FMT_1, "+&d" (r3)
++#define CALL_FMT_3 CALL_FMT_2, "+&d" (r4)
++#define CALL_FMT_4 CALL_FMT_3, "+&d" (r5)
++#define CALL_FMT_5 CALL_FMT_4, "+&d" (r6)
+ #define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
+ #define CALL_CLOBBER_4 CALL_CLOBBER_5
+@@ -105,7 +109,7 @@ struct stack_frame {
+               "       brasl   14,%[_fn]\n"                            \
+               "       la      15,0(%[_prev])\n"                       \
+               : [_prev] "=&a" (prev), CALL_FMT_##nr                   \
+-                [_stack] "a" (stack),                                 \
++              : [_stack] "a" (stack),                                 \
+                 [_bc] "i" (offsetof(struct stack_frame, back_chain)), \
+                 [_fn] "X" (fn) : CALL_CLOBBER_##nr);                  \
+       r2;                                                             \
diff --git a/queue-5.4/series b/queue-5.4/series
new file mode 100644 (file)
index 0000000..242dde4
--- /dev/null
@@ -0,0 +1,4 @@
+kvm-svm-periodically-schedule-when-unregistering-regions-on-destroy.patch
+s390-stack-fix-possible-register-corruption-with-stack-switch-helper.patch
+kvm-svm-call-sev-guest-decommission-if-asid-binding-fails.patch
+xen-events-reset-active-flag-for-lateeoi-events-later.patch
diff --git a/queue-5.4/xen-events-reset-active-flag-for-lateeoi-events-later.patch b/queue-5.4/xen-events-reset-active-flag-for-lateeoi-events-later.patch
new file mode 100644 (file)
index 0000000..3826952
--- /dev/null
@@ -0,0 +1,69 @@
+From 3de218ff39b9e3f0d453fe3154f12a174de44b25 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Wed, 23 Jun 2021 15:09:13 +0200
+Subject: xen/events: reset active flag for lateeoi events later
+
+From: Juergen Gross <jgross@suse.com>
+
+commit 3de218ff39b9e3f0d453fe3154f12a174de44b25 upstream.
+
+In order to avoid a race condition for user events when changing
+cpu affinity reset the active flag only when EOI-ing the event.
+
+This is working fine as all user events are lateeoi events. Note that
+lateeoi_ack_mask_dynirq() is not modified as there is no explicit call
+to xen_irq_lateeoi() expected later.
+
+Cc: stable@vger.kernel.org
+Reported-by: Julien Grall <julien@xen.org>
+Fixes: b6622798bc50b62 ("xen/events: avoid handling the same event on two cpus at the same time")
+Tested-by: Julien Grall <julien@xen.org>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Link: https://lore.kernel.org/r/20210623130913.9405-1-jgross@suse.com
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/events/events_base.c |   23 +++++++++++++++++++----
+ 1 file changed, 19 insertions(+), 4 deletions(-)
+
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -525,6 +525,9 @@ static void xen_irq_lateeoi_locked(struc
+       }
+       info->eoi_time = 0;
++
++      /* is_active hasn't been reset yet, do it now. */
++      smp_store_release(&info->is_active, 0);
+       do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
+ }
+@@ -1781,10 +1784,22 @@ static void lateeoi_ack_dynirq(struct ir
+       struct irq_info *info = info_for_irq(data->irq);
+       evtchn_port_t evtchn = info ? info->evtchn : 0;
+-      if (VALID_EVTCHN(evtchn)) {
+-              do_mask(info, EVT_MASK_REASON_EOI_PENDING);
+-              ack_dynirq(data);
+-      }
++      if (!VALID_EVTCHN(evtchn))
++              return;
++
++      do_mask(info, EVT_MASK_REASON_EOI_PENDING);
++
++      if (unlikely(irqd_is_setaffinity_pending(data)) &&
++          likely(!irqd_irq_disabled(data))) {
++              do_mask(info, EVT_MASK_REASON_TEMPORARY);
++
++              clear_evtchn(evtchn);
++
++              irq_move_masked_irq(data);
++
++              do_unmask(info, EVT_MASK_REASON_TEMPORARY);
++      } else
++              clear_evtchn(evtchn);
+ }
+ static void lateeoi_mask_ack_dynirq(struct irq_data *data)