5.14-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 1 Nov 2021 08:24:28 +0000 (09:24 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 1 Nov 2021 08:24:28 +0000 (09:24 +0100)
added patches:
kvm-sev-es-fix-another-issue-with-string-i-o-vmgexits.patch
kvm-x86-switch-pvclock_gtod_sync_lock-to-a-raw-spinlock.patch
kvm-x86-take-srcu-lock-in-post_kvm_run_save.patch
kvm-x86-xen-fix-kvm_xen_has_interrupt-sleeping-in-kvm_vcpu_block.patch
perf-script-check-session-header.env.arch-before-using-it.patch
riscv-do-not-re-populate-shadow-memory-with-kasan_populate_early_shadow.patch
riscv-fix-asan-stack-clang-build.patch
riscv-fix-misalgned-trap-vector-base-address.patch
scsi-ibmvfc-fix-up-duplicate-response-detection.patch

queue-5.14/kvm-sev-es-fix-another-issue-with-string-i-o-vmgexits.patch [new file with mode: 0644]
queue-5.14/kvm-x86-switch-pvclock_gtod_sync_lock-to-a-raw-spinlock.patch [new file with mode: 0644]
queue-5.14/kvm-x86-take-srcu-lock-in-post_kvm_run_save.patch [new file with mode: 0644]
queue-5.14/kvm-x86-xen-fix-kvm_xen_has_interrupt-sleeping-in-kvm_vcpu_block.patch [new file with mode: 0644]
queue-5.14/perf-script-check-session-header.env.arch-before-using-it.patch [new file with mode: 0644]
queue-5.14/riscv-do-not-re-populate-shadow-memory-with-kasan_populate_early_shadow.patch [new file with mode: 0644]
queue-5.14/riscv-fix-asan-stack-clang-build.patch [new file with mode: 0644]
queue-5.14/riscv-fix-misalgned-trap-vector-base-address.patch [new file with mode: 0644]
queue-5.14/scsi-ibmvfc-fix-up-duplicate-response-detection.patch [new file with mode: 0644]
queue-5.14/series

diff --git a/queue-5.14/kvm-sev-es-fix-another-issue-with-string-i-o-vmgexits.patch b/queue-5.14/kvm-sev-es-fix-another-issue-with-string-i-o-vmgexits.patch
new file mode 100644
index 0000000..9205f61
--- /dev/null
@@ -0,0 +1,54 @@
+From 9b0971ca7fc75daca80c0bb6c02e96059daea90a Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 25 Oct 2021 12:14:31 -0400
+Subject: KVM: SEV-ES: fix another issue with string I/O VMGEXITs
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 9b0971ca7fc75daca80c0bb6c02e96059daea90a upstream.
+
+If the guest requests string I/O from the hypervisor via VMGEXIT,
+SW_EXITINFO2 will contain the REP count.  However, sev_es_string_io
+was incorrectly treating it as the size of the GHCB buffer in
+bytes.
+
+This fixes the "outsw" test in the experimental SEV tests of
+kvm-unit-tests.
+
+Cc: stable@vger.kernel.org
+Fixes: 7ed9abfe8e9f ("KVM: SVM: Support string IO operations for an SEV-ES guest")
+Reported-by: Marc Orr <marcorr@google.com>
+Tested-by: Marc Orr <marcorr@google.com>
+Reviewed-by: Marc Orr <marcorr@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/sev.c |   15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -2592,11 +2592,20 @@ int sev_handle_vmgexit(struct kvm_vcpu *
+ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
+ {
+-      if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
++      int count;
++      int bytes;
++
++      if (svm->vmcb->control.exit_info_2 > INT_MAX)
++              return -EINVAL;
++
++      count = svm->vmcb->control.exit_info_2;
++      if (unlikely(check_mul_overflow(count, size, &bytes)))
++              return -EINVAL;
++
++      if (!setup_vmgexit_scratch(svm, in, bytes))
+               return -EINVAL;
+-      return kvm_sev_es_string_io(&svm->vcpu, size, port,
+-                                  svm->ghcb_sa, svm->ghcb_sa_len / size, in);
++      return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->ghcb_sa, count, in);
+ }
+ void sev_es_init_vmcb(struct vcpu_svm *svm)
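The core of this fix is the overflow-checked conversion from a REP count to a
byte length. Below is a minimal standalone sketch of the same guard, using the
compiler builtin that the kernel's check_mul_overflow() wraps; the helper name
and test values are hypothetical, not part of the patch.

#include <limits.h>
#include <stdio.h>

/* Convert an element count and element size into a byte length,
 * rejecting anything that does not fit in an int, as the patched
 * sev_es_string_io() does. */
static int rep_count_to_bytes(unsigned long long rep_count, int size,
			      int *bytes)
{
	int count;

	if (rep_count > INT_MAX)
		return -1;	/* the count alone does not fit */

	count = (int)rep_count;
	if (__builtin_mul_overflow(count, size, bytes))
		return -1;	/* count * size overflows an int */

	return 0;
}

int main(void)
{
	int bytes;

	/* 0x40000000 four-byte elements would overflow a 32-bit length */
	if (rep_count_to_bytes(0x40000000ULL, 4, &bytes) != 0)
		printf("rejected oversized string I/O request\n");
	return 0;
}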
diff --git a/queue-5.14/kvm-x86-switch-pvclock_gtod_sync_lock-to-a-raw-spinlock.patch b/queue-5.14/kvm-x86-switch-pvclock_gtod_sync_lock-to-a-raw-spinlock.patch
new file mode 100644
index 0000000..be6eb33
--- /dev/null
@@ -0,0 +1,161 @@
+From 8228c77d8b56e3f735baf71fefb1b548c23691a7 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Sat, 23 Oct 2021 21:29:22 +0100
+Subject: KVM: x86: switch pvclock_gtod_sync_lock to a raw spinlock
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 8228c77d8b56e3f735baf71fefb1b548c23691a7 upstream.
+
+On the preemption path when updating a Xen guest's runstate times, this
+lock is taken inside the scheduler rq->lock, which is a raw spinlock.
+This was shown in a lockdep warning:
+
+[   89.138354] =============================
+[   89.138356] [ BUG: Invalid wait context ]
+[   89.138358] 5.15.0-rc5+ #834 Tainted: G S        I E
+[   89.138360] -----------------------------
+[   89.138361] xen_shinfo_test/2575 is trying to lock:
+[   89.138363] ffffa34a0364efd8 (&kvm->arch.pvclock_gtod_sync_lock){....}-{3:3}, at: get_kvmclock_ns+0x1f/0x130 [kvm]
+[   89.138442] other info that might help us debug this:
+[   89.138444] context-{5:5}
+[   89.138445] 4 locks held by xen_shinfo_test/2575:
+[   89.138447]  #0: ffff972bdc3b8108 (&vcpu->mutex){+.+.}-{4:4}, at: kvm_vcpu_ioctl+0x77/0x6f0 [kvm]
+[   89.138483]  #1: ffffa34a03662e90 (&kvm->srcu){....}-{0:0}, at: kvm_arch_vcpu_ioctl_run+0xdc/0x8b0 [kvm]
+[   89.138526]  #2: ffff97331fdbac98 (&rq->__lock){-.-.}-{2:2}, at: __schedule+0xff/0xbd0
+[   89.138534]  #3: ffffa34a03662e90 (&kvm->srcu){....}-{0:0}, at: kvm_arch_vcpu_put+0x26/0x170 [kvm]
+...
+[   89.138695]  get_kvmclock_ns+0x1f/0x130 [kvm]
+[   89.138734]  kvm_xen_update_runstate+0x14/0x90 [kvm]
+[   89.138783]  kvm_xen_update_runstate_guest+0x15/0xd0 [kvm]
+[   89.138830]  kvm_arch_vcpu_put+0xe6/0x170 [kvm]
+[   89.138870]  kvm_sched_out+0x2f/0x40 [kvm]
+[   89.138900]  __schedule+0x5de/0xbd0
+
+Cc: stable@vger.kernel.org
+Reported-by: syzbot+b282b65c2c68492df769@syzkaller.appspotmail.com
+Fixes: 30b5c851af79 ("KVM: x86/xen: Add support for vCPU runstate information")
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Message-Id: <1b02a06421c17993df337493a68ba923f3bd5c0f.camel@infradead.org>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/kvm_host.h |    2 +-
+ arch/x86/kvm/x86.c              |   28 ++++++++++++++--------------
+ 2 files changed, 15 insertions(+), 15 deletions(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1084,7 +1084,7 @@ struct kvm_arch {
+       u64 cur_tsc_generation;
+       int nr_vcpus_matched_tsc;
+-      spinlock_t pvclock_gtod_sync_lock;
++      raw_spinlock_t pvclock_gtod_sync_lock;
+       bool use_master_clock;
+       u64 master_kernel_ns;
+       u64 master_cycle_now;
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2537,7 +2537,7 @@ static void kvm_synchronize_tsc(struct k
+       kvm_vcpu_write_tsc_offset(vcpu, offset);
+       raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
+-      spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
++      raw_spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
+       if (!matched) {
+               kvm->arch.nr_vcpus_matched_tsc = 0;
+       } else if (!already_matched) {
+@@ -2545,7 +2545,7 @@ static void kvm_synchronize_tsc(struct k
+       }
+       kvm_track_tsc_matching(vcpu);
+-      spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
++      raw_spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
+ }
+ static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
+@@ -2775,9 +2775,9 @@ static void kvm_gen_update_masterclock(s
+       kvm_make_mclock_inprogress_request(kvm);
+       /* no guest entries from this point */
+-      spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
++      raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+       pvclock_update_vm_gtod_copy(kvm);
+-      spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
++      raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+@@ -2795,15 +2795,15 @@ u64 get_kvmclock_ns(struct kvm *kvm)
+       unsigned long flags;
+       u64 ret;
+-      spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
++      raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+       if (!ka->use_master_clock) {
+-              spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
++              raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+               return get_kvmclock_base_ns() + ka->kvmclock_offset;
+       }
+       hv_clock.tsc_timestamp = ka->master_cycle_now;
+       hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
+-      spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
++      raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+       /* both __this_cpu_read() and rdtsc() should be on the same cpu */
+       get_cpu();
+@@ -2897,13 +2897,13 @@ static int kvm_guest_time_update(struct
+        * If the host uses TSC clock, then passthrough TSC as stable
+        * to the guest.
+        */
+-      spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
++      raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+       use_master_clock = ka->use_master_clock;
+       if (use_master_clock) {
+               host_tsc = ka->master_cycle_now;
+               kernel_ns = ka->master_kernel_ns;
+       }
+-      spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
++      raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+       /* Keep irq disabled to prevent changes to the clock */
+       local_irq_save(flags);
+@@ -6101,13 +6101,13 @@ set_pit2_out:
+                * is slightly ahead) here we risk going negative on unsigned
+                * 'system_time' when 'user_ns.clock' is very small.
+                */
+-              spin_lock_irq(&ka->pvclock_gtod_sync_lock);
++              raw_spin_lock_irq(&ka->pvclock_gtod_sync_lock);
+               if (kvm->arch.use_master_clock)
+                       now_ns = ka->master_kernel_ns;
+               else
+                       now_ns = get_kvmclock_base_ns();
+               ka->kvmclock_offset = user_ns.clock - now_ns;
+-              spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
++              raw_spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
+               kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
+               break;
+@@ -8157,9 +8157,9 @@ static void kvm_hyperv_tsc_notifier(void
+       list_for_each_entry(kvm, &vm_list, vm_list) {
+               struct kvm_arch *ka = &kvm->arch;
+-              spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
++              raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+               pvclock_update_vm_gtod_copy(kvm);
+-              spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
++              raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+               kvm_for_each_vcpu(cpu, vcpu, kvm)
+                       kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+@@ -11148,7 +11148,7 @@ int kvm_arch_init_vm(struct kvm *kvm, un
+       raw_spin_lock_init(&kvm->arch.tsc_write_lock);
+       mutex_init(&kvm->arch.apic_map_lock);
+-      spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
++      raw_spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
+       kvm->arch.kvmclock_offset = -get_kvmclock_base_ns();
+       pvclock_update_vm_gtod_copy(kvm);
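The conversion above follows the general PREEMPT_RT locking rule: a plain
spinlock_t becomes a sleeping lock on RT kernels, so any lock that can be
taken while the scheduler's raw rq->lock is held must itself be a
raw_spinlock_t. A hypothetical sketch of the pattern, not taken from the
patch:

#include <linux/spinlock.h>

struct clock_sync {
	raw_spinlock_t lock;	/* raw: may be taken under rq->lock */
	u64 master_ns;
};

static void clock_sync_init(struct clock_sync *cs)
{
	raw_spin_lock_init(&cs->lock);
}

static void clock_sync_update(struct clock_sync *cs, u64 ns)
{
	unsigned long flags;

	/* Never sleeps, even on PREEMPT_RT, so it is safe on the
	 * preemption path that produced the lockdep splat above. */
	raw_spin_lock_irqsave(&cs->lock, flags);
	cs->master_ns = ns;
	raw_spin_unlock_irqrestore(&cs->lock, flags);
}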
diff --git a/queue-5.14/kvm-x86-take-srcu-lock-in-post_kvm_run_save.patch b/queue-5.14/kvm-x86-take-srcu-lock-in-post_kvm_run_save.patch
new file mode 100644
index 0000000..beed50b
--- /dev/null
@@ -0,0 +1,71 @@
+From f3d1436d4bf8ced1c9a62a045d193a65567e1fcc Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Tue, 26 Oct 2021 04:12:38 +0100
+Subject: KVM: x86: Take srcu lock in post_kvm_run_save()
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit f3d1436d4bf8ced1c9a62a045d193a65567e1fcc upstream.
+
+The Xen interrupt injection for event channels relies on accessing the
+guest's vcpu_info structure in __kvm_xen_has_interrupt(), through a
+gfn_to_hva_cache.
+
+This requires the srcu lock to be held, which is mostly the case except
+for this code path:
+
+[   11.822877] WARNING: suspicious RCU usage
+[   11.822965] -----------------------------
+[   11.823013] include/linux/kvm_host.h:664 suspicious rcu_dereference_check() usage!
+[   11.823131]
+[   11.823131] other info that might help us debug this:
+[   11.823131]
+[   11.823196]
+[   11.823196] rcu_scheduler_active = 2, debug_locks = 1
+[   11.823253] 1 lock held by dom:0/90:
+[   11.823292]  #0: ffff998956ec8118 (&vcpu->mutex){+.+.}, at: kvm_vcpu_ioctl+0x85/0x680
+[   11.823379]
+[   11.823379] stack backtrace:
+[   11.823428] CPU: 2 PID: 90 Comm: dom:0 Kdump: loaded Not tainted 5.4.34+ #5
+[   11.823496] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.12.1-0-ga5cab58e9a3f-prebuilt.qemu.org 04/01/2014
+[   11.823612] Call Trace:
+[   11.823645]  dump_stack+0x7a/0xa5
+[   11.823681]  lockdep_rcu_suspicious+0xc5/0x100
+[   11.823726]  __kvm_xen_has_interrupt+0x179/0x190
+[   11.823773]  kvm_cpu_has_extint+0x6d/0x90
+[   11.823813]  kvm_cpu_accept_dm_intr+0xd/0x40
+[   11.823853]  kvm_vcpu_ready_for_interrupt_injection+0x20/0x30
+              < post_kvm_run_save() inlined here >
+[   11.823906]  kvm_arch_vcpu_ioctl_run+0x135/0x6a0
+[   11.823947]  kvm_vcpu_ioctl+0x263/0x680
+
+Fixes: 40da8ccd724f ("KVM: x86/xen: Add event channel interrupt vector upcall")
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Cc: stable@vger.kernel.org
+Message-Id: <606aaaf29fca3850a63aa4499826104e77a72346.camel@infradead.org>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8799,9 +8799,17 @@ static void post_kvm_run_save(struct kvm
+       kvm_run->cr8 = kvm_get_cr8(vcpu);
+       kvm_run->apic_base = kvm_get_apic_base(vcpu);
++
++      /*
++       * The call to kvm_ready_for_interrupt_injection() may end up in
++       * kvm_xen_has_interrupt() which may require the srcu lock to be
++       * held, to protect against changes in the vcpu_info address.
++       */
++      vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_run->ready_for_interrupt_injection =
+               pic_in_kernel(vcpu->kvm) ||
+               kvm_vcpu_ready_for_interrupt_injection(vcpu);
++      srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+       if (is_smm(vcpu))
+               kvm_run->flags |= KVM_RUN_X86_SMM;
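The added lines are the canonical KVM SRCU read-side pattern: take an index,
perform the memslot-dependent access, release with the same index. Sketched in
isolation as a hypothetical helper:

#include <linux/kvm_host.h>

/* Any path that can reach a gfn_to_hva_cache lookup must run inside
 * an SRCU read-side section, which protects against concurrent
 * changes to the memslots (and thus the vcpu_info address). */
static bool vcpu_ready_for_injection(struct kvm_vcpu *vcpu)
{
	int idx;
	bool ready;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	ready = kvm_vcpu_ready_for_interrupt_injection(vcpu);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ready;
}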
diff --git a/queue-5.14/kvm-x86-xen-fix-kvm_xen_has_interrupt-sleeping-in-kvm_vcpu_block.patch b/queue-5.14/kvm-x86-xen-fix-kvm_xen_has_interrupt-sleeping-in-kvm_vcpu_block.patch
new file mode 100644
index 0000000..27148bf
--- /dev/null
@@ -0,0 +1,82 @@
+From 0985dba842eaa391858972cfe2724c3c174a2827 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Sat, 23 Oct 2021 20:47:19 +0100
+Subject: KVM: x86/xen: Fix kvm_xen_has_interrupt() sleeping in kvm_vcpu_block()
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 0985dba842eaa391858972cfe2724c3c174a2827 upstream.
+
+In kvm_vcpu_block, the current task is set to TASK_INTERRUPTIBLE before
+making a final check whether the vCPU should be woken from HLT by any
+incoming interrupt.
+
+This is a problem for the get_user() in __kvm_xen_has_interrupt(), which
+really shouldn't be sleeping when the task state has already been set.
+I think it's actually harmless as it would just manifest itself as a
+spurious wakeup, but it's causing a debug warning:
+
+[  230.963649] do not call blocking ops when !TASK_RUNNING; state=1 set at [<00000000b6bcdbc9>] prepare_to_swait_exclusive+0x30/0x80
+
+Fix the warning by turning it into an *explicit* spurious wakeup. When
+invoked with !task_is_running(current) (and we might as well add
+in_atomic() there while we're at it), just return 1 to indicate that
+an IRQ is pending, which will cause a wakeup and then something will
+call it again in a context that *can* sleep so it can fault the page
+back in.
+
+Cc: stable@vger.kernel.org
+Fixes: 40da8ccd724f ("KVM: x86/xen: Add event channel interrupt vector upcall")
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Message-Id: <168bf8c689561da904e48e2ff5ae4713eaef9e2d.camel@infradead.org>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/xen.c |   27 ++++++++++++++++++++++-----
+ 1 file changed, 22 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/xen.c
++++ b/arch/x86/kvm/xen.c
+@@ -191,6 +191,7 @@ void kvm_xen_update_runstate_guest(struc
+ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
+ {
++      int err;
+       u8 rc = 0;
+       /*
+@@ -217,13 +218,29 @@ int __kvm_xen_has_interrupt(struct kvm_v
+       if (likely(slots->generation == ghc->generation &&
+                  !kvm_is_error_hva(ghc->hva) && ghc->memslot)) {
+               /* Fast path */
+-              __get_user(rc, (u8 __user *)ghc->hva + offset);
+-      } else {
+-              /* Slow path */
+-              kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset,
+-                                           sizeof(rc));
++              pagefault_disable();
++              err = __get_user(rc, (u8 __user *)ghc->hva + offset);
++              pagefault_enable();
++              if (!err)
++                      return rc;
+       }
++      /* Slow path */
++
++      /*
++       * This function gets called from kvm_vcpu_block() after setting the
++       * task to TASK_INTERRUPTIBLE, to see if it needs to wake immediately
++       * from a HLT. So we really mustn't sleep. If the page ended up absent
++       * at that point, just return 1 in order to trigger an immediate wake,
++       * and we'll end up getting called again from a context where we *can*
++       * fault in the page and wait for it.
++       */
++      if (in_atomic() || !task_is_running(current))
++              return 1;
++
++      kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset,
++                                   sizeof(rc));
++
+       return rc;
+ }
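The new fast path uses the standard idiom for touching user-mapped memory from
a context that must not sleep: disable page faults, attempt the access, and
report failure rather than faulting the page in. As a hypothetical standalone
helper:

#include <linux/uaccess.h>

/* Read one byte of user memory without ever sleeping; fails with
 * -EFAULT (instead of faulting the page in) if it is not resident. */
static int peek_user_byte(const u8 __user *ptr, u8 *val)
{
	int err;

	pagefault_disable();
	err = __get_user(*val, ptr);
	pagefault_enable();

	return err;
}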
diff --git a/queue-5.14/perf-script-check-session-header.env.arch-before-using-it.patch b/queue-5.14/perf-script-check-session-header.env.arch-before-using-it.patch
new file mode 100644
index 0000000..7b5d3c3
--- /dev/null
@@ -0,0 +1,56 @@
+From 29c77550eef31b0d72a45b49eeab03b8963264e8 Mon Sep 17 00:00:00 2001
+From: Song Liu <songliubraving@fb.com>
+Date: Sun, 3 Oct 2021 22:32:38 -0700
+Subject: perf script: Check session->header.env.arch before using it
+
+From: Song Liu <songliubraving@fb.com>
+
+commit 29c77550eef31b0d72a45b49eeab03b8963264e8 upstream.
+
+When perf.data is not written cleanly, we would like to process existing
+data as much as possible (please see f_header.data.size == 0 condition
+in perf_session__read_header). However, perf.data with partial data may
+crash perf. Specifically, we see a crash in 'perf script' when
+session->header.env.arch is NULL.
+
+Fix this by checking session->header.env.arch before using it to determine
+native_arch. Also split the if condition so it is easier to read.
+
+Committer notes:
+
+If it is a pipe, we already assume is a native arch, so no need to check
+session->header.env.arch.
+
+Signed-off-by: Song Liu <songliubraving@fb.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: kernel-team@fb.com
+Cc: stable@vger.kernel.org
+Link: http://lore.kernel.org/lkml/20211004053238.514936-1-songliubraving@fb.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/perf/builtin-script.c |   12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -4024,11 +4024,15 @@ script_found:
+               goto out_delete;
+       uname(&uts);
+-      if (data.is_pipe ||  /* assume pipe_mode indicates native_arch */
+-          !strcmp(uts.machine, session->header.env.arch) ||
+-          (!strcmp(uts.machine, "x86_64") &&
+-           !strcmp(session->header.env.arch, "i386")))
++      if (data.is_pipe) { /* Assume pipe_mode indicates native_arch */
+               native_arch = true;
++      } else if (session->header.env.arch) {
++              if (!strcmp(uts.machine, session->header.env.arch))
++                      native_arch = true;
++              else if (!strcmp(uts.machine, "x86_64") &&
++                       !strcmp(session->header.env.arch, "i386"))
++                      native_arch = true;
++      }
+       script.session = session;
+       script__setup_sample_type(&script);
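The crash being fixed is a plain NULL dereference, strcmp(uts.machine, NULL),
when the header is truncated. The corrected logic condensed into a
hypothetical standalone predicate:

#include <stdbool.h>
#include <string.h>

/* env_arch may be NULL when perf.data was not written cleanly. */
static bool is_native_arch(const char *machine, const char *env_arch,
			   bool is_pipe)
{
	if (is_pipe)		/* pipe mode is assumed to be native */
		return true;
	if (!env_arch)		/* truncated header: cannot tell */
		return false;
	if (!strcmp(machine, env_arch))
		return true;
	/* i386 perf.data processed on an x86_64 host also counts */
	return !strcmp(machine, "x86_64") && !strcmp(env_arch, "i386");
}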
diff --git a/queue-5.14/riscv-do-not-re-populate-shadow-memory-with-kasan_populate_early_shadow.patch b/queue-5.14/riscv-do-not-re-populate-shadow-memory-with-kasan_populate_early_shadow.patch
new file mode 100644
index 0000000..bad5dfc
--- /dev/null
@@ -0,0 +1,58 @@
+From cf11d01135ea1ff7fddb612033e3cb5cde279ff2 Mon Sep 17 00:00:00 2001
+From: Alexandre Ghiti <alexandre.ghiti@canonical.com>
+Date: Fri, 29 Oct 2021 06:59:26 +0200
+Subject: riscv: Do not re-populate shadow memory with kasan_populate_early_shadow
+
+From: Alexandre Ghiti <alexandre.ghiti@canonical.com>
+
+commit cf11d01135ea1ff7fddb612033e3cb5cde279ff2 upstream.
+
+When calling this function, all the shadow memory is already populated
+with kasan_early_shadow_pte which has PAGE_KERNEL protection.
+kasan_populate_early_shadow write-protects the mapping of the range
+of addresses passed in argument in zero_pte_populate, which actually
+write-protects all the shadow memory mapping since kasan_early_shadow_pte
+is used for all the shadow memory at this point. And then when using
+memblock API to populate the shadow memory, the first write access to the
+kernel stack triggers a trap. This becomes visible with the next commit
+that contains a fix for asan-stack.
+
+We already manually populate all the shadow memory in kasan_early_init
+and we write-protect kasan_early_shadow_pte at the end of kasan_init
+which makes the calls to kasan_populate_early_shadow superfluous so
+we can remove them.
+
+Signed-off-by: Alexandre Ghiti <alexandre.ghiti@canonical.com>
+Fixes: e178d670f251 ("riscv/kasan: add KASAN_VMALLOC support")
+Fixes: 8ad8b72721d0 ("riscv: Add KASAN support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/mm/kasan_init.c |   11 -----------
+ 1 file changed, 11 deletions(-)
+
+--- a/arch/riscv/mm/kasan_init.c
++++ b/arch/riscv/mm/kasan_init.c
+@@ -172,21 +172,10 @@ void __init kasan_init(void)
+       phys_addr_t p_start, p_end;
+       u64 i;
+-      /*
+-       * Populate all kernel virtual address space with kasan_early_shadow_page
+-       * except for the linear mapping and the modules/kernel/BPF mapping.
+-       */
+-      kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
+-                                  (void *)kasan_mem_to_shadow((void *)
+-                                                              VMEMMAP_END));
+       if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+               kasan_shallow_populate(
+                       (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
+                       (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
+-      else
+-              kasan_populate_early_shadow(
+-                      (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
+-                      (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
+       /* Populate the linear mapping */
+       for_each_mem_range(i, &p_start, &p_end) {
diff --git a/queue-5.14/riscv-fix-asan-stack-clang-build.patch b/queue-5.14/riscv-fix-asan-stack-clang-build.patch
new file mode 100644
index 0000000..6883784
--- /dev/null
@@ -0,0 +1,65 @@
+From 54c5639d8f507ebefa814f574cb6f763033a72a5 Mon Sep 17 00:00:00 2001
+From: Alexandre Ghiti <alexandre.ghiti@canonical.com>
+Date: Fri, 29 Oct 2021 06:59:27 +0200
+Subject: riscv: Fix asan-stack clang build
+
+From: Alexandre Ghiti <alexandre.ghiti@canonical.com>
+
+commit 54c5639d8f507ebefa814f574cb6f763033a72a5 upstream.
+
+Nathan reported that because KASAN_SHADOW_OFFSET was not defined in
+Kconfig, it prevents asan-stack from getting disabled with clang even
+when CONFIG_KASAN_STACK is disabled: fix this by defining the
+corresponding config.
+
+Reported-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Alexandre Ghiti <alexandre.ghiti@canonical.com>
+Fixes: 8ad8b72721d0 ("riscv: Add KASAN support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/Kconfig             |    6 ++++++
+ arch/riscv/include/asm/kasan.h |    3 +--
+ arch/riscv/mm/kasan_init.c     |    3 +++
+ 3 files changed, 10 insertions(+), 2 deletions(-)
+
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -157,6 +157,12 @@ config PAGE_OFFSET
+       default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
+       default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB
++config KASAN_SHADOW_OFFSET
++      hex
++      depends on KASAN_GENERIC
++      default 0xdfffffc800000000 if 64BIT
++      default 0xffffffff if 32BIT
++
+ config ARCH_FLATMEM_ENABLE
+       def_bool !NUMA
+--- a/arch/riscv/include/asm/kasan.h
++++ b/arch/riscv/include/asm/kasan.h
+@@ -30,8 +30,7 @@
+ #define KASAN_SHADOW_SIZE     (UL(1) << ((CONFIG_VA_BITS - 1) - KASAN_SHADOW_SCALE_SHIFT))
+ #define KASAN_SHADOW_START    KERN_VIRT_START
+ #define KASAN_SHADOW_END      (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
+-#define KASAN_SHADOW_OFFSET   (KASAN_SHADOW_END - (1ULL << \
+-                                      (64 - KASAN_SHADOW_SCALE_SHIFT)))
++#define KASAN_SHADOW_OFFSET   _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+ void kasan_init(void);
+ asmlinkage void kasan_early_init(void);
+--- a/arch/riscv/mm/kasan_init.c
++++ b/arch/riscv/mm/kasan_init.c
+@@ -17,6 +17,9 @@ asmlinkage void __init kasan_early_init(
+       uintptr_t i;
+       pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);
++      BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
++              KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
++
+       for (i = 0; i < PTRS_PER_PTE; ++i)
+               set_pte(kasan_early_shadow_pte + i,
+                       mk_pte(virt_to_page(kasan_early_shadow_page),
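Moving KASAN_SHADOW_OFFSET from a computed macro into Kconfig leaves two
definitions that must stay in sync, so the patch ties them together with a
compile-time assertion. The same technique in isolation, with values derived
from this patch's 64-bit Kconfig default and assuming a KASAN shadow scale
shift of 3:

#include <linux/build_bug.h>

#define SHADOW_SCALE_SHIFT	3
#define SHADOW_END		0xffffffc800000000UL	/* derived, hypothetical */
#define SHADOW_OFFSET		0xdfffffc800000000UL	/* Kconfig default */

static void check_kasan_layout(void)
{
	/* The build fails if the Kconfig constant ever drifts away
	 * from the value the old formula would have produced. */
	BUILD_BUG_ON(SHADOW_OFFSET !=
		     SHADOW_END - (1UL << (64 - SHADOW_SCALE_SHIFT)));
}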
diff --git a/queue-5.14/riscv-fix-misalgned-trap-vector-base-address.patch b/queue-5.14/riscv-fix-misalgned-trap-vector-base-address.patch
new file mode 100644
index 0000000..b0ebcb5
--- /dev/null
@@ -0,0 +1,33 @@
+From 64a19591a2938b170aa736443d5d3bf4c51e1388 Mon Sep 17 00:00:00 2001
+From: Chen Lu <181250012@smail.nju.edu.cn>
+Date: Mon, 18 Oct 2021 13:22:38 +0800
+Subject: riscv: fix misalgned trap vector base address
+
+From: Chen Lu <181250012@smail.nju.edu.cn>
+
+commit 64a19591a2938b170aa736443d5d3bf4c51e1388 upstream.
+
+The trap vector marked by label .Lsecondary_park must align on a
+4-byte boundary, as the {m,s}tvec is defined to require 4-byte
+alignment.
+
+Signed-off-by: Chen Lu <181250012@smail.nju.edu.cn>
+Reviewed-by: Anup Patel <anup.patel@wdc.com>
+Fixes: e011995e826f ("RISC-V: Move relocate and few other functions out of __init")
+Cc: stable@vger.kernel.org
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/kernel/head.S |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/riscv/kernel/head.S
++++ b/arch/riscv/kernel/head.S
+@@ -193,6 +193,7 @@ setup_trap_vector:
+       csrw CSR_SCRATCH, zero
+       ret
++.align 2
+ .Lsecondary_park:
+       /* We lack SMP support or have too many harts, so park this hart */
+       wfi
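The requirement comes from the CSR layout: bits [1:0] of {m,s}tvec encode the
trap mode rather than address bits, so a vector base must be 4-byte aligned,
which is exactly what .align 2 (2^2 bytes) guarantees for .Lsecondary_park.
The same rule expressed as a hypothetical C-side check:

#include <stdbool.h>

/* A trap handler address is representable in {m,s}tvec only if its
 * two low bits, which hold the mode field, are clear. */
static bool valid_trap_vector_base(unsigned long addr)
{
	return (addr & 0x3UL) == 0;
}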
diff --git a/queue-5.14/scsi-ibmvfc-fix-up-duplicate-response-detection.patch b/queue-5.14/scsi-ibmvfc-fix-up-duplicate-response-detection.patch
new file mode 100644
index 0000000..4f4d97e
--- /dev/null
@@ -0,0 +1,57 @@
+From e20f80b9b163dc402dca115eed0affba6df5ebb5 Mon Sep 17 00:00:00 2001
+From: Brian King <brking@linux.vnet.ibm.com>
+Date: Tue, 19 Oct 2021 10:21:29 -0500
+Subject: scsi: ibmvfc: Fix up duplicate response detection
+
+From: Brian King <brking@linux.vnet.ibm.com>
+
+commit e20f80b9b163dc402dca115eed0affba6df5ebb5 upstream.
+
+Commit a264cf5e81c7 ("scsi: ibmvfc: Fix command state accounting and stale
+response detection") introduced a regression in detecting duplicate
+responses. This was observed in testing, where a command was sent to the
+VIOS and completed before ibmvfc_send_event() set the active flag to 1.
+The atomic_dec_if_positive() call in ibmvfc_handle_crq() then treated the
+response as a duplicate, so scsi_done() was never called and the command
+eventually hit its SCSI command timeout. This fix simply ensures the
+active flag gets set prior to
+making the hcall to send the command to the VIOS, in order to close this
+window.
+
+Link: https://lore.kernel.org/r/20211019152129.16558-1-brking@linux.vnet.ibm.com
+Fixes: a264cf5e81c7 ("scsi: ibmvfc: Fix command state accounting and stale response detection")
+Cc: stable@vger.kernel.org
+Acked-by: Tyrel Datwyler <tyreld@linux.ibm.com>
+Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/scsi/ibmvscsi/ibmvfc.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -1696,6 +1696,7 @@ static int ibmvfc_send_event(struct ibmv
+       spin_lock_irqsave(&evt->queue->l_lock, flags);
+       list_add_tail(&evt->queue_list, &evt->queue->sent);
++      atomic_set(&evt->active, 1);
+       mb();
+@@ -1710,6 +1711,7 @@ static int ibmvfc_send_event(struct ibmv
+                                    be64_to_cpu(crq_as_u64[1]));
+       if (rc) {
++              atomic_set(&evt->active, 0);
+               list_del(&evt->queue_list);
+               spin_unlock_irqrestore(&evt->queue->l_lock, flags);
+               del_timer(&evt->timer);
+@@ -1737,7 +1739,6 @@ static int ibmvfc_send_event(struct ibmv
+               evt->done(evt);
+       } else {
+-              atomic_set(&evt->active, 1);
+               spin_unlock_irqrestore(&evt->queue->l_lock, flags);
+               ibmvfc_trc_start(evt);
+       }
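The underlying race is an ordering bug: the response path claims an event with
atomic_dec_if_positive(&evt->active), so the flag must be visible before the
command can possibly complete. A reduced, hypothetical sketch of the corrected
submit/complete pairing (the struct and transport call are invented for
illustration):

#include <linux/atomic.h>
#include <asm/barrier.h>

struct my_event {
	atomic_t active;
	void (*done)(struct my_event *evt);
};

int hcall_send(struct my_event *evt);	/* hypothetical transport */

static int submit_event(struct my_event *evt)
{
	int rc;

	atomic_set(&evt->active, 1);	/* mark active before sending */
	mb();				/* ... and make that visible first */

	rc = hcall_send(evt);
	if (rc)
		atomic_set(&evt->active, 0);	/* never sent: undo */
	return rc;
}

static void handle_response(struct my_event *evt)
{
	/* atomic_dec_if_positive() returns the decremented value and
	 * refuses to go negative, so only the first response for an
	 * active event gets past this check. */
	if (atomic_dec_if_positive(&evt->active) < 0)
		return;			/* duplicate or stale response */
	evt->done(evt);
}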
diff --git a/queue-5.14/series b/queue-5.14/series
index bf845ef9c945d056d3f22c093da545fe0985e835..6f4ce2c16f0c3d05079b09a597a15494cd0b2134 100644
@@ -114,3 +114,12 @@ kvm-s390-clear-kicked_mask-before-sleeping-again.patch
 kvm-s390-preserve-deliverable_mask-in-__airqs_kick_s.patch
 scsi-ufs-ufs-exynos-correct-timeout-value-setting-re.patch
 perf-script-fix-perf_sample_weight_struct-support.patch
+scsi-ibmvfc-fix-up-duplicate-response-detection.patch
+riscv-fix-misalgned-trap-vector-base-address.patch
+riscv-do-not-re-populate-shadow-memory-with-kasan_populate_early_shadow.patch
+riscv-fix-asan-stack-clang-build.patch
+perf-script-check-session-header.env.arch-before-using-it.patch
+kvm-x86-xen-fix-kvm_xen_has_interrupt-sleeping-in-kvm_vcpu_block.patch
+kvm-x86-switch-pvclock_gtod_sync_lock-to-a-raw-spinlock.patch
+kvm-sev-es-fix-another-issue-with-string-i-o-vmgexits.patch
+kvm-x86-take-srcu-lock-in-post_kvm_run_save.patch