git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.7-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 15 Jun 2020 14:04:45 +0000 (16:04 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 15 Jun 2020 14:04:45 +0000 (16:04 +0200)
added patches:
asoc-max9867-fix-volume-controls.patch
asoc-tlv320adcx140-fix-mic-gain-registers.patch
io_uring-allow-o_nonblock-async-retry.patch
io_uring-fix-flush-req-refs-underflow.patch
io_uring-re-set-iov-base-len-for-buffer-select-retry.patch
io_uring-use-kvfree-in-io_sqe_buffer_register.patch
kvm-vmx-enable-x86_feature_waitpkg-in-kvm-capabilities.patch
kvm-x86-allow-kvm_state_nested_mtf_pending-in-kvm_state-flags.patch
kvm-x86-don-t-expose-msr_ia32_umwait_control-unconditionally.patch
kvm-x86-fix-apic-page-invalidation-race.patch
kvm-x86-mmu-set-mmio_value-to-0-if-reserved-pf-can-t-be-generated.patch
kvm-x86-respect-singlestep-when-emulating-instruction.patch
perf-x86-intel-add-more-available-bits-for-offcore_response-of-intel-tremont.patch
powerpc-ptdump-properly-handle-non-standard-page-size.patch
x86-reboot-quirks-add-macbook6-1-reboot-quirk.patch
x86-vdso-unbreak-paravirt-vdso-clocks.patch

17 files changed:
queue-5.7/asoc-max9867-fix-volume-controls.patch [new file with mode: 0644]
queue-5.7/asoc-tlv320adcx140-fix-mic-gain-registers.patch [new file with mode: 0644]
queue-5.7/io_uring-allow-o_nonblock-async-retry.patch [new file with mode: 0644]
queue-5.7/io_uring-fix-flush-req-refs-underflow.patch [new file with mode: 0644]
queue-5.7/io_uring-re-set-iov-base-len-for-buffer-select-retry.patch [new file with mode: 0644]
queue-5.7/io_uring-use-kvfree-in-io_sqe_buffer_register.patch [new file with mode: 0644]
queue-5.7/kvm-vmx-enable-x86_feature_waitpkg-in-kvm-capabilities.patch [new file with mode: 0644]
queue-5.7/kvm-x86-allow-kvm_state_nested_mtf_pending-in-kvm_state-flags.patch [new file with mode: 0644]
queue-5.7/kvm-x86-don-t-expose-msr_ia32_umwait_control-unconditionally.patch [new file with mode: 0644]
queue-5.7/kvm-x86-fix-apic-page-invalidation-race.patch [new file with mode: 0644]
queue-5.7/kvm-x86-mmu-set-mmio_value-to-0-if-reserved-pf-can-t-be-generated.patch [new file with mode: 0644]
queue-5.7/kvm-x86-respect-singlestep-when-emulating-instruction.patch [new file with mode: 0644]
queue-5.7/perf-x86-intel-add-more-available-bits-for-offcore_response-of-intel-tremont.patch [new file with mode: 0644]
queue-5.7/powerpc-ptdump-properly-handle-non-standard-page-size.patch [new file with mode: 0644]
queue-5.7/series
queue-5.7/x86-reboot-quirks-add-macbook6-1-reboot-quirk.patch [new file with mode: 0644]
queue-5.7/x86-vdso-unbreak-paravirt-vdso-clocks.patch [new file with mode: 0644]

diff --git a/queue-5.7/asoc-max9867-fix-volume-controls.patch b/queue-5.7/asoc-max9867-fix-volume-controls.patch
new file mode 100644 (file)
index 0000000..3f5fc9f
--- /dev/null
@@ -0,0 +1,42 @@
+From 8ba4dc3cff8cbe2c571063a5fd7116e8bde563ca Mon Sep 17 00:00:00 2001
+From: Pavel Dobias <dobias@2n.cz>
+Date: Fri, 15 May 2020 14:07:57 +0200
+Subject: ASoC: max9867: fix volume controls
+
+From: Pavel Dobias <dobias@2n.cz>
+
+commit 8ba4dc3cff8cbe2c571063a5fd7116e8bde563ca upstream.
+
+The xmax values for Master Playback Volume and Mic Boost
+Capture Volume are specified incorrectly (one greater than the real
+maximum), which results in the wrong dB gain being shown to the user
+in the case of Master Playback Volume.
+
+Signed-off-by: Pavel Dobias <dobias@2n.cz>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200515120757.24669-1-dobias@2n.cz
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/codecs/max9867.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/sound/soc/codecs/max9867.c
++++ b/sound/soc/codecs/max9867.c
+@@ -46,13 +46,13 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_R
+ static const struct snd_kcontrol_new max9867_snd_controls[] = {
+       SOC_DOUBLE_R_TLV("Master Playback Volume", MAX9867_LEFTVOL,
+-                      MAX9867_RIGHTVOL, 0, 41, 1, max9867_master_tlv),
++                      MAX9867_RIGHTVOL, 0, 40, 1, max9867_master_tlv),
+       SOC_DOUBLE_R_TLV("Line Capture Volume", MAX9867_LEFTLINELVL,
+                       MAX9867_RIGHTLINELVL, 0, 15, 1, max9867_line_tlv),
+       SOC_DOUBLE_R_TLV("Mic Capture Volume", MAX9867_LEFTMICGAIN,
+                       MAX9867_RIGHTMICGAIN, 0, 20, 1, max9867_mic_tlv),
+       SOC_DOUBLE_R_TLV("Mic Boost Capture Volume", MAX9867_LEFTMICGAIN,
+-                      MAX9867_RIGHTMICGAIN, 5, 4, 0, max9867_micboost_tlv),
++                      MAX9867_RIGHTMICGAIN, 5, 3, 0, max9867_micboost_tlv),
+       SOC_SINGLE("Digital Sidetone Volume", MAX9867_SIDETONE, 0, 31, 1),
+       SOC_SINGLE_TLV("Digital Playback Volume", MAX9867_DACLEVEL, 0, 15, 1,
+                       max9867_dac_tlv),
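
A note on why the off-by-one is user-visible: for a linear TLV scale, alsa-lib maps a control value v to min_dB + v * step_dB across the declared 0..xmax range. A minimal userspace sketch, using hypothetical scale numbers rather than the codec's real TLV, shows how declaring xmax one step too high skews the gain reported at the top of the range:

  #include <stdio.h>

  /* Hypothetical linear scale for illustration only: min -86.0 dB,
   * 1.0 dB per step.  Value v maps to min_db + v * step_db. */
  static double tlv_db(int v, double min_db, double step_db)
  {
          return min_db + v * step_db;
  }

  int main(void)
  {
          /* With xmax = 41 userspace believes value 41 is reachable,
           * but the register saturates at 40, so the gain shown for
           * "maximum volume" is one whole step off. */
          printf("advertised max: %.1f dB\n", tlv_db(41, -86.0, 1.0));
          printf("real hw max:    %.1f dB\n", tlv_db(40, -86.0, 1.0));
          return 0;
  }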
diff --git a/queue-5.7/asoc-tlv320adcx140-fix-mic-gain-registers.patch b/queue-5.7/asoc-tlv320adcx140-fix-mic-gain-registers.patch
new file mode 100644 (file)
index 0000000..699e2b8
--- /dev/null
@@ -0,0 +1,41 @@
+From be8499c48f115b912f5747c420f66a5e2c31defe Mon Sep 17 00:00:00 2001
+From: Dan Murphy <dmurphy@ti.com>
+Date: Mon, 27 Apr 2020 15:36:08 -0500
+Subject: ASoC: tlv320adcx140: Fix mic gain registers
+
+From: Dan Murphy <dmurphy@ti.com>
+
+commit be8499c48f115b912f5747c420f66a5e2c31defe upstream.
+
+Fix the mic gain registers for channels 2-4.
+The incorrect register was being set as it was touching the CH1 config
+registers.
+
+Fixes: 37bde5acf040 ("ASoC: tlv320adcx140: Add the tlv320adcx140 codec driver family")
+Signed-off-by: Dan Murphy <dmurphy@ti.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200427203608.7031-1-dmurphy@ti.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/codecs/tlv320adcx140.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/sound/soc/codecs/tlv320adcx140.c
++++ b/sound/soc/codecs/tlv320adcx140.c
+@@ -511,11 +511,11 @@ static const struct snd_soc_dapm_route a
+ static const struct snd_kcontrol_new adcx140_snd_controls[] = {
+       SOC_SINGLE_TLV("Analog CH1 Mic Gain Volume", ADCX140_CH1_CFG1, 2, 42, 0,
+                       adc_tlv),
+-      SOC_SINGLE_TLV("Analog CH2 Mic Gain Volume", ADCX140_CH1_CFG2, 2, 42, 0,
++      SOC_SINGLE_TLV("Analog CH2 Mic Gain Volume", ADCX140_CH2_CFG1, 2, 42, 0,
+                       adc_tlv),
+-      SOC_SINGLE_TLV("Analog CH3 Mic Gain Volume", ADCX140_CH1_CFG3, 2, 42, 0,
++      SOC_SINGLE_TLV("Analog CH3 Mic Gain Volume", ADCX140_CH3_CFG1, 2, 42, 0,
+                       adc_tlv),
+-      SOC_SINGLE_TLV("Analog CH4 Mic Gain Volume", ADCX140_CH1_CFG4, 2, 42, 0,
++      SOC_SINGLE_TLV("Analog CH4 Mic Gain Volume", ADCX140_CH4_CFG1, 2, 42, 0,
+                       adc_tlv),
+       SOC_SINGLE_TLV("DRE Threshold", ADCX140_DRE_CFG0, 4, 9, 0,
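
For context on how the wrong names slipped through: the codec's channel config registers come in per-channel blocks, so ADCX140_CH1_CFG2 is CH1's third config register, not CH2's gain register. A small sketch of the addressing, with the base and stride assumed for illustration only (the real values live in sound/soc/codecs/tlv320adcx140.h):

  #include <stdio.h>

  /* Assumed layout: CFG0..CFG4 per channel, channel blocks spaced a
   * fixed stride apart.  Base and stride here are illustrative. */
  #define CH1_CFG0  0x3c
  #define CH_STRIDE 5

  static unsigned int ch_cfg(int channel, int cfg)
  {
          return CH1_CFG0 + (channel - 1) * CH_STRIDE + cfg;
  }

  int main(void)
  {
          /* CH1_CFG2 and CH2_CFG1 resolve to different registers: */
          printf("CH1_CFG2 = 0x%02x\n", ch_cfg(1, 2));
          printf("CH2_CFG1 = 0x%02x\n", ch_cfg(2, 1));
          return 0;
  }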
diff --git a/queue-5.7/io_uring-allow-o_nonblock-async-retry.patch b/queue-5.7/io_uring-allow-o_nonblock-async-retry.patch
new file mode 100644 (file)
index 0000000..aae2f9c
--- /dev/null
@@ -0,0 +1,56 @@
+From c5b856255cbc3b664d686a83fa9397a835e063de Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Tue, 9 Jun 2020 19:23:05 -0600
+Subject: io_uring: allow O_NONBLOCK async retry
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit c5b856255cbc3b664d686a83fa9397a835e063de upstream.
+
+We can assume that O_NONBLOCK is always honored, even if we don't
+have a ->read/write_iter() for the file type. Also unify the read/write
+checking for allowing async punt, having the write side factor in the
+REQ_F_NOWAIT flag as well.
+
+Cc: stable@vger.kernel.org
+Fixes: 490e89676a52 ("io_uring: only force async punt if poll based retry can't handle it")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/io_uring.c |   10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -2038,6 +2038,10 @@ static bool io_file_supports_async(struc
+       if (S_ISREG(mode) && file->f_op != &io_uring_fops)
+               return true;
++      /* any ->read/write should understand O_NONBLOCK */
++      if (file->f_flags & O_NONBLOCK)
++              return true;
++
+       if (!(file->f_mode & FMODE_NOWAIT))
+               return false;
+@@ -2080,8 +2084,7 @@ static int io_prep_rw(struct io_kiocb *r
+               kiocb->ki_ioprio = get_current_ioprio();
+       /* don't allow async punt if RWF_NOWAIT was requested */
+-      if ((kiocb->ki_flags & IOCB_NOWAIT) ||
+-          (req->file->f_flags & O_NONBLOCK))
++      if (kiocb->ki_flags & IOCB_NOWAIT)
+               req->flags |= REQ_F_NOWAIT;
+       if (force_nonblock)
+@@ -2722,7 +2725,8 @@ copy_iov:
+                       if (ret)
+                               goto out_free;
+                       /* any defer here is final, must blocking retry */
+-                      if (!file_can_poll(req->file))
++                      if (!(req->flags & REQ_F_NOWAIT) &&
++                          !file_can_poll(req->file))
+                               req->flags |= REQ_F_MUST_PUNT;
+                       return -EAGAIN;
+               }
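
How the fix is exercised from userspace, as a rough liburing sketch (assuming liburing is installed; build with -luring): before this change, a read on an O_NONBLOCK regular file could trip the async-punt checks instead of being retried.

  #include <fcntl.h>
  #include <stdio.h>
  #include <liburing.h>

  int main(void)
  {
          struct io_uring ring;
          struct io_uring_sqe *sqe;
          struct io_uring_cqe *cqe;
          char buf[4096];
          int fd = open("/etc/hostname", O_RDONLY | O_NONBLOCK);

          if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
                  return 1;

          sqe = io_uring_get_sqe(&ring);
          io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
          io_uring_submit(&ring);

          /* With the fix, O_NONBLOCK requests can still be punted and
           * retried asynchronously instead of failing early. */
          if (io_uring_wait_cqe(&ring, &cqe) == 0) {
                  printf("read returned %d\n", cqe->res);
                  io_uring_cqe_seen(&ring, cqe);
          }
          io_uring_queue_exit(&ring);
          return 0;
  }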
diff --git a/queue-5.7/io_uring-fix-flush-req-refs-underflow.patch b/queue-5.7/io_uring-fix-flush-req-refs-underflow.patch
new file mode 100644 (file)
index 0000000..c87f9fc
--- /dev/null
@@ -0,0 +1,34 @@
+From 4518a3cc273cf82efdd36522fb1f13baad173c70 Mon Sep 17 00:00:00 2001
+From: Pavel Begunkov <asml.silence@gmail.com>
+Date: Tue, 26 May 2020 20:34:02 +0300
+Subject: io_uring: fix flush req->refs underflow
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+commit 4518a3cc273cf82efdd36522fb1f13baad173c70 upstream.
+
+In io_uring_cancel_files(), after refcount_sub_and_test() leaves
+req->refs at 0, it calls io_put_req(), which would also put a ref. Call
+io_free_req() instead.
+
+Cc: stable@vger.kernel.org
+Fixes: 2ca10259b418 ("io_uring: prune request from overflow list on flush")
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/io_uring.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -7390,7 +7390,7 @@ static void io_uring_cancel_files(struct
+                        * all we had, then we're done with this request.
+                        */
+                       if (refcount_sub_and_test(2, &cancel_req->refs)) {
+-                              io_put_req(cancel_req);
++                              io_free_req(cancel_req);
+                               finish_wait(&ctx->inflight_wait, &wait);
+                               continue;
+                       }
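
The bug pattern in miniature, as a userspace analogue with C11 atomics (not kernel code): refcount_sub_and_test(2, &refs) returning true means the count already hit zero, so the object must be freed directly; issuing another put at that point decrements from zero.

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  static atomic_int refs = 2;

  static bool sub_and_test(int n)
  {
          return atomic_fetch_sub(&refs, n) - n == 0;
  }

  int main(void)
  {
          if (sub_and_test(2)) {
                  /* correct: free the object here, like io_free_req() */
                  printf("refs hit zero, free directly\n");
                  /* buggy: a further io_put_req()-style decrement would
                   * drive refs to -1, i.e. the underflow being fixed */
          }
          return 0;
  }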
diff --git a/queue-5.7/io_uring-re-set-iov-base-len-for-buffer-select-retry.patch b/queue-5.7/io_uring-re-set-iov-base-len-for-buffer-select-retry.patch
new file mode 100644 (file)
index 0000000..7f053ca
--- /dev/null
@@ -0,0 +1,38 @@
+From dddb3e26f6d88c5344d28cb5ff9d3d6fa05c4f7a Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Thu, 4 Jun 2020 11:27:01 -0600
+Subject: io_uring: re-set iov base/len for buffer select retry
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit dddb3e26f6d88c5344d28cb5ff9d3d6fa05c4f7a upstream.
+
+We already have the buffer selected, but we should set the iter list
+again.
+
+Cc: stable@vger.kernel.org # v5.7
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/io_uring.c |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -2333,8 +2333,14 @@ static ssize_t __io_iov_buffer_select(st
+ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
+                                   bool needs_lock)
+ {
+-      if (req->flags & REQ_F_BUFFER_SELECTED)
++      if (req->flags & REQ_F_BUFFER_SELECTED) {
++              struct io_buffer *kbuf;
++
++              kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
++              iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
++              iov[0].iov_len = kbuf->len;
+               return 0;
++      }
+       if (!req->rw.len)
+               return 0;
+       else if (req->rw.len > 1)
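
For reference, the feature being patched is IORING_OP_PROVIDE_BUFFERS / IOSQE_BUFFER_SELECT (new in 5.7); the fix matters when a buffer-select read has to be retried after a buffer was already picked. A rough liburing sketch of the usage, error handling trimmed:

  #include <stdlib.h>
  #include <liburing.h>

  int main(void)
  {
          struct io_uring ring;
          struct io_uring_sqe *sqe;
          char *pool = malloc(4 * 4096);

          io_uring_queue_init(8, &ring, 0);

          /* hand the kernel 4 buffers of 4K each, buffer group 0 */
          sqe = io_uring_get_sqe(&ring);
          io_uring_prep_provide_buffers(sqe, pool, 4096, 4, 0, 0);
          io_uring_submit(&ring);

          /* a read that lets the kernel pick a buffer from group 0 */
          sqe = io_uring_get_sqe(&ring);
          io_uring_prep_read(sqe, 0 /* stdin */, NULL, 4096, 0);
          sqe->flags |= IOSQE_BUFFER_SELECT;
          sqe->buf_group = 0;
          io_uring_submit(&ring);

          io_uring_queue_exit(&ring);
          return 0;
  }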
diff --git a/queue-5.7/io_uring-use-kvfree-in-io_sqe_buffer_register.patch b/queue-5.7/io_uring-use-kvfree-in-io_sqe_buffer_register.patch
new file mode 100644 (file)
index 0000000..1266c6a
--- /dev/null
@@ -0,0 +1,36 @@
+From a8c73c1a614f6da6c0b04c393f87447e28cb6de4 Mon Sep 17 00:00:00 2001
+From: Denis Efremov <efremov@linux.com>
+Date: Fri, 5 Jun 2020 12:32:03 +0300
+Subject: io_uring: use kvfree() in io_sqe_buffer_register()
+
+From: Denis Efremov <efremov@linux.com>
+
+commit a8c73c1a614f6da6c0b04c393f87447e28cb6de4 upstream.
+
+Use kvfree() to free the pages and vmas, since they are allocated by
+kvmalloc_array() in a loop.
+
+Fixes: d4ef647510b1 ("io_uring: avoid page allocation warnings")
+Signed-off-by: Denis Efremov <efremov@linux.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200605093203.40087-1-efremov@linux.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/io_uring.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -7093,8 +7093,8 @@ static int io_sqe_buffer_register(struct
+               ret = 0;
+               if (!pages || nr_pages > got_pages) {
+-                      kfree(vmas);
+-                      kfree(pages);
++                      kvfree(vmas);
++                      kvfree(pages);
+                       pages = kvmalloc_array(nr_pages, sizeof(struct page *),
+                                               GFP_KERNEL);
+                       vmas = kvmalloc_array(nr_pages,
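
The underlying rule, as a kernel-style sketch (not a complete module): kvmalloc_array() may hand back either kmalloc- or vmalloc-backed memory, and only kvfree() copes with both, so allocations made with the kv* helpers must never be released with plain kfree().

  #include <linux/mm.h>

  static int example(unsigned int nr)
  {
          struct page **pages;

          pages = kvmalloc_array(nr, sizeof(struct page *), GFP_KERNEL);
          if (!pages)
                  return -ENOMEM;

          /* ... use pages ... */

          kvfree(pages);  /* correct for either backing allocator */
          return 0;
  }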
diff --git a/queue-5.7/kvm-vmx-enable-x86_feature_waitpkg-in-kvm-capabilities.patch b/queue-5.7/kvm-vmx-enable-x86_feature_waitpkg-in-kvm-capabilities.patch
new file mode 100644 (file)
index 0000000..684b432
--- /dev/null
@@ -0,0 +1,42 @@
+From 0abcc8f65cc23b65bc8d1614cc64b02b1641ed7c Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Sat, 23 May 2020 19:14:54 +0300
+Subject: KVM: VMX: enable X86_FEATURE_WAITPKG in KVM capabilities
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+commit 0abcc8f65cc23b65bc8d1614cc64b02b1641ed7c upstream.
+
+Even though we might not allow the guest to use WAITPKG's new
+instructions, we should tell KVM that the feature is supported by the
+host CPU.
+
+Note that vmx_waitpkg_supported checks that WAITPKG _can_ be set in
+secondary execution controls as specified by the VMX capability MSR, rather
+than whether we actually enable it for a guest.
+
+Cc: stable@vger.kernel.org
+Fixes: e69e72faa3a0 ("KVM: x86: Add support for user wait instructions")
+Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Message-Id: <20200523161455.3940-2-mlevitsk@redhat.com>
+Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/vmx/vmx.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -7138,6 +7138,9 @@ static __init void vmx_set_cpu_caps(void
+       /* CPUID 0x80000001 */
+       if (!cpu_has_vmx_rdtscp())
+               kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
++
++      if (vmx_waitpkg_supported())
++              kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
+ }
+ static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
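
The user-visible effect is that WAITPKG now shows up in KVM_GET_SUPPORTED_CPUID. A minimal userspace sketch of the check (WAITPKG is CPUID leaf 7, subleaf 0, ECX bit 5; error handling trimmed):

  #include <fcntl.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  int main(void)
  {
          int kvm = open("/dev/kvm", O_RDWR);
          struct kvm_cpuid2 *c =
                  calloc(1, sizeof(*c) + 128 * sizeof(struct kvm_cpuid_entry2));

          c->nent = 128;
          ioctl(kvm, KVM_GET_SUPPORTED_CPUID, c);

          for (unsigned int i = 0; i < c->nent; i++)
                  if (c->entries[i].function == 7 && c->entries[i].index == 0)
                          printf("WAITPKG: %s\n",
                                 (c->entries[i].ecx >> 5) & 1 ? "yes" : "no");
          return 0;
  }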
diff --git a/queue-5.7/kvm-x86-allow-kvm_state_nested_mtf_pending-in-kvm_state-flags.patch b/queue-5.7/kvm-x86-allow-kvm_state_nested_mtf_pending-in-kvm_state-flags.patch
new file mode 100644 (file)
index 0000000..fa3fe7a
--- /dev/null
@@ -0,0 +1,35 @@
+From df2a69af85bef169ab6810cc57f6b6b943941e7e Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Tue, 19 May 2020 12:51:32 -0400
+Subject: KVM: x86: allow KVM_STATE_NESTED_MTF_PENDING in kvm_state flags
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit df2a69af85bef169ab6810cc57f6b6b943941e7e upstream.
+
+The migration functionality was left incomplete in commit 5ef8acbdd687
+("KVM: nVMX: Emulate MTF when performing instruction emulation", 2020-02-23),
+fix it.
+
+Fixes: 5ef8acbdd687 ("KVM: nVMX: Emulate MTF when performing instruction emulation")
+Cc: stable@vger.kernel.org
+Reviewed-by: Oliver Upton <oupton@google.com>
+Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4586,7 +4586,7 @@ long kvm_arch_vcpu_ioctl(struct file *fi
+               if (kvm_state.flags &
+                   ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE
+-                    | KVM_STATE_NESTED_EVMCS))
++                    | KVM_STATE_NESTED_EVMCS | KVM_STATE_NESTED_MTF_PENDING))
+                       break;
+               /* nested_run_pending implies guest_mode.  */
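
What this unbreaks in practice is the nested-state round-trip a VMM performs on live migration. A minimal sketch, with sizing and error handling trimmed (a real VMM gets the buffer size from KVM_CAP_NESTED_STATE):

  #include <stdlib.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  void migrate_nested(int src_vcpu, int dst_vcpu, size_t size)
  {
          struct kvm_nested_state *st = calloc(1, size);

          st->size = size;
          ioctl(src_vcpu, KVM_GET_NESTED_STATE, st);
          /* st->flags may now carry KVM_STATE_NESTED_MTF_PENDING; before
           * this fix the destination rejected that flag with EINVAL. */
          ioctl(dst_vcpu, KVM_SET_NESTED_STATE, st);
          free(st);
  }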
diff --git a/queue-5.7/kvm-x86-don-t-expose-msr_ia32_umwait_control-unconditionally.patch b/queue-5.7/kvm-x86-don-t-expose-msr_ia32_umwait_control-unconditionally.patch
new file mode 100644 (file)
index 0000000..b3f6bb1
--- /dev/null
@@ -0,0 +1,41 @@
+From f4cfcd2d5aea4e96c5d483c476f3057b6b7baf6a Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Sat, 23 May 2020 19:14:55 +0300
+Subject: KVM: x86: don't expose MSR_IA32_UMWAIT_CONTROL unconditionally
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+commit f4cfcd2d5aea4e96c5d483c476f3057b6b7baf6a upstream.
+
+This MSR is only available when the host supports the WAITPKG feature.
+
+Exposing it unconditionally breaks a nested guest if the L1 hypervisor is
+set to ignore unknown MSRs, because the only other safety check the
+kernel does is attempting to read the MSR and rejecting it if the read
+raises an exception.
+
+Cc: stable@vger.kernel.org
+Fixes: 6e3ba4abce ("KVM: vmx: Emulate MSR IA32_UMWAIT_CONTROL")
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Message-Id: <20200523161455.3940-3-mlevitsk@redhat.com>
+Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5242,6 +5242,10 @@ static void kvm_init_msr_list(void)
+                       if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
+                               continue;
+                       break;
++              case MSR_IA32_UMWAIT_CONTROL:
++                      if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG))
++                              continue;
++                      break;
+               case MSR_IA32_RTIT_CTL:
+               case MSR_IA32_RTIT_STATUS:
+                       if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT))
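
The "attempts to read the MSR" fallback mentioned above is the usual probe-by-read pattern, sketched here in kernel style (simplified; not the literal kvm_init_msr_list code):

  #include <asm/msr.h>

  static bool msr_probably_exists(u32 msr)
  {
          u64 val;

          /* rdmsrl_safe() returns 0 when the read did not fault.  This
           * is exactly the check defeated when an L1 hypervisor ignores
           * unknown MSR accesses, hence the explicit
           * X86_FEATURE_WAITPKG gate added above. */
          return rdmsrl_safe(msr, &val) == 0;
  }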
diff --git a/queue-5.7/kvm-x86-fix-apic-page-invalidation-race.patch b/queue-5.7/kvm-x86-fix-apic-page-invalidation-race.patch
new file mode 100644 (file)
index 0000000..6827d8a
--- /dev/null
@@ -0,0 +1,163 @@
+From e649b3f0188f8fd34dd0dde8d43fd3312b902fb2 Mon Sep 17 00:00:00 2001
+From: Eiichi Tsukata <eiichi.tsukata@nutanix.com>
+Date: Sat, 6 Jun 2020 13:26:27 +0900
+Subject: KVM: x86: Fix APIC page invalidation race
+
+From: Eiichi Tsukata <eiichi.tsukata@nutanix.com>
+
+commit e649b3f0188f8fd34dd0dde8d43fd3312b902fb2 upstream.
+
+Commit b1394e745b94 ("KVM: x86: fix APIC page invalidation") tried
+to fix inappropriate APIC page invalidation by re-introducing arch
+specific kvm_arch_mmu_notifier_invalidate_range() and calling it from
+kvm_mmu_notifier_invalidate_range_start. However, the patch left a
+possible race where the VMCS APIC address cache is updated *before*
+it is unmapped:
+
+  (Invalidator) kvm_mmu_notifier_invalidate_range_start()
+  (Invalidator) kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD)
+  (KVM VCPU) vcpu_enter_guest()
+  (KVM VCPU) kvm_vcpu_reload_apic_access_page()
+  (Invalidator) actually unmap page
+
+Because of the above race, there can be a mismatch between the
+host physical address stored in the APIC_ACCESS_PAGE VMCS field and
+the host physical address stored in the EPT entry for the APIC GPA
+(0xfee00000).  When this happens, the processor will not trap APIC
+accesses, and will instead show the raw contents of the APIC-access page.
+Because Windows OS periodically checks for unexpected modifications to
+the LAPIC register, this will show up as a BSOD crash with BugCheck
+CRITICAL_STRUCTURE_CORRUPTION (109) we are currently seeing in
+https://bugzilla.redhat.com/show_bug.cgi?id=1751017.
+
+The root cause of the issue is that kvm_arch_mmu_notifier_invalidate_range()
+cannot guarantee that no additional references are taken to the pages in
+the range before kvm_mmu_notifier_invalidate_range_end().  Fortunately,
+this case is supported by the MMU notifier API, as documented in
+include/linux/mmu_notifier.h:
+
+        * If the subsystem
+         * can't guarantee that no additional references are taken to
+         * the pages in the range, it has to implement the
+         * invalidate_range() notifier to remove any references taken
+         * after invalidate_range_start().
+
+The fix therefore is to reload the APIC-access page field in the VMCS
+from kvm_mmu_notifier_invalidate_range() instead of ..._range_start().
+
+Cc: stable@vger.kernel.org
+Fixes: b1394e745b94 ("KVM: x86: fix APIC page invalidation")
+Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=197951
+Signed-off-by: Eiichi Tsukata <eiichi.tsukata@nutanix.com>
+Message-Id: <20200606042627.61070-1-eiichi.tsukata@nutanix.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c       |    7 ++-----
+ include/linux/kvm_host.h |    4 ++--
+ virt/kvm/kvm_main.c      |   26 ++++++++++++++++----------
+ 3 files changed, 20 insertions(+), 17 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8154,9 +8154,8 @@ static void vcpu_load_eoi_exitmap(struct
+       kvm_x86_ops.load_eoi_exitmap(vcpu, eoi_exit_bitmap);
+ }
+-int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+-              unsigned long start, unsigned long end,
+-              bool blockable)
++void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
++                                          unsigned long start, unsigned long end)
+ {
+       unsigned long apic_address;
+@@ -8167,8 +8166,6 @@ int kvm_arch_mmu_notifier_invalidate_ran
+       apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+       if (start <= apic_address && apic_address < end)
+               kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
+-
+-      return 0;
+ }
+ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -1406,8 +1406,8 @@ static inline long kvm_arch_vcpu_async_i
+ }
+ #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
+-int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+-              unsigned long start, unsigned long end, bool blockable);
++void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
++                                          unsigned long start, unsigned long end);
+ #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
+ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -155,10 +155,9 @@ static void kvm_uevent_notify_change(uns
+ static unsigned long long kvm_createvm_count;
+ static unsigned long long kvm_active_vms;
+-__weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+-              unsigned long start, unsigned long end, bool blockable)
++__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
++                                                 unsigned long start, unsigned long end)
+ {
+-      return 0;
+ }
+ bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
+@@ -384,6 +383,18 @@ static inline struct kvm *mmu_notifier_t
+       return container_of(mn, struct kvm, mmu_notifier);
+ }
++static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
++                                            struct mm_struct *mm,
++                                            unsigned long start, unsigned long end)
++{
++      struct kvm *kvm = mmu_notifier_to_kvm(mn);
++      int idx;
++
++      idx = srcu_read_lock(&kvm->srcu);
++      kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
++      srcu_read_unlock(&kvm->srcu, idx);
++}
++
+ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
+                                       struct mm_struct *mm,
+                                       unsigned long address,
+@@ -408,7 +419,6 @@ static int kvm_mmu_notifier_invalidate_r
+ {
+       struct kvm *kvm = mmu_notifier_to_kvm(mn);
+       int need_tlb_flush = 0, idx;
+-      int ret;
+       idx = srcu_read_lock(&kvm->srcu);
+       spin_lock(&kvm->mmu_lock);
+@@ -425,14 +435,9 @@ static int kvm_mmu_notifier_invalidate_r
+               kvm_flush_remote_tlbs(kvm);
+       spin_unlock(&kvm->mmu_lock);
+-
+-      ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start,
+-                                      range->end,
+-                                      mmu_notifier_range_blockable(range));
+-
+       srcu_read_unlock(&kvm->srcu, idx);
+-      return ret;
++      return 0;
+ }
+ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
+@@ -538,6 +543,7 @@ static void kvm_mmu_notifier_release(str
+ }
+ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
++      .invalidate_range       = kvm_mmu_notifier_invalidate_range,
+       .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
+       .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
+       .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
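
The contract being relied on, as a kernel-style sketch of a secondary-MMU user (illustrative, not from the patch): a subsystem that can re-take references after invalidate_range_start() must implement the .invalidate_range hook, which runs while the primary mapping is being torn down.

  #include <linux/mmu_notifier.h>

  static void my_invalidate_range(struct mmu_notifier *mn,
                                  struct mm_struct *mm,
                                  unsigned long start, unsigned long end)
  {
          /* Drop or refresh any cached translation covering
           * [start, end).  Because this runs under the invalidation,
           * a racing refresh cannot re-cache the stale page -- the
           * ordering the VCPU-vs-invalidator race above violated. */
  }

  static const struct mmu_notifier_ops my_ops = {
          .invalidate_range = my_invalidate_range,
  };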
diff --git a/queue-5.7/kvm-x86-mmu-set-mmio_value-to-0-if-reserved-pf-can-t-be-generated.patch b/queue-5.7/kvm-x86-mmu-set-mmio_value-to-0-if-reserved-pf-can-t-be-generated.patch
new file mode 100644 (file)
index 0000000..1190d45
--- /dev/null
@@ -0,0 +1,64 @@
+From 6129ed877d409037b79866327102c9dc59a302fe Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Wed, 27 May 2020 01:49:09 -0700
+Subject: KVM: x86/mmu: Set mmio_value to '0' if reserved #PF can't be generated
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit 6129ed877d409037b79866327102c9dc59a302fe upstream.
+
+Set the mmio_value to '0' instead of simply clearing the present bit to
+squash a benign warning in kvm_mmu_set_mmio_spte_mask() that complains
+about the mmio_value overlapping the lower GFN mask on systems with 52
+bits of PA space.
+
+Opportunistically clean up the code and comments.
+
+Cc: stable@vger.kernel.org
+Fixes: d43e2675e96fc ("KVM: x86: only do L1TF workaround on affected processors")
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Message-Id: <20200527084909.23492-1-sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/mmu/mmu.c |   27 +++++++++------------------
+ 1 file changed, 9 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -6143,25 +6143,16 @@ static void kvm_set_mmio_spte_mask(void)
+       u64 mask;
+       /*
+-       * Set the reserved bits and the present bit of an paging-structure
+-       * entry to generate page fault with PFER.RSV = 1.
++       * Set a reserved PA bit in MMIO SPTEs to generate page faults with
++       * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
++       * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
++       * 52-bit physical addresses then there are no reserved PA bits in the
++       * PTEs and so the reserved PA approach must be disabled.
+        */
+-
+-      /*
+-       * Mask the uppermost physical address bit, which would be reserved as
+-       * long as the supported physical address width is less than 52.
+-       */
+-      mask = 1ull << 51;
+-
+-      /* Set the present bit. */
+-      mask |= 1ull;
+-
+-      /*
+-       * If reserved bit is not supported, clear the present bit to disable
+-       * mmio page fault.
+-       */
+-      if (shadow_phys_bits == 52)
+-              mask &= ~1ull;
++      if (shadow_phys_bits < 52)
++              mask = BIT_ULL(51) | PT_PRESENT_MASK;
++      else
++              mask = 0;
+       kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
+ }
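
The bit arithmetic, restated as runnable userspace C: bit 51 is a reserved physical-address bit only while the CPU supports fewer than 52 PA bits, so on 52-bit parts the reserved-#PF trick has nothing to set and must be disabled.

  #include <stdint.h>
  #include <stdio.h>

  #define BIT_ULL(n)   (1ULL << (n))
  #define PT_PRESENT   BIT_ULL(0)

  static uint64_t mmio_mask(int shadow_phys_bits)
  {
          return shadow_phys_bits < 52 ? BIT_ULL(51) | PT_PRESENT : 0;
  }

  int main(void)
  {
          printf("46-bit host: %#llx\n",
                 (unsigned long long)mmio_mask(46));
          printf("52-bit host: %#llx\n",
                 (unsigned long long)mmio_mask(52));
          return 0;
  }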
diff --git a/queue-5.7/kvm-x86-respect-singlestep-when-emulating-instruction.patch b/queue-5.7/kvm-x86-respect-singlestep-when-emulating-instruction.patch
new file mode 100644 (file)
index 0000000..11f33e3
--- /dev/null
@@ -0,0 +1,43 @@
+From 384dea1c9183880be183cfaae161d99aafd16df6 Mon Sep 17 00:00:00 2001
+From: Felipe Franciosi <felipe@nutanix.com>
+Date: Tue, 19 May 2020 08:11:22 +0000
+Subject: KVM: x86: respect singlestep when emulating instruction
+
+From: Felipe Franciosi <felipe@nutanix.com>
+
+commit 384dea1c9183880be183cfaae161d99aafd16df6 upstream.
+
+When userspace configures KVM_GUESTDBG_SINGLESTEP, KVM will manage the
+presence of X86_EFLAGS_TF via kvm_set/get_rflags on vcpus. The actual
+rflag bit is therefore hidden from callers.
+
+That includes init_emulate_ctxt() which uses the value returned from
+kvm_get_rflags() to set ctxt->tf. As a result, x86_emulate_instruction()
+will skip a single step, leaving singlestep_rip stale and not returning
+to userspace.
+
+This resolves the issue by observing the vcpu guest_debug configuration
+alongside ctxt->tf in x86_emulate_instruction(), performing the single
+step if set.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Felipe Franciosi <felipe@nutanix.com>
+Message-Id: <20200519081048.8204-1-felipe@nutanix.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6923,7 +6923,7 @@ restart:
+               if (!ctxt->have_exception ||
+                   exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
+                       kvm_rip_write(vcpu, ctxt->eip);
+-                      if (r && ctxt->tf)
++                      if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+                               r = kvm_vcpu_do_singlestep(vcpu);
+                       if (kvm_x86_ops.update_emulated_instruction)
+                               kvm_x86_ops.update_emulated_instruction(vcpu);
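
The userspace side that this repairs, sketched with the documented KVM debug ioctl (error handling trimmed): once single-stepping is armed, every KVM_RUN should exit back after one instruction -- now including instructions KVM emulates.

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  int enable_singlestep(int vcpu_fd)
  {
          struct kvm_guest_debug dbg;

          memset(&dbg, 0, sizeof(dbg));
          dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
          return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
  }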
diff --git a/queue-5.7/perf-x86-intel-add-more-available-bits-for-offcore_response-of-intel-tremont.patch b/queue-5.7/perf-x86-intel-add-more-available-bits-for-offcore_response-of-intel-tremont.patch
new file mode 100644 (file)
index 0000000..46ef31a
--- /dev/null
@@ -0,0 +1,39 @@
+From 0813c40556fce1eeefb996e020cc5339e0b84137 Mon Sep 17 00:00:00 2001
+From: Kan Liang <kan.liang@linux.intel.com>
+Date: Fri, 1 May 2020 05:54:42 -0700
+Subject: perf/x86/intel: Add more available bits for OFFCORE_RESPONSE of Intel Tremont
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+commit 0813c40556fce1eeefb996e020cc5339e0b84137 upstream.
+
+The mask in the extra_regs for Intel Tremont needs to be extended to
+allow more defined bits.
+
+"Outstanding Requests" (bit 63) is only available on MSR_OFFCORE_RSP0;
+
+Fixes: 6daeb8737f8a ("perf/x86/intel: Add Tremont core PMU support")
+Reported-by: Stephane Eranian <eranian@google.com>
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20200501125442.7030-1-kan.liang@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/intel/core.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -1892,8 +1892,8 @@ static __initconst const u64 tnt_hw_cach
+ static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
+       /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
+-      INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffff9fffull, RSP_0),
+-      INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xffffff9fffull, RSP_1),
++      INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
++      INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
+       EVENT_EXTRA_END
+ };
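
What the widened masks gate, restated as runnable userspace C using the constants from the hunk above: a raw OFFCORE_RSP event config is rejected if it sets bits outside the per-MSR valid mask, and bit 63 ("Outstanding Requests") is valid only in the RSP_0 mask.

  #include <stdint.h>
  #include <stdio.h>

  #define RSP_0_MASK 0x800ff0ffffff9fffULL
  #define RSP_1_MASK 0xff0ffffff9fffULL

  static int config_valid(uint64_t config, uint64_t mask)
  {
          return (config & ~mask) == 0;
  }

  int main(void)
  {
          uint64_t outstanding = 1ULL << 63;

          printf("bit 63 via RSP_0: %d\n",
                 config_valid(outstanding, RSP_0_MASK));
          printf("bit 63 via RSP_1: %d\n",
                 config_valid(outstanding, RSP_1_MASK));
          return 0;
  }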
diff --git a/queue-5.7/powerpc-ptdump-properly-handle-non-standard-page-size.patch b/queue-5.7/powerpc-ptdump-properly-handle-non-standard-page-size.patch
new file mode 100644 (file)
index 0000000..c33c796
--- /dev/null
@@ -0,0 +1,124 @@
+From b00ff6d8c1c3898b0f768cbb38ef722d25bd2f39 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Tue, 19 May 2020 05:48:54 +0000
+Subject: powerpc/ptdump: Properly handle non standard page size
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+commit b00ff6d8c1c3898b0f768cbb38ef722d25bd2f39 upstream.
+
+In order to properly display information regardless of the page size,
+it is necessary to take the real page size into account.
+
+Fixes: cabe8138b23c ("powerpc: dump as a single line areas mapping a single physical page.")
+Cc: stable@vger.kernel.org
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/a53b2a0ffd042a8d85464bf90d55bc5b970e00a1.1589866984.git.christophe.leroy@csgroup.eu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/ptdump/ptdump.c |   21 ++++++++++++---------
+ 1 file changed, 12 insertions(+), 9 deletions(-)
+
+--- a/arch/powerpc/mm/ptdump/ptdump.c
++++ b/arch/powerpc/mm/ptdump/ptdump.c
+@@ -60,6 +60,7 @@ struct pg_state {
+       unsigned long start_address;
+       unsigned long start_pa;
+       unsigned long last_pa;
++      unsigned long page_size;
+       unsigned int level;
+       u64 current_flags;
+       bool check_wx;
+@@ -157,9 +158,9 @@ static void dump_addr(struct pg_state *s
+ #endif
+       pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
+-      if (st->start_pa == st->last_pa && st->start_address + PAGE_SIZE != addr) {
++      if (st->start_pa == st->last_pa && st->start_address + st->page_size != addr) {
+               pt_dump_seq_printf(st->seq, "[" REG "]", st->start_pa);
+-              delta = PAGE_SIZE >> 10;
++              delta = st->page_size >> 10;
+       } else {
+               pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
+               delta = (addr - st->start_address) >> 10;
+@@ -190,7 +191,7 @@ static void note_prot_wx(struct pg_state
+ }
+ static void note_page(struct pg_state *st, unsigned long addr,
+-             unsigned int level, u64 val)
++             unsigned int level, u64 val, unsigned long page_size)
+ {
+       u64 flag = val & pg_level[level].mask;
+       u64 pa = val & PTE_RPN_MASK;
+@@ -202,6 +203,7 @@ static void note_page(struct pg_state *s
+               st->start_address = addr;
+               st->start_pa = pa;
+               st->last_pa = pa;
++              st->page_size = page_size;
+               pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+       /*
+        * Dump the section of virtual memory when:
+@@ -213,7 +215,7 @@ static void note_page(struct pg_state *s
+        */
+       } else if (flag != st->current_flags || level != st->level ||
+                  addr >= st->marker[1].start_address ||
+-                 (pa != st->last_pa + PAGE_SIZE &&
++                 (pa != st->last_pa + st->page_size &&
+                   (pa != st->start_pa || st->start_pa != st->last_pa))) {
+               /* Check the PTE flags */
+@@ -241,6 +243,7 @@ static void note_page(struct pg_state *s
+               st->start_address = addr;
+               st->start_pa = pa;
+               st->last_pa = pa;
++              st->page_size = page_size;
+               st->current_flags = flag;
+               st->level = level;
+       } else {
+@@ -256,7 +259,7 @@ static void walk_pte(struct pg_state *st
+       for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
+               addr = start + i * PAGE_SIZE;
+-              note_page(st, addr, 4, pte_val(*pte));
++              note_page(st, addr, 4, pte_val(*pte), PAGE_SIZE);
+       }
+ }
+@@ -273,7 +276,7 @@ static void walk_pmd(struct pg_state *st
+                       /* pmd exists */
+                       walk_pte(st, pmd, addr);
+               else
+-                      note_page(st, addr, 3, pmd_val(*pmd));
++                      note_page(st, addr, 3, pmd_val(*pmd), PMD_SIZE);
+       }
+ }
+@@ -289,7 +292,7 @@ static void walk_pud(struct pg_state *st
+                       /* pud exists */
+                       walk_pmd(st, pud, addr);
+               else
+-                      note_page(st, addr, 2, pud_val(*pud));
++                      note_page(st, addr, 2, pud_val(*pud), PUD_SIZE);
+       }
+ }
+@@ -308,7 +311,7 @@ static void walk_pagetables(struct pg_st
+                       /* pgd exists */
+                       walk_pud(st, pgd, addr);
+               else
+-                      note_page(st, addr, 1, pgd_val(*pgd));
++                      note_page(st, addr, 1, pgd_val(*pgd), PGDIR_SIZE);
+       }
+ }
+@@ -363,7 +366,7 @@ static int ptdump_show(struct seq_file *
+       /* Traverse kernel page tables */
+       walk_pagetables(&st);
+-      note_page(&st, 0, 0, 0);
++      note_page(&st, 0, 0, 0, 0);
+       return 0;
+ }
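
A small numeric illustration of the reporting bug (hypothetical sizes; the real ones depend on the powerpc MMU config): the single-physical-page "[pa]" form always printed PAGE_SIZE >> 10, even for entries mapped at PMD or higher levels.

  #include <stdio.h>

  int main(void)
  {
          unsigned long page_size = 4096;              /* PAGE_SIZE     */
          unsigned long pmd_size  = 2UL * 1024 * 1024; /* e.g. 2M entry */

          printf("old code:  %lu K\n", page_size >> 10); /* any level   */
          printf("with fix:  %lu K\n", pmd_size >> 10);  /* real size   */
          return 0;
  }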
diff --git a/queue-5.7/series b/queue-5.7/series
index 1b8409831d3b80d1091b5d367e69ed2015979a09..6c567cfd6073ae7844e283480258e35b9a0a77b0 100644 (file)
--- a/queue-5.7/series
@@ -38,3 +38,19 @@ x86-pci-mark-intel-c620-mroms-as-having-non-compliant-bars.patch
 x86-speculation-prevent-rogue-cross-process-ssbd-shutdown.patch
 x86-speculation-avoid-force-disabling-ibpb-based-on-stibp-and-enhanced-ibrs.patch
 x86-speculation-pr_spec_force_disable-enforcement-for-indirect-branches.patch
+x86-reboot-quirks-add-macbook6-1-reboot-quirk.patch
+x86-vdso-unbreak-paravirt-vdso-clocks.patch
+perf-x86-intel-add-more-available-bits-for-offcore_response-of-intel-tremont.patch
+kvm-x86-don-t-expose-msr_ia32_umwait_control-unconditionally.patch
+kvm-x86-allow-kvm_state_nested_mtf_pending-in-kvm_state-flags.patch
+kvm-vmx-enable-x86_feature_waitpkg-in-kvm-capabilities.patch
+kvm-x86-mmu-set-mmio_value-to-0-if-reserved-pf-can-t-be-generated.patch
+kvm-x86-respect-singlestep-when-emulating-instruction.patch
+kvm-x86-fix-apic-page-invalidation-race.patch
+powerpc-ptdump-properly-handle-non-standard-page-size.patch
+asoc-tlv320adcx140-fix-mic-gain-registers.patch
+asoc-max9867-fix-volume-controls.patch
+io_uring-fix-flush-req-refs-underflow.patch
+io_uring-re-set-iov-base-len-for-buffer-select-retry.patch
+io_uring-use-kvfree-in-io_sqe_buffer_register.patch
+io_uring-allow-o_nonblock-async-retry.patch
diff --git a/queue-5.7/x86-reboot-quirks-add-macbook6-1-reboot-quirk.patch b/queue-5.7/x86-reboot-quirks-add-macbook6-1-reboot-quirk.patch
new file mode 100644 (file)
index 0000000..a309f57
--- /dev/null
@@ -0,0 +1,39 @@
+From 140fd4ac78d385e6c8e6a5757585f6c707085f87 Mon Sep 17 00:00:00 2001
+From: Hill Ma <maahiuzeon@gmail.com>
+Date: Sat, 25 Apr 2020 13:06:41 -0700
+Subject: x86/reboot/quirks: Add MacBook6,1 reboot quirk
+
+From: Hill Ma <maahiuzeon@gmail.com>
+
+commit 140fd4ac78d385e6c8e6a5757585f6c707085f87 upstream.
+
+On MacBook6,1, reboot would hang unless the parameter reboot=pci is added.
+Make it automatic.
+
+Signed-off-by: Hill Ma <maahiuzeon@gmail.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20200425200641.GA1554@cslab.localdomain
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/reboot.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -197,6 +197,14 @@ static const struct dmi_system_id reboot
+                       DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
+               },
+       },
++      {       /* Handle problems with rebooting on Apple MacBook6,1 */
++              .callback = set_pci_reboot,
++              .ident = "Apple MacBook6,1",
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"),
++              },
++      },
+       {       /* Handle problems with rebooting on Apple MacBookPro5 */
+               .callback = set_pci_reboot,
+               .ident = "Apple MacBookPro5",
diff --git a/queue-5.7/x86-vdso-unbreak-paravirt-vdso-clocks.patch b/queue-5.7/x86-vdso-unbreak-paravirt-vdso-clocks.patch
new file mode 100644 (file)
index 0000000..271c86c
--- /dev/null
@@ -0,0 +1,77 @@
+From 7778d8417b74aded842eeb372961cfc460417fa0 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 6 Jun 2020 23:51:17 +0200
+Subject: x86/vdso: Unbreak paravirt VDSO clocks
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 7778d8417b74aded842eeb372961cfc460417fa0 upstream.
+
+The conversion of x86 VDSO to the generic clock mode storage broke the
+paravirt and hyperv clocksource logic. These clock sources have their own
+internal sequence counter to validate the clocksource at the point of
+reading it. This is necessary because the hypervisor can invalidate the
+clocksource asynchronously so a check during the VDSO data update is not
+sufficient. If the internal check during read invalidates the clocksource
+the read returns U64_MAX. The original code checked this efficiently by
+testing whether the result (cast to signed) is negative, i.e. bit 63 is
+set. This was done that way because an extra indicator for the validity had
+more overhead.
+
+The conversion broke this check because the check was replaced by a check
+for a valid VDSO clock mode.
+
+The wreckage manifests itself when the paravirt clock is installed as a
+valid VDSO clock and during runtime invalidated by the hypervisor,
+e.g. after a host suspend/resume cycle. After the invalidation the read
+function returns U64_MAX which is used as cycles and makes the clock jump
+by ~2200 seconds, and become stale until the 2200 seconds have elapsed
+where it starts to jump again. The period of this effect depends on the
+shift/mult pair of the clocksource and the jumps and staleness are an
+artifact of undefined but reproducible behaviour of math overflow.
+
+Implement an x86 version of the new vdso_cycles_ok() inline which adds this
+check back and a variant of vdso_clocksource_ok() which lets the compiler
+optimize it out to avoid the extra conditional. That's suboptimal when the
+system does not have a VDSO capable clocksource, but that's not the case
+which is optimized for.
+
+Fixes: 5d51bee725cc ("clocksource: Add common vdso clock mode storage")
+Reported-by: Miklos Szeredi <miklos@szeredi.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Miklos Szeredi <mszeredi@redhat.com>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20200606221532.080560273@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/vdso/gettimeofday.h |   18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/arch/x86/include/asm/vdso/gettimeofday.h
++++ b/arch/x86/include/asm/vdso/gettimeofday.h
+@@ -271,6 +271,24 @@ static __always_inline const struct vdso
+       return __vdso_data;
+ }
++static inline bool arch_vdso_clocksource_ok(const struct vdso_data *vd)
++{
++      return true;
++}
++#define vdso_clocksource_ok arch_vdso_clocksource_ok
++
++/*
++ * Clocksource read value validation to handle PV and HyperV clocksources
++ * which can be invalidated asynchronously and indicate invalidation by
++ * returning U64_MAX, which can be effectively tested by checking for a
++ * negative value after casting it to s64.
++ */
++static inline bool arch_vdso_cycles_ok(u64 cycles)
++{
++      return (s64)cycles >= 0;
++}
++#define vdso_cycles_ok arch_vdso_cycles_ok
++
+ /*
+  * x86 specific delta calculation.
+  *