git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 13 Sep 2021 11:35:50 +0000 (13:35 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 13 Sep 2021 11:35:50 +0000 (13:35 +0200)
added patches:
fuse-flush-extending-writes.patch
fuse-truncate-pagecache-on-atomic_o_trunc.patch
ima-remove-the-dependency-on-crypto_md5.patch
ima-remove-wmissing-prototypes-warning.patch
kvm-nvmx-unconditionally-clear-nested.pi_pending-on-nested-vm-enter.patch
kvm-s390-index-kvm-arch.idle_mask-by-vcpu_idx.patch
kvm-x86-update-vcpu-s-hv_clock-before-back-to-guest-when-tsc_offset-is-adjusted.patch
perf-x86-amd-ibs-extend-perf_pmu_cap_no_exclude-to-ibs-op.patch
x86-resctrl-fix-a-maybe-uninitialized-build-warning-treated-as-error.patch

queue-5.4/fuse-flush-extending-writes.patch [new file with mode: 0644]
queue-5.4/fuse-truncate-pagecache-on-atomic_o_trunc.patch [new file with mode: 0644]
queue-5.4/ima-remove-the-dependency-on-crypto_md5.patch [new file with mode: 0644]
queue-5.4/ima-remove-wmissing-prototypes-warning.patch [new file with mode: 0644]
queue-5.4/kvm-nvmx-unconditionally-clear-nested.pi_pending-on-nested-vm-enter.patch [new file with mode: 0644]
queue-5.4/kvm-s390-index-kvm-arch.idle_mask-by-vcpu_idx.patch [new file with mode: 0644]
queue-5.4/kvm-x86-update-vcpu-s-hv_clock-before-back-to-guest-when-tsc_offset-is-adjusted.patch [new file with mode: 0644]
queue-5.4/perf-x86-amd-ibs-extend-perf_pmu_cap_no_exclude-to-ibs-op.patch [new file with mode: 0644]
queue-5.4/series
queue-5.4/x86-resctrl-fix-a-maybe-uninitialized-build-warning-treated-as-error.patch [new file with mode: 0644]

diff --git a/queue-5.4/fuse-flush-extending-writes.patch b/queue-5.4/fuse-flush-extending-writes.patch
new file mode 100644 (file)
index 0000000..4cda758
--- /dev/null
@@ -0,0 +1,49 @@
+From 59bda8ecee2ffc6a602b7bf2b9e43ca669cdbdcd Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Tue, 31 Aug 2021 14:18:08 +0200
+Subject: fuse: flush extending writes
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit 59bda8ecee2ffc6a602b7bf2b9e43ca669cdbdcd upstream.
+
+Callers of fuse_writeback_range() assume that the file is ready for
+modification by the server in the supplied byte range after the call
+returns.
+
+If there's a write that extends the file beyond the end of the supplied
+range, then the file needs to be extended to at least the end of the range,
+but currently that's not done.
+
+There are at least two cases where this can cause problems:
+
+ - copy_file_range() will return a short count if the file is not extended
+   up to the end of the source range.
+
+ - FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE will not extend the file,
+   hence the region may not be fully allocated.
+
+Fix by flushing writes from the start of the range up to the end of the
+file.  This could be optimized if the writes are non-extending, etc, but
+it's probably not worth the trouble.
+
+Fixes: a2bc92362941 ("fuse: fix copy_file_range() in the writeback case")
+Fixes: 6b1bdb56b17c ("fuse: allow fallocate(FALLOC_FL_ZERO_RANGE)")
+Cc: <stable@vger.kernel.org>  # v5.2
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fuse/file.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -3188,7 +3188,7 @@ fuse_direct_IO(struct kiocb *iocb, struc
+ static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
+ {
+-      int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
++      int err = filemap_write_and_wait_range(inode->i_mapping, start, -1);
+       if (!err)
+               fuse_sync_writes(inode);
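
For context, here is a minimal userspace sketch (illustrative only; the FUSE mount point and file names are hypothetical, not taken from the patch) of the two scenarios the commit message above names: a copy_file_range() that may come back short, and a size-preserving zero-range fallocate():

/* sketch: exercises the two cases named in the commit message above */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* hypothetical files on a FUSE mount */
        int in = open("/mnt/fuse/src", O_RDONLY);
        int out = open("/mnt/fuse/dst", O_WRONLY | O_CREAT, 0644);

        if (in < 0 || out < 0)
                return 1;

        /* Case 1: on an affected kernel this may return a short count if a
         * pending write extending the source past the requested range was
         * not flushed first. */
        ssize_t n = copy_file_range(in, NULL, out, NULL, 1 << 20, 0);
        printf("copied %zd bytes\n", n);

        /* Case 2: zero a range without changing i_size; without the flush
         * up to EOF, the region may not end up fully allocated. */
        if (fallocate(out, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE, 0, 1 << 20))
                perror("fallocate");

        close(in);
        close(out);
        return 0;
}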
diff --git a/queue-5.4/fuse-truncate-pagecache-on-atomic_o_trunc.patch b/queue-5.4/fuse-truncate-pagecache-on-atomic_o_trunc.patch
new file mode 100644 (file)
index 0000000..2f85152
--- /dev/null
@@ -0,0 +1,58 @@
+From 76224355db7570cbe6b6f75c8929a1558828dd55 Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Tue, 17 Aug 2021 21:05:16 +0200
+Subject: fuse: truncate pagecache on atomic_o_trunc
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit 76224355db7570cbe6b6f75c8929a1558828dd55 upstream.
+
+fuse_finish_open() will be called with FUSE_NOWRITE in case of atomic
+O_TRUNC.  This can deadlock with fuse_wait_on_page_writeback() in
+fuse_launder_page() triggered by invalidate_inode_pages2().
+
+Fix by replacing invalidate_inode_pages2() in fuse_finish_open() with a
+truncate_pagecache() call.  This makes sense regardless of FOPEN_KEEP_CACHE
+or fc->writeback_cache, so do it unconditionally.
+
+Reported-by: Xie Yongji <xieyongji@bytedance.com>
+Reported-and-tested-by: syzbot+bea44a5189836d956894@syzkaller.appspotmail.com
+Fixes: e4648309b85a ("fuse: truncate pending writes on O_TRUNC")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fuse/file.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -193,12 +193,11 @@ void fuse_finish_open(struct inode *inod
+       struct fuse_file *ff = file->private_data;
+       struct fuse_conn *fc = get_fuse_conn(inode);
+-      if (!(ff->open_flags & FOPEN_KEEP_CACHE))
+-              invalidate_inode_pages2(inode->i_mapping);
+       if (ff->open_flags & FOPEN_STREAM)
+               stream_open(inode, file);
+       else if (ff->open_flags & FOPEN_NONSEEKABLE)
+               nonseekable_open(inode, file);
++
+       if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
+               struct fuse_inode *fi = get_fuse_inode(inode);
+@@ -206,10 +205,14 @@ void fuse_finish_open(struct inode *inod
+               fi->attr_version = atomic64_inc_return(&fc->attr_version);
+               i_size_write(inode, 0);
+               spin_unlock(&fi->lock);
++              truncate_pagecache(inode, 0);
+               fuse_invalidate_attr(inode);
+               if (fc->writeback_cache)
+                       file_update_time(file);
++      } else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) {
++              invalidate_inode_pages2(inode->i_mapping);
+       }
++
+       if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
+               fuse_link_write_file(file);
+ }
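
A quick way to see which open path the patch above changes: a plain truncating open on a FUSE mount whose server advertises atomic O_TRUNC goes through fuse_finish_open() with writes suspended (FUSE_NOWRITE), which is exactly where the page cache is now truncated instead of invalidated. Minimal sketch, hypothetical path:

/* sketch: triggers the atomic O_TRUNC path discussed above */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/fuse/file", O_WRONLY | O_TRUNC);

        if (fd < 0)
                return 1;
        close(fd);
        return 0;
}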
diff --git a/queue-5.4/ima-remove-the-dependency-on-crypto_md5.patch b/queue-5.4/ima-remove-the-dependency-on-crypto_md5.patch
new file mode 100644 (file)
index 0000000..d8acf2e
--- /dev/null
@@ -0,0 +1,45 @@
+From 8510505d55e194d3f6c9644c9f9d12c4f6b0395a Mon Sep 17 00:00:00 2001
+From: THOBY Simon <Simon.THOBY@viveris.fr>
+Date: Mon, 16 Aug 2021 08:10:59 +0000
+Subject: IMA: remove the dependency on CRYPTO_MD5
+
+From: THOBY Simon <Simon.THOBY@viveris.fr>
+
+commit 8510505d55e194d3f6c9644c9f9d12c4f6b0395a upstream.
+
+MD5 is a weak digest algorithm that shouldn't be used for cryptographic
+operations. It hinders the efficiency of a patch set that aims to limit
+the digests allowed for the extended file attribute, namely security.ima.
+MD5 is no longer a requirement for IMA, nor should it be used there.
+
+The sole place where we still use the MD5 algorithm inside IMA is setting
+the ima_hash algorithm to MD5, if the user supplies 'ima_hash=md5'
+parameter on the command line.  With commit ab60368ab6a4 ("ima: Fallback
+to the builtin hash algorithm"), setting "ima_hash=md5" fails gracefully
+when CRYPTO_MD5 is not set:
+       ima: Can not allocate md5 (reason: -2)
+       ima: Allocating md5 failed, going to use default hash algorithm sha256
+
+Remove the CRYPTO_MD5 dependency for IMA.
+
+Signed-off-by: THOBY Simon <Simon.THOBY@viveris.fr>
+Reviewed-by: Lakshmi Ramasubramanian <nramas@linux.microsoft.com>
+[zohar@linux.ibm.com: include commit number in patch description for
+stable.]
+Cc: stable@vger.kernel.org # 4.17
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ security/integrity/ima/Kconfig |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/security/integrity/ima/Kconfig
++++ b/security/integrity/ima/Kconfig
+@@ -6,7 +6,6 @@ config IMA
+       select SECURITYFS
+       select CRYPTO
+       select CRYPTO_HMAC
+-      select CRYPTO_MD5
+       select CRYPTO_SHA1
+       select CRYPTO_HASH_INFO
+       select TCG_TPM if HAS_IOMEM && !UML
diff --git a/queue-5.4/ima-remove-wmissing-prototypes-warning.patch b/queue-5.4/ima-remove-wmissing-prototypes-warning.patch
new file mode 100644 (file)
index 0000000..4464b56
--- /dev/null
@@ -0,0 +1,40 @@
+From a32ad90426a9c8eb3915eed26e08ce133bd9e0da Mon Sep 17 00:00:00 2001
+From: Austin Kim <austin.kim@lge.com>
+Date: Tue, 29 Jun 2021 14:50:50 +0100
+Subject: IMA: remove -Wmissing-prototypes warning
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Austin Kim <austin.kim@lge.com>
+
+commit a32ad90426a9c8eb3915eed26e08ce133bd9e0da upstream.
+
+With a W=1 build, the compiler emits the warning below:
+
+   security/integrity/ima/ima_mok.c:24:12: warning:
+   no previous prototype for ‘ima_mok_init’ [-Wmissing-prototypes]
+       __init int ima_mok_init(void)
+
+Silence the warning by adding the static keyword to ima_mok_init().
+
+Signed-off-by: Austin Kim <austin.kim@lge.com>
+Fixes: 41c89b64d718 ("IMA: create machine owner and blacklist keyrings")
+Cc: stable@vger.kernel.org
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ security/integrity/ima/ima_mok.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/security/integrity/ima/ima_mok.c
++++ b/security/integrity/ima/ima_mok.c
+@@ -21,7 +21,7 @@ struct key *ima_blacklist_keyring;
+ /*
+  * Allocate the IMA blacklist keyring
+  */
+-__init int ima_mok_init(void)
++static __init int ima_mok_init(void)
+ {
+       struct key_restriction *restriction;
diff --git a/queue-5.4/kvm-nvmx-unconditionally-clear-nested.pi_pending-on-nested-vm-enter.patch b/queue-5.4/kvm-nvmx-unconditionally-clear-nested.pi_pending-on-nested-vm-enter.patch
new file mode 100644 (file)
index 0000000..a89186e
--- /dev/null
@@ -0,0 +1,60 @@
+From f7782bb8d818d8f47c26b22079db10599922787a Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Tue, 10 Aug 2021 07:45:26 -0700
+Subject: KVM: nVMX: Unconditionally clear nested.pi_pending on nested VM-Enter
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit f7782bb8d818d8f47c26b22079db10599922787a upstream.
+
+Clear nested.pi_pending on nested VM-Enter even if L2 will run without
+posted interrupts enabled.  If nested.pi_pending is left set from a
+previous L2, vmx_complete_nested_posted_interrupt() will pick up the
+stale flag and exit to userspace with an "internal emulation error" due
+to the new L2 not having a valid nested.pi_desc.
+
+Arguably, vmx_complete_nested_posted_interrupt() should first check for
+posted interrupts being enabled, but it's also completely reasonable that
+KVM wouldn't screw up a fundamental flag.  Not to mention that the mere
+existence of nested.pi_pending is a long-standing bug as KVM shouldn't
+move the posted interrupt out of the IRR until it's actually processed,
+e.g. KVM effectively drops an interrupt when it performs a nested VM-Exit
+with a "pending" posted interrupt.  Fixing the mess is a future problem.
+
+Prior to vmx_complete_nested_posted_interrupt() interpreting a null PI
+descriptor as an error, this was a benign bug as the null PI descriptor
+effectively served as a check on PI not being enabled.  Even then, the
+new flow did not become problematic until KVM started checking the result
+of kvm_check_nested_events().
+
+Fixes: 705699a13994 ("KVM: nVMX: Enable nested posted interrupt processing")
+Fixes: 966eefb89657 ("KVM: nVMX: Disable vmcs02 posted interrupts if vmcs12 PID isn't mappable")
+Fixes: 47d3530f86c0 ("KVM: x86: Exit to userspace when kvm_check_nested_events fails")
+Cc: stable@vger.kernel.org
+Cc: Jim Mattson <jmattson@google.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210810144526.2662272-1-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/nested.c |    7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -2057,12 +2057,11 @@ static void prepare_vmcs02_early(struct
+                        ~PIN_BASED_VMX_PREEMPTION_TIMER);
+       /* Posted interrupts setting is only taken from vmcs12.  */
+-      if (nested_cpu_has_posted_intr(vmcs12)) {
++      vmx->nested.pi_pending = false;
++      if (nested_cpu_has_posted_intr(vmcs12))
+               vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
+-              vmx->nested.pi_pending = false;
+-      } else {
++      else
+               exec_control &= ~PIN_BASED_POSTED_INTR;
+-      }
+       pin_controls_set(vmx, exec_control);
+       /*
diff --git a/queue-5.4/kvm-s390-index-kvm-arch.idle_mask-by-vcpu_idx.patch b/queue-5.4/kvm-s390-index-kvm-arch.idle_mask-by-vcpu_idx.patch
new file mode 100644 (file)
index 0000000..2c80211
--- /dev/null
@@ -0,0 +1,122 @@
+From a3e03bc1368c1bc16e19b001fc96dc7430573cc8 Mon Sep 17 00:00:00 2001
+From: Halil Pasic <pasic@linux.ibm.com>
+Date: Fri, 27 Aug 2021 14:54:29 +0200
+Subject: KVM: s390: index kvm->arch.idle_mask by vcpu_idx
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Halil Pasic <pasic@linux.ibm.com>
+
+commit a3e03bc1368c1bc16e19b001fc96dc7430573cc8 upstream.
+
+While in practice vcpu->vcpu_idx == vcpu->vcpu_id is often true, it may
+not always be, and we must not rely on this. The reason is that KVM decides
+the vcpu_idx while userspace decides the vcpu_id, so the two might not
+match.
+
+Currently kvm->arch.idle_mask is indexed by vcpu_id, which implies
+that code like
+    for_each_set_bit(vcpu_id, kvm->arch.idle_mask, online_vcpus) {
+            vcpu = kvm_get_vcpu(kvm, vcpu_id);
+            do_stuff(vcpu);
+    }
+is not legit. The reason is that kvm_get_vcpu expects a vcpu_idx, not a
+vcpu_id.  The trouble is, we do actually use kvm->arch.idle_mask like
+this. To fix this problem we have two options. Either use
+kvm_get_vcpu_by_id(vcpu_id), which would loop to find the right vcpu_id,
+or switch to indexing via vcpu_idx. The latter is preferable for obvious
+reasons.
+
+Let us switch from indexing kvm->arch.idle_mask by vcpu_id to
+indexing it by vcpu_idx.  To keep gisa_int.kicked_mask indexed by the
+same index as idle_mask, let's make the same change for it as well.
+
+Fixes: 1ee0bc559dc3 ("KVM: s390: get rid of local_int array")
+Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
+Reviewed-by: Christian Bornträger <borntraeger@de.ibm.com>
+Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Cc: <stable@vger.kernel.org> # 3.15+
+Link: https://lore.kernel.org/r/20210827125429.1912577-1-pasic@linux.ibm.com
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/include/asm/kvm_host.h |    1 +
+ arch/s390/kvm/interrupt.c        |   12 ++++++------
+ arch/s390/kvm/kvm-s390.c         |    2 +-
+ arch/s390/kvm/kvm-s390.h         |    2 +-
+ 4 files changed, 9 insertions(+), 8 deletions(-)
+
+--- a/arch/s390/include/asm/kvm_host.h
++++ b/arch/s390/include/asm/kvm_host.h
+@@ -873,6 +873,7 @@ struct kvm_arch{
+       atomic64_t cmma_dirty_pages;
+       /* subset of available cpu features enabled by user space */
+       DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
++      /* indexed by vcpu_idx */
+       DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS);
+       struct kvm_s390_gisa_interrupt gisa_int;
+ };
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -408,13 +408,13 @@ static unsigned long deliverable_irqs(st
+ static void __set_cpu_idle(struct kvm_vcpu *vcpu)
+ {
+       kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
+-      set_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
++      set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+ }
+ static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
+ {
+       kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
+-      clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
++      clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+ }
+ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
+@@ -2984,18 +2984,18 @@ int kvm_s390_get_irq_state(struct kvm_vc
+ static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
+ {
+-      int vcpu_id, online_vcpus = atomic_read(&kvm->online_vcpus);
++      int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus);
+       struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+       struct kvm_vcpu *vcpu;
+-      for_each_set_bit(vcpu_id, kvm->arch.idle_mask, online_vcpus) {
+-              vcpu = kvm_get_vcpu(kvm, vcpu_id);
++      for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) {
++              vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+               if (psw_ioint_disabled(vcpu))
+                       continue;
+               deliverable_mask &= (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
+               if (deliverable_mask) {
+                       /* lately kicked but not yet running */
+-                      if (test_and_set_bit(vcpu_id, gi->kicked_mask))
++                      if (test_and_set_bit(vcpu_idx, gi->kicked_mask))
+                               return;
+                       kvm_s390_vcpu_wakeup(vcpu);
+                       return;
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -3726,7 +3726,7 @@ static int vcpu_pre_run(struct kvm_vcpu
+               kvm_s390_patch_guest_per_regs(vcpu);
+       }
+-      clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
++      clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask);
+       vcpu->arch.sie_block->icptcode = 0;
+       cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
+--- a/arch/s390/kvm/kvm-s390.h
++++ b/arch/s390/kvm/kvm-s390.h
+@@ -67,7 +67,7 @@ static inline int is_vcpu_stopped(struct
+ static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
+ {
+-      return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
++      return test_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+ }
+ static inline int kvm_is_ucontrol(struct kvm *kvm)
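
The premise of the patch above is that vcpu_id and vcpu_idx need not match: userspace picks the id, while the kernel assigns the index in creation order. A minimal sketch of that from the KVM userspace API (illustrative only, not part of the patch):

/* sketch: the first vCPU of this VM gets vcpu_idx 0, but vcpu_id 7 */
#include <fcntl.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        if (kvm < 0)
                return 1;

        int vm = ioctl(kvm, KVM_CREATE_VM, 0);          /* default machine type */
        if (vm < 0)
                return 1;

        int vcpu = ioctl(vm, KVM_CREATE_VCPU, 7);       /* userspace-chosen vcpu_id */
        if (vcpu < 0)
                return 1;

        close(vcpu);
        close(vm);
        close(kvm);
        return 0;
}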
diff --git a/queue-5.4/kvm-x86-update-vcpu-s-hv_clock-before-back-to-guest-when-tsc_offset-is-adjusted.patch b/queue-5.4/kvm-x86-update-vcpu-s-hv_clock-before-back-to-guest-when-tsc_offset-is-adjusted.patch
new file mode 100644 (file)
index 0000000..1cf1b6f
--- /dev/null
@@ -0,0 +1,40 @@
+From d9130a2dfdd4b21736c91b818f87dbc0ccd1e757 Mon Sep 17 00:00:00 2001
+From: Zelin Deng <zelin.deng@linux.alibaba.com>
+Date: Wed, 28 Apr 2021 10:22:01 +0800
+Subject: KVM: x86: Update vCPU's hv_clock before back to guest when tsc_offset is adjusted
+
+From: Zelin Deng <zelin.deng@linux.alibaba.com>
+
+commit d9130a2dfdd4b21736c91b818f87dbc0ccd1e757 upstream.
+
+When MSR_IA32_TSC_ADJUST is written by the guest (due to the TSC ADJUST
+feature), especially when there is a big TSC warp (e.g. a new vCPU is
+hot-added into a VM which has been up for a long time), a large value is
+added to tsc_offset before returning to the guest. This causes the system
+time to jump, as tsc_timestamp is not adjusted in the meantime, which breaks
+the monotonic character of pvclock. To fix this, notify KVM to update the
+vCPU's guest time before going back to the guest.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Zelin Deng <zelin.deng@linux.alibaba.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Message-Id: <1619576521-81399-2-git-send-email-zelin.deng@linux.alibaba.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/x86.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2764,6 +2764,10 @@ int kvm_set_msr_common(struct kvm_vcpu *
+                       if (!msr_info->host_initiated) {
+                               s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
+                               adjust_tsc_offset_guest(vcpu, adj);
++                              /* Before back to guest, tsc_timestamp must be adjusted
++                               * as well, otherwise guest's percpu pvclock time could jump.
++                               */
++                              kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+                       }
+                       vcpu->arch.ia32_tsc_adjust_msr = data;
+               }
diff --git a/queue-5.4/perf-x86-amd-ibs-extend-perf_pmu_cap_no_exclude-to-ibs-op.patch b/queue-5.4/perf-x86-amd-ibs-extend-perf_pmu_cap_no_exclude-to-ibs-op.patch
new file mode 100644 (file)
index 0000000..2b5a098
--- /dev/null
@@ -0,0 +1,36 @@
+From f11dd0d80555cdc8eaf5cfc9e19c9e198217f9f1 Mon Sep 17 00:00:00 2001
+From: Kim Phillips <kim.phillips@amd.com>
+Date: Tue, 17 Aug 2021 17:10:41 -0500
+Subject: perf/x86/amd/ibs: Extend PERF_PMU_CAP_NO_EXCLUDE to IBS Op
+
+From: Kim Phillips <kim.phillips@amd.com>
+
+commit f11dd0d80555cdc8eaf5cfc9e19c9e198217f9f1 upstream.
+
+Commit:
+
+   2ff40250691e ("perf/core, arch/x86: Use PERF_PMU_CAP_NO_EXCLUDE for exclusion incapable PMUs")
+
+neglected to extend PERF_PMU_CAP_NO_EXCLUDE to the IBS Op PMU; do so now.
+
+Fixes: 2ff40250691e ("perf/core, arch/x86: Use PERF_PMU_CAP_NO_EXCLUDE for exclusion incapable PMUs")
+Signed-off-by: Kim Phillips <kim.phillips@amd.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20210817221048.88063-2-kim.phillips@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/amd/ibs.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/events/amd/ibs.c
++++ b/arch/x86/events/amd/ibs.c
+@@ -562,6 +562,7 @@ static struct perf_ibs perf_ibs_op = {
+               .start          = perf_ibs_start,
+               .stop           = perf_ibs_stop,
+               .read           = perf_ibs_read,
++              .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
+       },
+       .msr                    = MSR_AMD64_IBSOPCTL,
+       .config_mask            = IBS_OP_CONFIG_MASK,
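
The practical effect of PERF_PMU_CAP_NO_EXCLUDE here is that the perf core rejects IBS Op events that ask for privilege exclusion instead of silently ignoring the request. A hedged userspace sketch (the sysfs path for the dynamic PMU type and the exact errno are assumptions, not taken from the patch):

/* sketch: open an IBS Op event with exclude_kernel set */
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        unsigned int type = 0;
        FILE *f = fopen("/sys/bus/event_source/devices/ibs_op/type", "r");

        if (!f || fscanf(f, "%u", &type) != 1)
                return 1;
        fclose(f);

        struct perf_event_attr attr;
        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = type;               /* dynamic IBS Op PMU type */
        attr.sample_period = 100000;
        attr.exclude_kernel = 1;        /* IBS cannot honour exclusion */

        int fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
                perror("perf_event_open");   /* expected to fail (likely EINVAL) with this patch */
        else
                close(fd);
        return 0;
}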
diff --git a/queue-5.4/series b/queue-5.4/series
index ca0b4ad1c6b9b512272a489ab4d8e0a80eaa7adc..b0e32b988f082f6b3a203a99a5e351b7be43bb8d 100644 (file)
@@ -130,3 +130,12 @@ bpf-verifier-allocate-idmap-scratch-in-verifier-env.patch
 bpf-fix-pointer-arithmetic-mask-tightening-under-state-pruning.patch
 time-handle-negative-seconds-correctly-in-timespec64_to_ns.patch
 tty-fix-data-race-between-tiocsti-and-flush_to_ldisc.patch
+perf-x86-amd-ibs-extend-perf_pmu_cap_no_exclude-to-ibs-op.patch
+x86-resctrl-fix-a-maybe-uninitialized-build-warning-treated-as-error.patch
+kvm-s390-index-kvm-arch.idle_mask-by-vcpu_idx.patch
+kvm-x86-update-vcpu-s-hv_clock-before-back-to-guest-when-tsc_offset-is-adjusted.patch
+kvm-nvmx-unconditionally-clear-nested.pi_pending-on-nested-vm-enter.patch
+fuse-truncate-pagecache-on-atomic_o_trunc.patch
+fuse-flush-extending-writes.patch
+ima-remove-wmissing-prototypes-warning.patch
+ima-remove-the-dependency-on-crypto_md5.patch
diff --git a/queue-5.4/x86-resctrl-fix-a-maybe-uninitialized-build-warning-treated-as-error.patch b/queue-5.4/x86-resctrl-fix-a-maybe-uninitialized-build-warning-treated-as-error.patch
new file mode 100644 (file)
index 0000000..8317c73
--- /dev/null
@@ -0,0 +1,67 @@
+From 527f721478bce3f49b513a733bacd19d6f34b08c Mon Sep 17 00:00:00 2001
+From: Babu Moger <babu.moger@amd.com>
+Date: Fri, 20 Aug 2021 16:52:42 -0500
+Subject: x86/resctrl: Fix a maybe-uninitialized build warning treated as error
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Babu Moger <babu.moger@amd.com>
+
+commit 527f721478bce3f49b513a733bacd19d6f34b08c upstream.
+
+The recent commit
+
+  064855a69003 ("x86/resctrl: Fix default monitoring groups reporting")
+
+caused a RHEL build failure with an uninitialized variable warning
+treated as an error because it removed the default case snippet.
+
+The RHEL Makefile uses '-Werror=maybe-uninitialized' to force possibly
+uninitialized variable warnings to be treated as errors. This is also
+reported by smatch via the 0day robot.
+
+The error from the RHEL build is:
+
+  arch/x86/kernel/cpu/resctrl/monitor.c: In function ‘__mon_event_count’:
+  arch/x86/kernel/cpu/resctrl/monitor.c:261:12: error: ‘m’ may be used
+  uninitialized in this function [-Werror=maybe-uninitialized]
+    m->chunks += chunks;
+              ^~
+
+The upstream Makefile does not build using '-Werror=maybe-uninitialized'.
+So, the problem is not seen there. Fix the problem by putting back the
+default case snippet.
+
+ [ bp: note that there's nothing wrong with the code and other compilers
+   do not trigger this warning - this is being done just so the RHEL compiler
+   is happy. ]
+
+Fixes: 064855a69003 ("x86/resctrl: Fix default monitoring groups reporting")
+Reported-by: Terry Bowman <Terry.Bowman@amd.com>
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: Babu Moger <babu.moger@amd.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/162949631908.23903.17090272726012848523.stgit@bmoger-ubuntu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/resctrl/monitor.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/x86/kernel/cpu/resctrl/monitor.c
++++ b/arch/x86/kernel/cpu/resctrl/monitor.c
+@@ -242,6 +242,12 @@ static u64 __mon_event_count(u32 rmid, s
+       case QOS_L3_MBM_LOCAL_EVENT_ID:
+               m = &rr->d->mbm_local[rmid];
+               break;
++      default:
++              /*
++               * Code would never reach here because an invalid
++               * event id would fail the __rmid_read.
++               */
++              return RMID_VAL_ERROR;
+       }
+       if (rr->first) {