+++ /dev/null
-From 9228b26194d1cc00449f12f306f53ef2e234a55b Mon Sep 17 00:00:00 2001
-From: Reiji Watanabe <reijiw@google.com>
-Date: Sun, 12 Mar 2023 20:32:08 -0700
-Subject: KVM: arm64: PMU: Fix GET_ONE_REG for vPMC regs to return the current value
-
-From: Reiji Watanabe <reijiw@google.com>
-
-commit 9228b26194d1cc00449f12f306f53ef2e234a55b upstream.
-
-Have KVM_GET_ONE_REG for vPMU counter (vPMC) registers (PMCCNTR_EL0
-and PMEVCNTR<n>_EL0) return the sum of the register value in the sysreg
-file and the current perf event counter value.
-
-Values of vPMC registers are saved in sysreg files on certain occasions.
-These saved values don't represent the current values of the vPMC
-registers if the perf events for the vPMCs count events after the save.
-The current values of those registers are the sum of the sysreg file
-value and the current perf event counter value. But, when userspace
-reads those registers (using KVM_GET_ONE_REG), KVM returns the sysreg
-file value to userspace (not the sum value).
-
-Fix this to return the sum value for KVM_GET_ONE_REG.
-
-Fixes: 051ff581ce70 ("arm64: KVM: Add access handler for event counter register")
-Cc: stable@vger.kernel.org
-Reviewed-by: Marc Zyngier <maz@kernel.org>
-Signed-off-by: Reiji Watanabe <reijiw@google.com>
-Link: https://lore.kernel.org/r/20230313033208.1475499-1-reijiw@google.com
-Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/arm64/kvm/sys_regs.c | 21 +++++++++++++++++++--
- 1 file changed, 19 insertions(+), 2 deletions(-)
-
---- a/arch/arm64/kvm/sys_regs.c
-+++ b/arch/arm64/kvm/sys_regs.c
-@@ -764,6 +764,22 @@ static bool pmu_counter_idx_valid(struct
- return true;
- }
-
-+static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
-+ u64 *val)
-+{
-+ u64 idx;
-+
-+ if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
-+ /* PMCCNTR_EL0 */
-+ idx = ARMV8_PMU_CYCLE_IDX;
-+ else
-+ /* PMEVCNTRn_EL0 */
-+ idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
-+
-+ *val = kvm_pmu_get_counter_value(vcpu, idx);
-+ return 0;
-+}
-+
- static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
- struct sys_reg_params *p,
- const struct sys_reg_desc *r)
-@@ -980,7 +996,7 @@ static bool access_pmuserenr(struct kvm_
- /* Macro to expand the PMEVCNTRn_EL0 register */
- #define PMU_PMEVCNTR_EL0(n) \
- { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)), \
-- .reset = reset_pmevcntr, \
-+ .reset = reset_pmevcntr, .get_user = get_pmu_evcntr, \
- .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
-
- /* Macro to expand the PMEVTYPERn_EL0 register */
-@@ -1651,7 +1667,8 @@ static const struct sys_reg_desc sys_reg
- { PMU_SYS_REG(SYS_PMCEID1_EL0),
- .access = access_pmceid, .reset = NULL },
- { PMU_SYS_REG(SYS_PMCCNTR_EL0),
-- .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
-+ .access = access_pmu_evcntr, .reset = reset_unknown,
-+ .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
- { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
- .access = access_pmu_evtyper, .reset = NULL },
- { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
--- /dev/null
+From 98c25ead5eda5e9d41abe57839ad3e8caf19500c Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 8 Dec 2021 01:52:17 +0000
+Subject: KVM: VMX: Move preemption timer <=> hrtimer dance to common x86
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 98c25ead5eda5e9d41abe57839ad3e8caf19500c upstream.
+
+Handle the switch to/from the hypervisor/software timer when a vCPU is
+blocking in common x86 instead of in VMX. Even though VMX is the only
+user of a hypervisor timer, the logic and all functions involved are
+generic x86 (unless future CPUs do something completely different and
+implement a hypervisor timer that runs regardless of mode).
+
+Handling the switch in common x86 will allow for the elimination of the
+pre/post_block hooks, and also lets KVM switch back to the hypervisor
+timer if and only if it was in use (without additional params). Add a
+comment explaining why the switch cannot be deferred to kvm_sched_out()
+or kvm_vcpu_block().
+
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
+Message-Id: <20211208015236.1616697-8-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+[ta: Fix conflicts in vmx_pre_block and vmx_post_block as per Paolo's
+suggestion. Add Reported-by and Link tags.]
+Reported-by: syzbot+b6a74be92b5063a0f1ff@syzkaller.appspotmail.com
+Link: https://syzkaller.appspot.com/bug?id=489beb3d76ef14cc6cd18125782dc6f86051a605
+Tested-by: Tudor Ambarus <tudor.ambarus@linaro.org>
+Signed-off-by: Tudor Ambarus <tudor.ambarus@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/vmx.c | 6 ------
+ arch/x86/kvm/x86.c | 21 +++++++++++++++++++++
+ 2 files changed, 21 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -7597,17 +7597,11 @@ static int vmx_pre_block(struct kvm_vcpu
+ if (pi_pre_block(vcpu))
+ return 1;
+
+- if (kvm_lapic_hv_timer_in_use(vcpu))
+- kvm_lapic_switch_to_sw_timer(vcpu);
+-
+ return 0;
+ }
+
+ static void vmx_post_block(struct kvm_vcpu *vcpu)
+ {
+- if (kvm_x86_ops.set_hv_timer)
+- kvm_lapic_switch_to_hv_timer(vcpu);
+-
+ pi_post_block(vcpu);
+ }
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -10043,12 +10043,28 @@ out:
+
+ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
+ {
++ bool hv_timer;
++
+ if (!kvm_arch_vcpu_runnable(vcpu) &&
+ (!kvm_x86_ops.pre_block || static_call(kvm_x86_pre_block)(vcpu) == 0)) {
++ /*
++ * Switch to the software timer before halt-polling/blocking as
++ * the guest's timer may be a break event for the vCPU, and the
++ * hypervisor timer runs only when the CPU is in guest mode.
++ * Switch before halt-polling so that KVM recognizes an expired
++ * timer before blocking.
++ */
++ hv_timer = kvm_lapic_hv_timer_in_use(vcpu);
++ if (hv_timer)
++ kvm_lapic_switch_to_sw_timer(vcpu);
++
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_block(vcpu);
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+
++ if (hv_timer)
++ kvm_lapic_switch_to_hv_timer(vcpu);
++
+ if (kvm_x86_ops.post_block)
+ static_call(kvm_x86_post_block)(vcpu);
+
+@@ -10287,6 +10303,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+ r = -EINTR;
+ goto out;
+ }
++ /*
++ * It should be impossible for the hypervisor timer to be in
++ * use before KVM has ever run the vCPU.
++ */
++ WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu));
+ kvm_vcpu_block(vcpu);
+ if (kvm_apic_accept_events(vcpu) < 0) {
+ r = 0;
--- /dev/null
+From ab52be1b310bcb39e6745d34a8f0e8475d67381a Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Sat, 7 Jan 2023 01:10:21 +0000
+Subject: KVM: x86: Inject #GP on x2APIC WRMSR that sets reserved bits 63:32
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit ab52be1b310bcb39e6745d34a8f0e8475d67381a upstream.
+
+Reject attempts to set bits 63:32 for 32-bit x2APIC registers, i.e. all
+x2APIC registers except ICR. Per Intel's SDM:
+
+ Non-zero writes (by WRMSR instruction) to reserved bits to these
+ registers will raise a general protection fault exception
+
+Opportunistically fix a typo in a nearby comment.
+
+Reported-by: Marc Orr <marcorr@google.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
+Link: https://lore.kernel.org/r/20230107011025.565472-3-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/lapic.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2802,6 +2802,10 @@ int kvm_x2apic_msr_write(struct kvm_vcpu
+ /* if this is ICR write vector before command */
+ if (reg == APIC_ICR)
+ kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
++ else if (data >> 32)
++ /* Bits 63:32 are reserved in all other registers. */
++ return 1;
++
+ return kvm_lapic_reg_write(apic, reg, (u32)data);
+ }
+
+@@ -2836,6 +2840,10 @@ int kvm_hv_vapic_msr_write(struct kvm_vc
+ /* if this is ICR write vector before command */
+ if (reg == APIC_ICR)
+ kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
++ else if (data >> 32)
++ /* Bits 63:32 are reserved in all other registers. */
++ return 1;
++
+ return kvm_lapic_reg_write(apic, reg, (u32)data);
+ }
+
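[Editor's note: as a rough illustration of the rule this patch enforces — a simplified model, not the actual KVM code path; the helper name below is invented for the example — in x2APIC mode every APIC register is accessed as a 32-bit MSR except ICR, so a WRMSR that sets bits 63:32 of any other x2APIC MSR must fail and let the caller inject #GP.]

#include <stdint.h>

/*
 * Simplified model of the check added above.  IA32_X2APIC_ICR (MSR 0x830)
 * architecturally holds a full 64-bit destination/command pair; every other
 * x2APIC MSR is 32 bits wide, so bits 63:32 are reserved and a non-zero
 * write must be rejected (KVM's callers turn the non-zero return into an
 * injected #GP).
 */
static int x2apic_wrmsr_reserved_bits_set(uint32_t msr, uint64_t data)
{
	if (msr == 0x830)		/* ICR: 64 bits are architectural */
		return 0;
	return (data >> 32) != 0;	/* reserved bits set -> reject */
}
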
--- /dev/null
+From 97a71c444a147ae41c7d0ab5b3d855d7f762f3ed Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Fri, 6 Jan 2023 01:12:35 +0000
+Subject: KVM: x86: Purge "highest ISR" cache when updating APICv state
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 97a71c444a147ae41c7d0ab5b3d855d7f762f3ed upstream.
+
+Purge the "highest ISR" cache when updating APICv state on a vCPU. The
+cache must not be used when APICv is active as hardware may emulate EOIs
+(and other operations) without exiting to KVM.
+
+This fixes a bug where KVM will effectively block IRQs in perpetuity due
+to the "highest ISR" never getting reset if APICv is activated on a vCPU
+while an IRQ is in-service. Hardware emulates the EOI and KVM never gets
+a chance to update its cache.
+
+Fixes: b26a695a1d78 ("kvm: lapic: Introduce APICv update helper function")
+Cc: stable@vger.kernel.org
+Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+Cc: Maxim Levitsky <mlevitsk@redhat.com>
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20230106011306.85230-3-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/lapic.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2316,6 +2316,7 @@ void kvm_apic_update_apicv(struct kvm_vc
+ apic->irr_pending = (apic_search_irr(apic) != -1);
+ apic->isr_count = count_vectors(apic->regs + APIC_ISR);
+ }
++ apic->highest_isr_cache = -1;
+ }
+ EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
+
+@@ -2368,7 +2369,6 @@ void kvm_lapic_reset(struct kvm_vcpu *vc
+ kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
+ }
+ kvm_apic_update_apicv(vcpu);
+- apic->highest_isr_cache = -1;
+ update_divide_count(apic);
+ atomic_set(&apic->lapic_timer.pending, 0);
+
+@@ -2638,7 +2638,6 @@ int kvm_apic_set_state(struct kvm_vcpu *
+ __start_apic_timer(apic, APIC_TMCCT);
+ kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
+ kvm_apic_update_apicv(vcpu);
+- apic->highest_isr_cache = -1;
+ if (vcpu->arch.apicv_active) {
+ static_call(kvm_x86_apicv_post_state_restore)(vcpu);
+ static_call(kvm_x86_hwapic_irr_update)(vcpu,
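[Editor's note: a conceptual sketch of why the stale cache blocks interrupts. This models the idea only; the structure and helpers below are invented for illustration and do not mirror KVM's lapic.c.]

#include <stdbool.h>

/*
 * Toy model, not KVM code: the APIC tracks in-service vectors in a 256-bit
 * ISR and caches the highest one.  With APICv active, hardware completes
 * EOIs without a VM-exit, so the cached vector can stay "in service"
 * forever from the model's point of view and every lower-priority
 * interrupt remains blocked.
 */
struct lapic_model {
	bool in_service[256];
	int highest_isr_cache;		/* -1: recompute from in_service[] */
};

static int find_highest_isr(struct lapic_model *m)
{
	int v;

	if (m->highest_isr_cache != -1)
		return m->highest_isr_cache;	/* stale if hardware EOI'd */

	for (v = 255; v >= 0; v--)
		if (m->in_service[v])
			return v;
	return -1;
}

/* The fix, in model form: never trust the cache across an APICv change. */
static void apicv_state_changed(struct lapic_model *m)
{
	m->highest_isr_cache = -1;
}
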
--- /dev/null
+From 89aba4c26fae4e459f755a18912845c348ee48f3 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <hca@linux.ibm.com>
+Date: Thu, 23 Mar 2023 13:09:16 +0100
+Subject: s390/uaccess: add missing earlyclobber annotations to __clear_user()
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+commit 89aba4c26fae4e459f755a18912845c348ee48f3 upstream.
+
+Add missing earlyclobber annotations to size, to, and tmp2 operands of the
+__clear_user() inline assembly since they are modified or written to before
+the last usage of all input operands. This can lead to incorrect register
+allocation for the inline assembly.
+
+Fixes: 6c2a9e6df604 ("[S390] Use alternative user-copy operations for new hardware.")
+Reported-by: Mark Rutland <mark.rutland@arm.com>
+Link: https://lore.kernel.org/all/20230321122514.1743889-3-mark.rutland@arm.com/
+Cc: stable@vger.kernel.org
+Reviewed-by: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/lib/uaccess.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/lib/uaccess.c
++++ b/arch/s390/lib/uaccess.c
+@@ -227,7 +227,7 @@ static inline unsigned long clear_user_m
+ "4: slgr %0,%0\n"
+ "5:\n"
+ EX_TABLE(0b,2b) EX_TABLE(6b,2b) EX_TABLE(3b,5b) EX_TABLE(7b,5b)
+- : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
++ : "+&a" (size), "+&a" (to), "+a" (tmp1), "=&a" (tmp2)
+ : "a" (empty_zero_page), [spec] "K" (0x81UL)
+ : "cc", "memory", "0");
+ return size;
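[Editor's note: for readers less familiar with GCC inline-assembly constraints, here is a minimal sketch of the failure mode the earlyclobber ('&') modifier prevents. It is not kernel or s390 code: it uses x86-64 mnemonics and the generic "r" constraint purely for demonstration (the patch itself marks s390 address registers via "a"), but the constraint semantics are the same.]

#include <stdio.h>

long scale_and_add(long a, long b)
{
	long out;

	/*
	 * The template writes %0 (out) in the first instruction, before the
	 * last read of %2 (b).  The '&' in "=&r" tells the compiler that out
	 * must not share a register with any input; drop it and the compiler
	 * is free to place out and b in the same register, silently turning
	 * the result into 4*a instead of 2*a + b.
	 */
	asm("mov %1, %0\n\t"
	    "add %0, %0\n\t"
	    "add %2, %0"
	    : "=&r" (out)
	    : "r" (a), "r" (b));
	return out;
}

int main(void)
{
	printf("%ld\n", scale_and_add(3, 4));	/* prints 10 */
	return 0;
}
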
rcu-fix-rcu_torture_read-ftrace-event.patch
drm-etnaviv-fix-reference-leak-when-mmaping-imported-buffer.patch
drm-amd-display-add-dsc-support-for-synaptics-cascaded-mst-hub.patch
-kvm-arm64-pmu-fix-get_one_reg-for-vpmc-regs-to-return-the-current-value.patch
kvm-arm64-disable-interrupts-while-walking-userspace-pts.patch
+s390-uaccess-add-missing-earlyclobber-annotations-to-__clear_user.patch
+kvm-vmx-move-preemption-timer-hrtimer-dance-to-common-x86.patch
+kvm-x86-inject-gp-on-x2apic-wrmsr-that-sets-reserved-bits-63-32.patch
+kvm-x86-purge-highest-isr-cache-when-updating-apicv-state.patch
+zonefs-fix-error-message-in-zonefs_file_dio_append.patch
--- /dev/null
+From 88b170088ad2c3e27086fe35769aa49f8a512564 Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Date: Mon, 20 Mar 2023 22:49:15 +0900
+Subject: zonefs: Fix error message in zonefs_file_dio_append()
+
+From: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+
+commit 88b170088ad2c3e27086fe35769aa49f8a512564 upstream.
+
+Since the expected write location in a sequential file is always at the
+end of the file (append write), when an invalid write append location is
+detected in zonefs_file_dio_append(), print the invalid written location
+instead of the expected write location.
+
+Fixes: a608da3bd730 ("zonefs: Detect append writes at invalid locations")
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/zonefs/super.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/zonefs/super.c
++++ b/fs/zonefs/super.c
+@@ -796,7 +796,7 @@ static ssize_t zonefs_file_dio_append(st
+ if (bio->bi_iter.bi_sector != wpsector) {
+ zonefs_warn(inode->i_sb,
+ "Corrupted write pointer %llu for zone at %llu\n",
+- wpsector, zi->i_zsector);
++ bio->bi_iter.bi_sector, zi->i_zsector);
+ ret = -EIO;
+ }
+ }