--- /dev/null
+From 5ac14bac08ae827b619f21bcceaaac3b8c497e31 Mon Sep 17 00:00:00 2001
+From: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Date: Mon, 1 Feb 2021 17:26:54 +0100
+Subject: KVM: s390: extend kvm_s390_shadow_fault to return entry pointer
+
+From: Claudio Imbrenda <imbrenda@linux.ibm.com>
+
+commit 5ac14bac08ae827b619f21bcceaaac3b8c497e31 upstream.
+
+Extend kvm_s390_shadow_fault to return the pointer to the valid leaf
+DAT table entry, or to the invalid entry.
+
+Also return some flags in the lower bits of the address:
+PEI_DAT_PROT: indicates that DAT protection applies because of the
+ protection bit in the segment (or, if EDAT, region) tables.
+PEI_NOT_PTE: indicates that the address of the DAT table entry returned
+ does not refer to a PTE, but to a segment or region table.
+
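+A caller can split the returned value back into the entry address and
+the flag bits; a minimal sketch in plain C (the concrete address is
+made up, only the PEI_* values come from this patch; DAT table entries
+are 8 bytes, so the low three bits of the address are free for flags):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define PEI_DAT_PROT 2 /* DAT protection applies */
+  #define PEI_NOT_PTE  4 /* entry is a segment/region entry, not a PTE */
+
+  int main(void)
+  {
+          /* hypothetical value as returned in *datptr */
+          uint64_t datptr = 0x52378ULL | PEI_NOT_PTE | PEI_DAT_PROT;
+          /* mask off the flag bits to recover the entry address */
+          uint64_t entry = datptr & ~7ULL;
+
+          printf("entry=%#llx dat_prot=%d not_pte=%d\n",
+                 (unsigned long long)entry,
+                 !!(datptr & PEI_DAT_PROT), !!(datptr & PEI_NOT_PTE));
+          return 0;
+  }
+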
+Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Janosch Frank <frankja@de.ibm.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Link: https://lore.kernel.org/r/20210302174443.514363-3-imbrenda@linux.ibm.com
+[borntraeger@de.ibm.com: fold in a fix from Claudio]
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/kvm/gaccess.c | 30 +++++++++++++++++++++++++-----
+ arch/s390/kvm/gaccess.h | 6 +++++-
+ arch/s390/kvm/vsie.c | 8 ++++----
+ 3 files changed, 34 insertions(+), 10 deletions(-)
+
+--- a/arch/s390/kvm/gaccess.c
++++ b/arch/s390/kvm/gaccess.c
+@@ -976,7 +976,9 @@ int kvm_s390_check_low_addr_prot_real(st
+ * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
+ * @sg: pointer to the shadow guest address space structure
+ * @saddr: faulting address in the shadow gmap
+- * @pgt: pointer to the page table address result
++ * @pgt: pointer to the beginning of the page table for the given address if
++ * successful (return value 0), or to the first invalid DAT entry in
++ * case of exceptions (return value > 0)
+ * @fake: pgt references contiguous guest memory block, not a pgtable
+ */
+ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
+@@ -1034,6 +1036,7 @@ static int kvm_s390_shadow_tables(struct
+ rfte.val = ptr;
+ goto shadow_r2t;
+ }
++ *pgt = ptr + vaddr.rfx * 8;
+ rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
+ if (rc)
+ return rc;
+@@ -1060,6 +1063,7 @@ shadow_r2t:
+ rste.val = ptr;
+ goto shadow_r3t;
+ }
++ *pgt = ptr + vaddr.rsx * 8;
+ rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
+ if (rc)
+ return rc;
+@@ -1087,6 +1091,7 @@ shadow_r3t:
+ rtte.val = ptr;
+ goto shadow_sgt;
+ }
++ *pgt = ptr + vaddr.rtx * 8;
+ rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
+ if (rc)
+ return rc;
+@@ -1123,6 +1128,7 @@ shadow_sgt:
+ ste.val = ptr;
+ goto shadow_pgt;
+ }
++ *pgt = ptr + vaddr.sx * 8;
+ rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
+ if (rc)
+ return rc;
+@@ -1157,6 +1163,8 @@ shadow_pgt:
+ * @vcpu: virtual cpu
+ * @sg: pointer to the shadow guest address space structure
+ * @saddr: faulting address in the shadow gmap
++ * @datptr: will contain the address of the faulting DAT table entry, or of
++ * the valid leaf, plus some flags
+ *
+ * Returns: - 0 if the shadow fault was successfully resolved
+ * - > 0 (pgm exception code) on exceptions while faulting
+@@ -1165,11 +1173,11 @@ shadow_pgt:
+ * - -ENOMEM if out of memory
+ */
+ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
+- unsigned long saddr)
++ unsigned long saddr, unsigned long *datptr)
+ {
+ union vaddress vaddr;
+ union page_table_entry pte;
+- unsigned long pgt;
++ unsigned long pgt = 0;
+ int dat_protection, fake;
+ int rc;
+
+@@ -1191,8 +1199,20 @@ int kvm_s390_shadow_fault(struct kvm_vcp
+ pte.val = pgt + vaddr.px * PAGE_SIZE;
+ goto shadow_page;
+ }
+- if (!rc)
+- rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
++
++ switch (rc) {
++ case PGM_SEGMENT_TRANSLATION:
++ case PGM_REGION_THIRD_TRANS:
++ case PGM_REGION_SECOND_TRANS:
++ case PGM_REGION_FIRST_TRANS:
++ pgt |= PEI_NOT_PTE;
++ break;
++ case 0:
++ pgt += vaddr.px * 8;
++ rc = gmap_read_table(sg->parent, pgt, &pte.val);
++ }
++ if (datptr)
++ *datptr = pgt | dat_protection * PEI_DAT_PROT;
+ if (!rc && pte.i)
+ rc = PGM_PAGE_TRANSLATION;
+ if (!rc && pte.z)
+--- a/arch/s390/kvm/gaccess.h
++++ b/arch/s390/kvm/gaccess.h
+@@ -387,7 +387,11 @@ void ipte_unlock(struct kvm_vcpu *vcpu);
+ int ipte_lock_held(struct kvm_vcpu *vcpu);
+ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
+
++/* MVPG PEI indication bits */
++#define PEI_DAT_PROT 2
++#define PEI_NOT_PTE 4
++
+ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
+- unsigned long saddr);
++ unsigned long saddr, unsigned long *datptr);
+
+ #endif /* __KVM_S390_GACCESS_H */
+--- a/arch/s390/kvm/vsie.c
++++ b/arch/s390/kvm/vsie.c
+@@ -614,10 +614,10 @@ static int map_prefix(struct kvm_vcpu *v
+ /* with mso/msl, the prefix lies at offset *mso* */
+ prefix += scb_s->mso;
+
+- rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
++ rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL);
+ if (!rc && (scb_s->ecb & ECB_TE))
+ rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
+- prefix + PAGE_SIZE);
++ prefix + PAGE_SIZE, NULL);
+ /*
+ * We don't have to mprotect, we will be called for all unshadows.
+ * SIE will detect if protection applies and trigger a validity.
+@@ -908,7 +908,7 @@ static int handle_fault(struct kvm_vcpu
+ current->thread.gmap_addr, 1);
+
+ rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
+- current->thread.gmap_addr);
++ current->thread.gmap_addr, NULL);
+ if (rc > 0) {
+ rc = inject_fault(vcpu, rc,
+ current->thread.gmap_addr,
+@@ -930,7 +930,7 @@ static void handle_last_fault(struct kvm
+ {
+ if (vsie_page->fault_addr)
+ kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
+- vsie_page->fault_addr);
++ vsie_page->fault_addr, NULL);
+ vsie_page->fault_addr = 0;
+ }
+
--- /dev/null
+From 44bada28219031f9e8e86b84460606efa57b871e Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <hca@linux.ibm.com>
+Date: Thu, 15 Apr 2021 10:01:27 +0200
+Subject: KVM: s390: fix guarded storage control register handling
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+commit 44bada28219031f9e8e86b84460606efa57b871e upstream.
+
+store_regs_fmt2() has an ordering problem: first the guarded storage
+facility is enabled on the local cpu, then preemption disabled, and
+then the STGSC (store guarded storage controls) instruction is
+executed.
+
+If the process gets scheduled away between enabling the guarded
+storage facility and before preemption is disabled, this might lead to
+a special operation exception and therefore kernel crash as soon as
+the process is scheduled back and the STGSC instruction is executed.
+
+Fixes: 4e0b1ab72b8a ("KVM: s390: gs support for kvm guests")
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Janosch Frank <frankja@linux.ibm.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Cc: <stable@vger.kernel.org> # 4.12
+Link: https://lore.kernel.org/r/20210415080127.1061275-1-hca@linux.ibm.com
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/kvm/kvm-s390.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -4310,16 +4310,16 @@ static void store_regs_fmt2(struct kvm_v
+ kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
+ kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
+ if (MACHINE_HAS_GS) {
++ preempt_disable();
+ __ctl_set_bit(2, 4);
+ if (vcpu->arch.gs_enabled)
+ save_gs_cb(current->thread.gs_cb);
+- preempt_disable();
+ current->thread.gs_cb = vcpu->arch.host_gscb;
+ restore_gs_cb(vcpu->arch.host_gscb);
+- preempt_enable();
+ if (!vcpu->arch.host_gscb)
+ __ctl_clear_bit(2, 4);
+ vcpu->arch.host_gscb = NULL;
++ preempt_enable();
+ }
+ /* SIE will save etoken directly into SDNX and therefore kvm_run */
+ }
--- /dev/null
+From f85f1baaa18932a041fd2b1c2ca6cfd9898c7d2b Mon Sep 17 00:00:00 2001
+From: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Date: Tue, 2 Mar 2021 13:36:44 +0100
+Subject: KVM: s390: split kvm_s390_logical_to_effective
+
+From: Claudio Imbrenda <imbrenda@linux.ibm.com>
+
+commit f85f1baaa18932a041fd2b1c2ca6cfd9898c7d2b upstream.
+
+Split kvm_s390_logical_to_effective into a generic function called
+_kvm_s390_logical_to_effective. The new function takes a PSW and an address
+and returns the address with the appropriate bits masked off. The old
+function now calls the new function with the appropriate PSW from the vCPU.
+
+This is needed to avoid code duplication for vSIE.
+
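+The masking rule itself is easy to check in isolation; a stand-alone
+sketch in plain C (psw_bits() is replaced by an explicit addressing-mode
+argument, so this is an illustration, not the kernel interface):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  /* mirrors _kvm_s390_logical_to_effective() for the three modes */
+  static uint64_t to_effective(int amode, uint64_t ga)
+  {
+          if (amode == 64)
+                  return ga;
+          if (amode == 31)
+                  return ga & ((1ULL << 31) - 1);
+          return ga & ((1ULL << 24) - 1); /* 24-bit mode */
+  }
+
+  int main(void)
+  {
+          uint64_t ga = 0xdeadbeef12345678ULL;
+
+          printf("64-bit: %#llx\n", (unsigned long long)to_effective(64, ga));
+          printf("31-bit: %#llx\n", (unsigned long long)to_effective(31, ga));
+          printf("24-bit: %#llx\n", (unsigned long long)to_effective(24, ga));
+          return 0;
+  }
+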
+Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: stable@vger.kernel.org # for VSIE: correctly handle MVPG when in VSIE
+Link: https://lore.kernel.org/r/20210302174443.514363-2-imbrenda@linux.ibm.com
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/kvm/gaccess.h | 31 ++++++++++++++++++++++++-------
+ 1 file changed, 24 insertions(+), 7 deletions(-)
+
+--- a/arch/s390/kvm/gaccess.h
++++ b/arch/s390/kvm/gaccess.h
+@@ -37,6 +37,29 @@ static inline unsigned long kvm_s390_rea
+ }
+
+ /**
++ * _kvm_s390_logical_to_effective - convert guest logical to effective address
++ * @psw: psw of the guest
++ * @ga: guest logical address
++ *
++ * Convert a guest logical address to an effective address by applying the
++ * rules of the addressing mode defined by bits 31 and 32 of the given PSW
++ * (extended/basic addressing mode).
++ *
++ * Depending on the addressing mode, the upper 40 bits (24 bit addressing
++ * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing
++ * mode) of @ga will be zeroed and the remaining bits will be returned.
++ */
++static inline unsigned long _kvm_s390_logical_to_effective(psw_t *psw,
++ unsigned long ga)
++{
++ if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
++ return ga;
++ if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
++ return ga & ((1UL << 31) - 1);
++ return ga & ((1UL << 24) - 1);
++}
++
++/**
+ * kvm_s390_logical_to_effective - convert guest logical to effective address
+ * @vcpu: guest virtual cpu
+ * @ga: guest logical address
+@@ -52,13 +75,7 @@ static inline unsigned long kvm_s390_rea
+ static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
+ unsigned long ga)
+ {
+- psw_t *psw = &vcpu->arch.sie_block->gpsw;
+-
+- if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
+- return ga;
+- if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
+- return ga & ((1UL << 31) - 1);
+- return ga & ((1UL << 24) - 1);
++ return _kvm_s390_logical_to_effective(&vcpu->arch.sie_block->gpsw, ga);
+ }
+
+ /*
--- /dev/null
+From c5d1f6b531e68888cbe6718b3f77a60115d58b9c Mon Sep 17 00:00:00 2001
+From: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Date: Mon, 22 Mar 2021 15:05:58 +0100
+Subject: KVM: s390: split kvm_s390_real_to_abs
+
+From: Claudio Imbrenda <imbrenda@linux.ibm.com>
+
+commit c5d1f6b531e68888cbe6718b3f77a60115d58b9c upstream.
+
+A new function _kvm_s390_real_to_abs will apply prefixing to a real address
+with a given prefix value.
+
+The old kvm_s390_real_to_abs now becomes a wrapper around the new function.
+
+This is needed to avoid code duplication in vSIE.
+
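+The prefixing rule swaps the first two pages of the address space with
+the two pages at the prefix; a worked sketch in plain C (the prefix
+value is made up):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define PAGE_SIZE 4096ULL
+
+  /* mirrors _kvm_s390_real_to_abs() from this patch */
+  static uint64_t real_to_abs(uint32_t prefix, uint64_t gra)
+  {
+          if (gra < 2 * PAGE_SIZE)
+                  gra += prefix;  /* low 8k maps to the prefix area */
+          else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
+                  gra -= prefix;  /* prefix area maps to the low 8k */
+          return gra;
+  }
+
+  int main(void)
+  {
+          uint32_t prefix = 0x20000; /* hypothetical prefix */
+
+          printf("%#llx\n", (unsigned long long)real_to_abs(prefix, 0x1000));  /* 0x21000 */
+          printf("%#llx\n", (unsigned long long)real_to_abs(prefix, 0x21000)); /* 0x1000 */
+          printf("%#llx\n", (unsigned long long)real_to_abs(prefix, 0x50000)); /* 0x50000 */
+          return 0;
+  }
+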
+Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Thomas Huth <thuth@redhat.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20210322140559.500716-2-imbrenda@linux.ibm.com
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/kvm/gaccess.h | 23 +++++++++++++++++------
+ 1 file changed, 17 insertions(+), 6 deletions(-)
+
+--- a/arch/s390/kvm/gaccess.h
++++ b/arch/s390/kvm/gaccess.h
+@@ -18,17 +18,14 @@
+
+ /**
+ * kvm_s390_real_to_abs - convert guest real address to guest absolute address
+- * @vcpu - guest virtual cpu
++ * @prefix - guest prefix
+ * @gra - guest real address
+ *
+ * Returns the guest absolute address that corresponds to the passed guest real
+- * address @gra of a virtual guest cpu by applying its prefix.
++ * address @gra by applying the given prefix.
+ */
+-static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
+- unsigned long gra)
++static inline unsigned long _kvm_s390_real_to_abs(u32 prefix, unsigned long gra)
+ {
+- unsigned long prefix = kvm_s390_get_prefix(vcpu);
+-
+ if (gra < 2 * PAGE_SIZE)
+ gra += prefix;
+ else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
+@@ -37,6 +34,20 @@ static inline unsigned long kvm_s390_rea
+ }
+
+ /**
++ * kvm_s390_real_to_abs - convert guest real address to guest absolute address
++ * @vcpu - guest virtual cpu
++ * @gra - guest real address
++ *
++ * Returns the guest absolute address that corresponds to the passed guest real
++ * address @gra of a virtual guest cpu by applying its prefix.
++ */
++static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
++ unsigned long gra)
++{
++ return _kvm_s390_real_to_abs(kvm_s390_get_prefix(vcpu), gra);
++}
++
++/**
+ * _kvm_s390_logical_to_effective - convert guest logical to effective address
+ * @psw: psw of the guest
+ * @ga: guest logical address
--- /dev/null
+From bdf7509bbefa20855d5f6bacdc5b62a8489477c9 Mon Sep 17 00:00:00 2001
+From: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Date: Mon, 1 Feb 2021 21:54:13 +0100
+Subject: KVM: s390: VSIE: correctly handle MVPG when in VSIE
+
+From: Claudio Imbrenda <imbrenda@linux.ibm.com>
+
+commit bdf7509bbefa20855d5f6bacdc5b62a8489477c9 upstream.
+
+Correctly handle the MVPG instruction when issued by a VSIE guest.
+
+Fixes: a3508fbe9dc6d ("KVM: s390: vsie: initial support for nested virtualization")
+Cc: stable@vger.kernel.org # f85f1baaa189: KVM: s390: split kvm_s390_logical_to_effective
+Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Acked-by: Janosch Frank <frankja@linux.ibm.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Link: https://lore.kernel.org/r/20210302174443.514363-4-imbrenda@linux.ibm.com
+[borntraeger@de.ibm.com: apply fixup from Claudio]
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/kvm/vsie.c | 98 ++++++++++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 93 insertions(+), 5 deletions(-)
+
+--- a/arch/s390/kvm/vsie.c
++++ b/arch/s390/kvm/vsie.c
+@@ -416,11 +416,6 @@ static void unshadow_scb(struct kvm_vcpu
+ memcpy((void *)((u64)scb_o + 0xc0),
+ (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
+ break;
+- case ICPT_PARTEXEC:
+- /* MVPG only */
+- memcpy((void *)((u64)scb_o + 0xc0),
+- (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
+- break;
+ }
+
+ if (scb_s->ihcpu != 0xffffU)
+@@ -983,6 +978,95 @@ static int handle_stfle(struct kvm_vcpu
+ }
+
+ /*
++ * Get a register for a nested guest.
++ * @vcpu the vcpu of the guest
++ * @vsie_page the vsie_page for the nested guest
++ * @reg the register number, the upper 4 bits are ignored.
++ * returns: the value of the register.
++ */
++static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg)
++{
++ /* no need to validate the parameter and/or perform error handling */
++ reg &= 0xf;
++ switch (reg) {
++ case 15:
++ return vsie_page->scb_s.gg15;
++ case 14:
++ return vsie_page->scb_s.gg14;
++ default:
++ return vcpu->run->s.regs.gprs[reg];
++ }
++}
++
++static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
++{
++ struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
++ unsigned long pei_dest, pei_src, src, dest, mask;
++ u64 *pei_block = &vsie_page->scb_o->mcic;
++ int edat, rc_dest, rc_src;
++ union ctlreg0 cr0;
++
++ cr0.val = vcpu->arch.sie_block->gcr[0];
++ edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
++ mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK);
++
++ dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
++ src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
++
++ rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest);
++ rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src);
++ /*
++ * Either everything went well, or something non-critical went wrong
++ * e.g. because of a race. In either case, simply retry.
++ */
++ if (rc_dest == -EAGAIN || rc_src == -EAGAIN || (!rc_dest && !rc_src)) {
++ retry_vsie_icpt(vsie_page);
++ return -EAGAIN;
++ }
++ /* Something more serious went wrong, propagate the error */
++ if (rc_dest < 0)
++ return rc_dest;
++ if (rc_src < 0)
++ return rc_src;
++
++ /* The only possible suppressing exception: just deliver it */
++ if (rc_dest == PGM_TRANSLATION_SPEC || rc_src == PGM_TRANSLATION_SPEC) {
++ clear_vsie_icpt(vsie_page);
++ rc_dest = kvm_s390_inject_program_int(vcpu, PGM_TRANSLATION_SPEC);
++ WARN_ON_ONCE(rc_dest);
++ return 1;
++ }
++
++ /*
++ * Forward the PEI intercept to the guest if it was a page fault, or
++ * also for segment and region table faults if EDAT applies.
++ */
++ if (edat) {
++ rc_dest = rc_dest == PGM_ASCE_TYPE ? rc_dest : 0;
++ rc_src = rc_src == PGM_ASCE_TYPE ? rc_src : 0;
++ } else {
++ rc_dest = rc_dest != PGM_PAGE_TRANSLATION ? rc_dest : 0;
++ rc_src = rc_src != PGM_PAGE_TRANSLATION ? rc_src : 0;
++ }
++ if (!rc_dest && !rc_src) {
++ pei_block[0] = pei_dest;
++ pei_block[1] = pei_src;
++ return 1;
++ }
++
++ retry_vsie_icpt(vsie_page);
++
++ /*
++ * The host has edat, and the guest does not, or it was an ASCE type
++ * exception. The host needs to inject the appropriate DAT interrupts
++ * into the guest.
++ */
++ if (rc_dest)
++ return inject_fault(vcpu, rc_dest, dest, 1);
++ return inject_fault(vcpu, rc_src, src, 0);
++}
++
++/*
+ * Run the vsie on a shadow scb and a shadow gmap, without any further
+ * sanity checks, handling SIE faults.
+ *
+@@ -1068,6 +1152,10 @@ static int do_vsie_run(struct kvm_vcpu *
+ if ((scb_s->ipa & 0xf000) != 0xf000)
+ scb_s->ipa += 0x1000;
+ break;
++ case ICPT_PARTEXEC:
++ if (scb_s->ipa == 0xb254)
++ rc = vsie_handle_mvpg(vcpu, vsie_page);
++ break;
+ }
+ return rc;
+ }
--- /dev/null
+From c3171e94cc1cdcc3229565244112e869f052b8d9 Mon Sep 17 00:00:00 2001
+From: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Date: Mon, 22 Mar 2021 15:05:59 +0100
+Subject: KVM: s390: VSIE: fix MVPG handling for prefixing and MSO
+
+From: Claudio Imbrenda <imbrenda@linux.ibm.com>
+
+commit c3171e94cc1cdcc3229565244112e869f052b8d9 upstream.
+
+Prefixing needs to be applied to the guest real address to translate it
+into a guest absolute address.
+
+The value of MSO needs to be added to a guest-absolute address in order to
+obtain the host-virtual address.
+
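+The two steps compose as sketched below (plain C, all values
+hypothetical; the prefixing helper mirrors _kvm_s390_real_to_abs()):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define PAGE_SIZE 4096ULL
+
+  static uint64_t real_to_abs(uint32_t prefix, uint64_t gra)
+  {
+          if (gra < 2 * PAGE_SIZE)
+                  gra += prefix;
+          else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
+                  gra -= prefix;
+          return gra;
+  }
+
+  int main(void)
+  {
+          uint32_t prefix = 0x20000;  /* guest prefix */
+          uint64_t mso = 0x100000;    /* main storage origin */
+          uint64_t dest = 0x1000;     /* guest-real MVPG operand */
+
+          /* guest real -> guest absolute, then relocate by MSO */
+          uint64_t abs = real_to_abs(prefix, dest);
+          printf("address for the shadow gmap: %#llx\n",
+                 (unsigned long long)(abs + mso));
+          return 0;
+  }
+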
+Fixes: bdf7509bbefa ("KVM: s390: VSIE: correctly handle MVPG when in VSIE")
+Reported-by: Janosch Frank <frankja@linux.ibm.com>
+Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20210322140559.500716-3-imbrenda@linux.ibm.com
+[borntraeger@de.ibm.com: simplify mso]
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/kvm/vsie.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/kvm/vsie.c
++++ b/arch/s390/kvm/vsie.c
+@@ -1001,7 +1001,7 @@ static u64 vsie_get_register(struct kvm_
+ static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+ {
+ struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+- unsigned long pei_dest, pei_src, src, dest, mask;
++ unsigned long pei_dest, pei_src, src, dest, mask, prefix;
+ u64 *pei_block = &vsie_page->scb_o->mcic;
+ int edat, rc_dest, rc_src;
+ union ctlreg0 cr0;
+@@ -1009,9 +1009,12 @@ static int vsie_handle_mvpg(struct kvm_v
+ cr0.val = vcpu->arch.sie_block->gcr[0];
+ edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
+ mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK);
++ prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
+
+ dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
++ dest = _kvm_s390_real_to_abs(prefix, dest) + scb_s->mso;
+ src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
++ src = _kvm_s390_real_to_abs(prefix, src) + scb_s->mso;
+
+ rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest);
+ rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src);
--- /dev/null
+From b208108638c4bd3215792415944467c36f5dfd97 Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <david@redhat.com>
+Date: Mon, 3 May 2021 14:12:44 +0200
+Subject: s390: fix detection of vector enhancements facility 1 vs. vector packed decimal facility
+
+From: David Hildenbrand <david@redhat.com>
+
+commit b208108638c4bd3215792415944467c36f5dfd97 upstream.
+
+The PoP documents:
+ 134: The vector packed decimal facility is installed in the
+ z/Architecture architectural mode. When bit 134 is
+ one, bit 129 is also one.
+ 135: The vector enhancements facility 1 is installed in
+ the z/Architecture architectural mode. When bit 135
+ is one, bit 129 is also one.
+
+Looks like we confuse the vector enhancements facility 1 ("EXT") with the
+vector packed decimal facility ("BCD"). Let's fix the facility checks.
+
+Detected while working on QEMU/tcg z14 support and only unlocking
+the vector enhancements facility 1, but not the vector packed decimal
+facility.
+
+Fixes: 2583b848cad0 ("s390: report new vector facilities")
+Cc: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Reviewed-by: Janosch Frank <frankja@linux.ibm.com>
+Link: https://lore.kernel.org/r/20210503121244.25232-1-david@redhat.com
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/kernel/setup.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -924,9 +924,9 @@ static int __init setup_hwcaps(void)
+ if (MACHINE_HAS_VX) {
+ elf_hwcap |= HWCAP_S390_VXRS;
+ if (test_facility(134))
+- elf_hwcap |= HWCAP_S390_VXRS_EXT;
+- if (test_facility(135))
+ elf_hwcap |= HWCAP_S390_VXRS_BCD;
++ if (test_facility(135))
++ elf_hwcap |= HWCAP_S390_VXRS_EXT;
+ if (test_facility(148))
+ elf_hwcap |= HWCAP_S390_VXRS_EXT2;
+ if (test_facility(152))
alsa-hda-realtek-remove-redundant-entry-for-alc861-haier-uniwill-devices.patch
alsa-hda-realtek-alc285-thinkpad-jack-pin-quirk-is-unreachable.patch
alsa-hda-realtek-fix-speaker-amp-on-hp-envy-aio-32.patch
+kvm-s390-vsie-correctly-handle-mvpg-when-in-vsie.patch
+kvm-s390-split-kvm_s390_logical_to_effective.patch
+kvm-s390-fix-guarded-storage-control-register-handling.patch
+s390-fix-detection-of-vector-enhancements-facility-1-vs.-vector-packed-decimal-facility.patch
+kvm-s390-vsie-fix-mvpg-handling-for-prefixing-and-mso.patch
+kvm-s390-split-kvm_s390_real_to_abs.patch
+kvm-s390-extend-kvm_s390_shadow_fault-to-return-entry-pointer.patch