git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: s390: vsie: Avoid injecting machine check on signal
authorChristian Borntraeger <borntraeger@linux.ibm.com>
Mon, 16 Mar 2026 12:13:17 +0000 (13:13 +0100)
committerChristian Borntraeger <borntraeger@linux.ibm.com>
Mon, 16 Mar 2026 15:56:39 +0000 (16:56 +0100)
The recent XFER_TO_GUEST_WORK change resulted in a situation where the
vsie code would interpret a signal during work as a machine check during
SIE, because both used the -EINTR return code.
The exit_reason of the sie64a function has nothing to do with the
kvm_run exit_reason. Rename it and define a specific code for machine
checks instead of abusing -EINTR.
Rename exit_reason to sie_return to avoid the naming conflict, and
change the code flow in vsie.c to use separate variables for rc and
sie_return.

Fixes: 2bd1337a1295e ("KVM: s390: Use generic VIRT_XFER_TO_GUEST_WORK functions")
Signed-off-by: Christian Borntraeger <borntraeger@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
arch/s390/include/asm/kvm_host.h
arch/s390/include/asm/stacktrace.h
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/entry.S
arch/s390/kernel/nmi.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/vsie.c

index 64a50f0862aabb994089090c750bd312a7233e2c..3039c88daa63332d045f2c673cf9bb584238f633 100644 (file)
@@ -710,6 +710,9 @@ void kvm_arch_crypto_clear_masks(struct kvm *kvm);
 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
                               unsigned long *aqm, unsigned long *adm);
 
+#define SIE64_RETURN_NORMAL    0
+#define SIE64_RETURN_MCCK      1
+
 int __sie64a(phys_addr_t sie_block_phys, struct kvm_s390_sie_block *sie_block, u64 *rsa,
             unsigned long gasce);
 
index c9ae680a28af910c4703eee179be4db6c1ec9ad1..ac3606c3babe492b1d0deaf33b1b078dd0ed1c95 100644 (file)
@@ -62,7 +62,7 @@ struct stack_frame {
                struct {
                        unsigned long sie_control_block;
                        unsigned long sie_savearea;
-                       unsigned long sie_reason;
+                       unsigned long sie_return;
                        unsigned long sie_flags;
                        unsigned long sie_control_block_phys;
                        unsigned long sie_guest_asce;
index e1a5b5b54e4f8e07d6ea030f848ade4df2ff16a7..fbd26f3e9f96bbb4d31c456a1bcc31a3d4ba0118 100644 (file)
@@ -63,7 +63,7 @@ int main(void)
        OFFSET(__SF_EMPTY, stack_frame, empty[0]);
        OFFSET(__SF_SIE_CONTROL, stack_frame, sie_control_block);
        OFFSET(__SF_SIE_SAVEAREA, stack_frame, sie_savearea);
-       OFFSET(__SF_SIE_REASON, stack_frame, sie_reason);
+       OFFSET(__SF_SIE_RETURN, stack_frame, sie_return);
        OFFSET(__SF_SIE_FLAGS, stack_frame, sie_flags);
        OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys);
        OFFSET(__SF_SIE_GUEST_ASCE, stack_frame, sie_guest_asce);
index 4873fe9d891ba2748c5bb5519c4b3b9ade92de64..5817cb47b2d0be5d340bd67d748e2c4c92e5b582 100644 (file)
@@ -200,7 +200,7 @@ SYM_FUNC_START(__sie64a)
        stg     %r3,__SF_SIE_CONTROL(%r15)      # ...and virtual addresses
        stg     %r4,__SF_SIE_SAVEAREA(%r15)     # save guest register save area
        stg     %r5,__SF_SIE_GUEST_ASCE(%r15)   # save guest asce
-       xc      __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
+       xc      __SF_SIE_RETURN(8,%r15),__SF_SIE_RETURN(%r15) # return code = 0
        mvc     __SF_SIE_FLAGS(8,%r15),__TI_flags(%r14) # copy thread flags
        lmg     %r0,%r13,0(%r4)                 # load guest gprs 0-13
        mvi     __TI_sie(%r14),1
@@ -237,7 +237,7 @@ SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
        xgr     %r4,%r4
        xgr     %r5,%r5
        lmg     %r6,%r14,__SF_GPRS(%r15)        # restore kernel registers
-       lg      %r2,__SF_SIE_REASON(%r15)       # return exit reason code
+       lg      %r2,__SF_SIE_RETURN(%r15)       # return sie return code
        BR_EX   %r14
 SYM_FUNC_END(__sie64a)
 EXPORT_SYMBOL(__sie64a)
index a55abbf65333a17dd869ba5fb9fd6189e2a152ee..94fbfad49f62051a901ea719d4ba511befd6de69 100644 (file)
@@ -487,8 +487,8 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
        mcck_dam_code = (mci.val & MCIC_SUBCLASS_MASK);
        if (test_cpu_flag(CIF_MCCK_GUEST) &&
        (mcck_dam_code & MCCK_CODE_NO_GUEST) != mcck_dam_code) {
-               /* Set exit reason code for host's later handling */
-               *((long *)(regs->gprs[15] + __SF_SIE_REASON)) = -EINTR;
+               /* Set sie return code for host's later handling */
+               ((struct stack_frame *)regs->gprs[15])->sie_return = SIE64_RETURN_MCCK;
        }
        clear_cpu_flag(CIF_MCCK_GUEST);
 
index 1668580008c6d0af24b8fdff532017dd4011c9c7..ebcb0ef8835e0fb42769d83bd31d38e679a5af59 100644 (file)
@@ -4617,7 +4617,7 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
+static int vcpu_post_run(struct kvm_vcpu *vcpu, int sie_return)
 {
        struct mcck_volatile_info *mcck_info;
        struct sie_page *sie_page;
@@ -4633,13 +4633,14 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
        vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
        vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
 
-       if (exit_reason == -EINTR) {
+       if (sie_return == SIE64_RETURN_MCCK) {
                sie_page = container_of(vcpu->arch.sie_block,
                                        struct sie_page, sie_block);
                mcck_info = &sie_page->mcck_info;
                kvm_s390_reinject_machine_check(vcpu, mcck_info);
                return 0;
        }
+       WARN_ON_ONCE(sie_return != SIE64_RETURN_NORMAL);
 
        if (vcpu->arch.sie_block->icptcode > 0) {
                rc = kvm_handle_sie_intercept(vcpu);
@@ -4678,7 +4679,7 @@ int noinstr kvm_s390_enter_exit_sie(struct kvm_s390_sie_block *scb,
 #define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
 static int __vcpu_run(struct kvm_vcpu *vcpu)
 {
-       int rc, exit_reason;
+       int rc, sie_return;
        struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
 
        /*
@@ -4718,9 +4719,9 @@ xfer_to_guest_mode_check:
                guest_timing_enter_irqoff();
                __disable_cpu_timer_accounting(vcpu);
 
-               exit_reason = kvm_s390_enter_exit_sie(vcpu->arch.sie_block,
-                                                     vcpu->run->s.regs.gprs,
-                                                     vcpu->arch.gmap->asce.val);
+               sie_return = kvm_s390_enter_exit_sie(vcpu->arch.sie_block,
+                                                    vcpu->run->s.regs.gprs,
+                                                    vcpu->arch.gmap->asce.val);
 
                __enable_cpu_timer_accounting(vcpu);
                guest_timing_exit_irqoff();
@@ -4743,7 +4744,7 @@ xfer_to_guest_mode_check:
                }
                kvm_vcpu_srcu_read_lock(vcpu);
 
-               rc = vcpu_post_run(vcpu, exit_reason);
+               rc = vcpu_post_run(vcpu, sie_return);
                if (rc || guestdbg_exit_pending(vcpu)) {
                        kvm_vcpu_srcu_read_unlock(vcpu);
                        break;
index c0d36afd4023f315e3eb69e208878ccba968a6b9..0330829b4046cd9d15ea933cab9121ab713ff5bf 100644 (file)
@@ -1122,6 +1122,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, struc
 {
        struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
        struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+       unsigned long sie_return = SIE64_RETURN_NORMAL;
        int guest_bp_isolation;
        int rc = 0;
 
@@ -1163,7 +1164,7 @@ xfer_to_guest_mode_check:
                        goto xfer_to_guest_mode_check;
                }
                guest_timing_enter_irqoff();
-               rc = kvm_s390_enter_exit_sie(scb_s, vcpu->run->s.regs.gprs, sg->asce.val);
+               sie_return = kvm_s390_enter_exit_sie(scb_s, vcpu->run->s.regs.gprs, sg->asce.val);
                guest_timing_exit_irqoff();
                local_irq_enable();
        }
@@ -1178,11 +1179,13 @@ skip_sie:
 
        kvm_vcpu_srcu_read_lock(vcpu);
 
-       if (rc == -EINTR) {
+       if (sie_return == SIE64_RETURN_MCCK) {
                kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
                return 0;
        }
 
+       WARN_ON_ONCE(sie_return != SIE64_RETURN_NORMAL);
+
        if (rc > 0)
                rc = 0; /* we could still have an icpt */
        else if (current->thread.gmap_int_code)