git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: s390: Remove sca_lock
author Christoph Schlameuss <schlameuss@linux.ibm.com>
Thu, 5 Jun 2025 16:14:05 +0000 (18:14 +0200)
committer Janosch Frank <frankja@linux.ibm.com>
Mon, 20 Oct 2025 08:47:58 +0000 (08:47 +0000)
Since we are no longer switching from a BSCA to an ESCA we can completely
get rid of the sca_lock. The write lock was only taken for that
conversion.

After removal of the lock some local code cleanups are possible.

Signed-off-by: Christoph Schlameuss <schlameuss@linux.ibm.com>
Suggested-by: Janosch Frank <frankja@linux.ibm.com>
[frankja@linux.ibm.com: Added suggested-by tag as discussed on list]
Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
arch/s390/include/asm/kvm_host.h
arch/s390/kvm/gaccess.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c

index 3cf14dd75409ad570b9a47c455c260839c48ee08..22cedcaea4756be50dcd65bdd85b83cdb0386dbb 100644 (file)
@@ -633,7 +633,6 @@ struct kvm_s390_pv {
 
 struct kvm_arch {
        struct esca_block *sca;
-       rwlock_t sca_lock;
        debug_info_t *dbf;
        struct kvm_s390_float_interrupt float_int;
        struct kvm_device *flic;
index 3651ab682fd76b5780790e9547c968eb490f1c4b..41ca6b0ee7a928ed16f4c8c88f35ccf60a6f37a7 100644 (file)
@@ -109,14 +109,9 @@ struct aste {
 
 int ipte_lock_held(struct kvm *kvm)
 {
-       if (sclp.has_siif) {
-               int rc;
+       if (sclp.has_siif)
+               return kvm->arch.sca->ipte_control.kh != 0;
 
-               read_lock(&kvm->arch.sca_lock);
-               rc = kvm->arch.sca->ipte_control.kh != 0;
-               read_unlock(&kvm->arch.sca_lock);
-               return rc;
-       }
        return kvm->arch.ipte_lock_count != 0;
 }
 
@@ -129,19 +124,16 @@ static void ipte_lock_simple(struct kvm *kvm)
        if (kvm->arch.ipte_lock_count > 1)
                goto out;
 retry:
-       read_lock(&kvm->arch.sca_lock);
        ic = &kvm->arch.sca->ipte_control;
        old = READ_ONCE(*ic);
        do {
                if (old.k) {
-                       read_unlock(&kvm->arch.sca_lock);
                        cond_resched();
                        goto retry;
                }
                new = old;
                new.k = 1;
        } while (!try_cmpxchg(&ic->val, &old.val, new.val));
-       read_unlock(&kvm->arch.sca_lock);
 out:
        mutex_unlock(&kvm->arch.ipte_mutex);
 }
@@ -154,14 +146,12 @@ static void ipte_unlock_simple(struct kvm *kvm)
        kvm->arch.ipte_lock_count--;
        if (kvm->arch.ipte_lock_count)
                goto out;
-       read_lock(&kvm->arch.sca_lock);
        ic = &kvm->arch.sca->ipte_control;
        old = READ_ONCE(*ic);
        do {
                new = old;
                new.k = 0;
        } while (!try_cmpxchg(&ic->val, &old.val, new.val));
-       read_unlock(&kvm->arch.sca_lock);
        wake_up(&kvm->arch.ipte_wq);
 out:
        mutex_unlock(&kvm->arch.ipte_mutex);
@@ -172,12 +162,10 @@ static void ipte_lock_siif(struct kvm *kvm)
        union ipte_control old, new, *ic;
 
 retry:
-       read_lock(&kvm->arch.sca_lock);
        ic = &kvm->arch.sca->ipte_control;
        old = READ_ONCE(*ic);
        do {
                if (old.kg) {
-                       read_unlock(&kvm->arch.sca_lock);
                        cond_resched();
                        goto retry;
                }
@@ -185,14 +173,12 @@ retry:
                new.k = 1;
                new.kh++;
        } while (!try_cmpxchg(&ic->val, &old.val, new.val));
-       read_unlock(&kvm->arch.sca_lock);
 }
 
 static void ipte_unlock_siif(struct kvm *kvm)
 {
        union ipte_control old, new, *ic;
 
-       read_lock(&kvm->arch.sca_lock);
        ic = &kvm->arch.sca->ipte_control;
        old = READ_ONCE(*ic);
        do {
@@ -201,7 +187,6 @@ static void ipte_unlock_siif(struct kvm *kvm)
                if (!new.kh)
                        new.k = 0;
        } while (!try_cmpxchg(&ic->val, &old.val, new.val));
-       read_unlock(&kvm->arch.sca_lock);
        if (!new.kh)
                wake_up(&kvm->arch.ipte_wq);
 }
index 36394ba897f5a490661f1a6462b2e4b72cb2bd1b..220d9d00c23dd38d82cc354f935acd1bfc1dcc0f 100644 (file)
@@ -45,48 +45,34 @@ static struct kvm_s390_gib *gib;
 /* handle external calls via sigp interpretation facility */
 static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
 {
-       union esca_sigp_ctrl sigp_ctrl;
-       struct esca_block *sca;
-       int c, scn;
+       struct esca_block *sca = vcpu->kvm->arch.sca;
+       union esca_sigp_ctrl sigp_ctrl = sca->cpu[vcpu->vcpu_id].sigp_ctrl;
 
        if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
                return 0;
 
        BUG_ON(!kvm_s390_use_sca_entries());
-       read_lock(&vcpu->kvm->arch.sca_lock);
-       sca = vcpu->kvm->arch.sca;
-       sigp_ctrl = sca->cpu[vcpu->vcpu_id].sigp_ctrl;
-
-       c = sigp_ctrl.c;
-       scn = sigp_ctrl.scn;
-       read_unlock(&vcpu->kvm->arch.sca_lock);
 
        if (src_id)
-               *src_id = scn;
+               *src_id = sigp_ctrl.scn;
 
-       return c;
+       return sigp_ctrl.c;
 }
 
 static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
 {
-       union esca_sigp_ctrl old_val, new_val = {0};
-       union esca_sigp_ctrl *sigp_ctrl;
-       struct esca_block *sca;
+       struct esca_block *sca = vcpu->kvm->arch.sca;
+       union esca_sigp_ctrl *sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+       union esca_sigp_ctrl old_val, new_val = {.scn = src_id, .c = 1};
        int expect, rc;
 
        BUG_ON(!kvm_s390_use_sca_entries());
-       read_lock(&vcpu->kvm->arch.sca_lock);
-       sca = vcpu->kvm->arch.sca;
-       sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;
 
        old_val = READ_ONCE(*sigp_ctrl);
-       new_val.scn = src_id;
-       new_val.c = 1;
        old_val.c = 0;
 
        expect = old_val.value;
        rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
-       read_unlock(&vcpu->kvm->arch.sca_lock);
 
        if (rc != expect) {
                /* another external call is pending */
@@ -98,18 +84,14 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
 
 static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
 {
-       union esca_sigp_ctrl *sigp_ctrl;
-       struct esca_block *sca;
+       struct esca_block *sca = vcpu->kvm->arch.sca;
+       union esca_sigp_ctrl *sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;
 
        if (!kvm_s390_use_sca_entries())
                return;
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
-       read_lock(&vcpu->kvm->arch.sca_lock);
-       sca = vcpu->kvm->arch.sca;
-       sigp_ctrl = &sca->cpu[vcpu->vcpu_id].sigp_ctrl;
 
        WRITE_ONCE(sigp_ctrl->value, 0);
-       read_unlock(&vcpu->kvm->arch.sca_lock);
 }
 
 int psw_extint_disabled(struct kvm_vcpu *vcpu)
index 78468b96d2501f06a367b4d354fa61f8392e12d0..769820e3a2431c16c7ec85dbf313f61f7ba1a3cc 100644 (file)
@@ -1938,14 +1938,12 @@ static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
        union sca_utility new, old;
        struct esca_block *sca;
 
-       read_lock(&kvm->arch.sca_lock);
        sca = kvm->arch.sca;
        old = READ_ONCE(sca->utility);
        do {
                new = old;
                new.mtcr = val;
        } while (!try_cmpxchg(&sca->utility.val, &old.val, new.val));
-       read_unlock(&kvm->arch.sca_lock);
 }
 
 static int kvm_s390_set_topo_change_indication(struct kvm *kvm,
@@ -1966,9 +1964,7 @@ static int kvm_s390_get_topo_change_indication(struct kvm *kvm,
        if (!test_kvm_facility(kvm, 11))
                return -ENXIO;
 
-       read_lock(&kvm->arch.sca_lock);
        topo = kvm->arch.sca->utility.mtcr;
-       read_unlock(&kvm->arch.sca_lock);
 
        return put_user(topo, (u8 __user *)attr->addr);
 }
@@ -3345,7 +3341,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        if (!sclp.has_64bscao)
                alloc_flags |= GFP_DMA;
-       rwlock_init(&kvm->arch.sca_lock);
        mutex_lock(&kvm_lock);
 
        kvm->arch.sca = alloc_pages_exact(sizeof(*kvm->arch.sca), alloc_flags);
@@ -3530,41 +3525,30 @@ static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
 
 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
 {
-       struct esca_block *sca;
+       struct esca_block *sca = vcpu->kvm->arch.sca;
 
        if (!kvm_s390_use_sca_entries())
                return;
-       read_lock(&vcpu->kvm->arch.sca_lock);
-       sca = vcpu->kvm->arch.sca;
 
        clear_bit_inv(vcpu->vcpu_id, (unsigned long *)sca->mcn);
        sca->cpu[vcpu->vcpu_id].sda = 0;
-       read_unlock(&vcpu->kvm->arch.sca_lock);
 }
 
 static void sca_add_vcpu(struct kvm_vcpu *vcpu)
 {
-       struct esca_block *sca;
-       phys_addr_t sca_phys;
-
-       if (!kvm_s390_use_sca_entries()) {
-               sca_phys = virt_to_phys(vcpu->kvm->arch.sca);
-
-               /* we still need the basic sca for the ipte control */
-               vcpu->arch.sie_block->scaoh = sca_phys >> 32;
-               vcpu->arch.sie_block->scaol = sca_phys;
-               return;
-       }
-       read_lock(&vcpu->kvm->arch.sca_lock);
-       sca = vcpu->kvm->arch.sca;
-       sca_phys = virt_to_phys(sca);
+       struct esca_block *sca = vcpu->kvm->arch.sca;
+       phys_addr_t sca_phys = virt_to_phys(sca);
 
-       sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
+       /* we still need the sca header for the ipte control */
        vcpu->arch.sie_block->scaoh = sca_phys >> 32;
        vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
        vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
+
+       if (!kvm_s390_use_sca_entries())
+               return;
+
        set_bit_inv(vcpu->vcpu_id, (unsigned long *)sca->mcn);
-       read_unlock(&vcpu->kvm->arch.sca_lock);
+       sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
 }
 
 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)