git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: s390: Add some helper functions needed for vSIE
author: Claudio Imbrenda <imbrenda@linux.ibm.com>
Wed, 4 Feb 2026 15:02:50 +0000 (16:02 +0100)
committer: Claudio Imbrenda <imbrenda@linux.ibm.com>
Wed, 4 Feb 2026 16:00:09 +0000 (17:00 +0100)
Implement gmap_protect_asce_top_level(), which had been left as a stub
because of cross-dependencies with other patches.

Acked-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
arch/s390/kvm/gmap.c

index 5736145ef53a9b730c8f04706b335c7cae5b7f4c..fea1c66fcabee2ed9abfa72a11d9412ee6f41936 100644 (file)
@@ -22,6 +22,7 @@
 #include "dat.h"
 #include "gmap.h"
 #include "kvm-s390.h"
+#include "faultin.h"
 
 static inline bool kvm_s390_is_in_sie(struct kvm_vcpu *vcpu)
 {
@@ -1091,10 +1092,79 @@ static struct gmap *gmap_find_shadow(struct gmap *parent, union asce asce, int e
        return NULL;
 }
 
+/* Number of pages spanned by one region/segment (CRST) table. */
+#define CRST_TABLE_PAGES (_CRST_TABLE_SIZE / PAGE_SIZE)
+/*
+ * Working state for gmap_protect_asce_top_level(): the mmu_invalidate_seq
+ * snapshot taken before the fault-in, plus one guest_fault slot for each
+ * page of the guest's top level (CRST) table.
+ */
+struct gmap_protect_asce_top_level {
+       unsigned long seq;
+       struct guest_fault f[CRST_TABLE_PAGES];
+};
+
+/*
+ * Locked stage: with mmu_lock held for writing, recheck that no MMU
+ * invalidation raced with the (lockless) fault-in, protect every
+ * faulted-in top level table page via an rmap entry, and link the shadow
+ * gmap into its parent.
+ *
+ * Returns 0 on success, -EAGAIN if the caller must redo the fault-in,
+ * or a negative error code from gmap_protect_rmap() (the caller retries
+ * on -ENOMEM after topping up its caches).
+ */
+static inline int __gmap_protect_asce_top_level(struct kvm_s390_mmu_cache *mc, struct gmap *sg,
+                                               struct gmap_protect_asce_top_level *context)
+{
+       int rc, i;
+
+       guard(write_lock)(&sg->kvm->mmu_lock);
+
+       /* The pages were faulted in without the lock; recheck now that it is held. */
+       if (kvm_s390_array_needs_retry_safe(sg->kvm, context->seq, context->f))
+               return -EAGAIN;
+
+       scoped_guard(spinlock, &sg->parent->children_lock) {
+               for (i = 0; i < CRST_TABLE_PAGES; i++) {
+                       /* Skip slots that do not hold a faulted-in page. */
+                       if (!context->f[i].valid)
+                               continue;
+                       rc = gmap_protect_rmap(mc, sg, context->f[i].gfn, 0, context->f[i].pfn,
+                                              TABLE_TYPE_REGION1 + 1, context->f[i].writable);
+                       if (rc)
+                               return rc;
+               }
+               /* Link the shadow only once every page is protected. */
+               gmap_add_child(sg->parent, sg);
+       }
+
+       /*
+        * NOTE(review): final argument is false here but true on the caller's
+        * error path — presumably "do not mark dirty" vs error cleanup; verify
+        * against kvm_s390_release_faultin_array().
+        */
+       kvm_s390_release_faultin_array(sg->kvm, context->f, false);
+       return 0;
+}
+
+/*
+ * Middle stage: perform a cheap lockless staleness check, then repeatedly
+ * top up the mmu cache and the radix tree preload pool and run the locked
+ * stage, for as long as it fails with -ENOMEM.
+ */
+static inline int _gmap_protect_asce_top_level(struct kvm_s390_mmu_cache *mc, struct gmap *sg,
+                                              struct gmap_protect_asce_top_level *context)
+{
+       int rc;
+
+       /* Lockless check first; __gmap_protect_asce_top_level() rechecks under mmu_lock. */
+       if (kvm_s390_array_needs_retry_unsafe(sg->kvm, context->seq, context->f))
+               return -EAGAIN;
+       do {
+               rc = kvm_s390_mmu_cache_topup(mc);
+               if (rc)
+                       return rc;
+               /* Must be paired with radix_tree_preload_end() below. */
+               rc = radix_tree_preload(GFP_KERNEL);
+               if (rc)
+                       return rc;
+               rc = __gmap_protect_asce_top_level(mc, sg, context);
+               radix_tree_preload_end();
+       } while (rc == -ENOMEM);
+
+       return rc;
+}
+
+/*
+ * Protect the top level table of the guest ASCE of a shadow gmap: fault
+ * in the pages backing the table, then protect them under mmu_lock.
+ * Returns 0 on success or a negative error code.
+ */
 static int gmap_protect_asce_top_level(struct kvm_s390_mmu_cache *mc, struct gmap *sg)
 {
-       KVM_BUG_ON(1, sg->kvm);
-       return -EINVAL;
+       struct gmap_protect_asce_top_level context = {};
+       union asce asce = sg->guest_asce;
+       int rc;
+
+       /* Only shadow gmaps have a guest ASCE to protect. */
+       KVM_BUG_ON(!is_shadow(sg), sg->kvm);
+
+       /* Snapshot the invalidation sequence before faulting the pages in. */
+       context.seq = sg->kvm->mmu_invalidate_seq;
+       /* Pairs with the smp_wmb() in kvm_mmu_invalidate_end(). */
+       smp_rmb();
+
+       /* Fault in the table pages (asce.dt + 1 of them — TODO confirm count). */
+       rc = kvm_s390_get_guest_pages(sg->kvm, context.f, asce.rsto, asce.dt + 1, false);
+       /* A positive return indicates a partial fault-in; treat it as a fault. */
+       if (rc > 0)
+               rc = -EFAULT;
+       if (!rc)
+               rc = _gmap_protect_asce_top_level(mc, sg, &context);
+       /* On any failure, drop the references taken by the fault-in above. */
+       if (rc)
+               kvm_s390_release_faultin_array(sg->kvm, context.f, true);
+       return rc;
 }
 
 /**