LoongArch: KVM: Add paravirt preempt feature in hypervisor side
author Bibo Mao <maobibo@loongson.cn>
Fri, 6 Feb 2026 01:28:01 +0000 (09:28 +0800)
committer Huacai Chen <chenhuacai@loongson.cn>
Fri, 6 Feb 2026 01:28:01 +0000 (09:28 +0800)
Feature KVM_FEATURE_PREEMPT is added to indicate whether a vCPU is currently
preempted, which helps the guest OS with scheduling and lock-holder checking.
Here the KVM_FEATURE_PREEMPT feature bit is added on the hypervisor side, and
one byte in the steal time structure is used as the preempted flag.
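
The guest-side consumer lands in a separate patch; as a rough sketch of how the
guest is expected to use the flag (the per-CPU "steal_time" variable and the
helper name below are assumptions of this sketch, modeled on the existing
steal-time guest code):

/*
 * Guest-side sketch, not part of this patch: report whether a remote
 * vCPU is preempted by reading the byte the hypervisor writes into the
 * shared steal-time area.
 */
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);

static bool pv_vcpu_is_preempted(int cpu)
{
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	/* Set by the host in kvm_arch_vcpu_put(), cleared on steal time update */
	return !!(READ_ONCE(st->preempted) & KVM_VCPU_PREEMPTED);
}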

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
arch/loongarch/include/asm/kvm_host.h
arch/loongarch/include/asm/kvm_para.h
arch/loongarch/include/uapi/asm/kvm.h
arch/loongarch/include/uapi/asm/kvm_para.h
arch/loongarch/kvm/vcpu.c
arch/loongarch/kvm/vm.c

index 4a7e816fae84f2af763e1c28b34220577d3a8c8a..19eb5e5c39841a6dfbf3b3fddad020294ea3bda8 100644 (file)
@@ -165,6 +165,7 @@ enum emulation_result {
 
 #define LOONGARCH_PV_FEAT_UPDATED      BIT_ULL(63)
 #define LOONGARCH_PV_FEAT_MASK         (BIT(KVM_FEATURE_IPI) |         \
+                                        BIT(KVM_FEATURE_PREEMPT) |     \
                                         BIT(KVM_FEATURE_STEAL_TIME) |  \
                                         BIT(KVM_FEATURE_USER_HCALL) |  \
                                         BIT(KVM_FEATURE_VIRT_EXTIOI))
@@ -254,6 +255,7 @@ struct kvm_vcpu_arch {
                u64 guest_addr;
                u64 last_steal;
                struct gfn_to_hva_cache cache;
+               u8  preempted;
        } st;
 };
 
index 3e4b397f423f48f3157bf0efd1bd9d20bbd9d8e1..fb17ba0fa101ba5204656a8f8f985b625a74369f 100644 (file)
@@ -37,8 +37,10 @@ struct kvm_steal_time {
        __u64 steal;
        __u32 version;
        __u32 flags;
-       __u32 pad[12];
+       __u8  preempted;
+       __u8  pad[47];
 };
+#define KVM_VCPU_PREEMPTED             (1 << 0)
 
 /*
  * Hypercall interface for KVM hypervisor
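
The layout change above keeps struct kvm_steal_time at 64 bytes: the new
preempted byte occupies the first byte of the old pad[12] area (offset 16) and
the remaining 47 bytes stay reserved. A compile-time sketch of that invariant
(not part of the patch; static_assert()/offsetof() as provided by the kernel
headers):

/*
 * Not part of the patch: build-time checks documenting that the shared
 * guest/host layout is unchanged in size and existing-field offsets.
 */
static_assert(offsetof(struct kvm_steal_time, preempted) == 16,
	      "preempted byte must start where pad[12] used to start");
static_assert(sizeof(struct kvm_steal_time) == 64,
	      "steal-time structure must stay 64 bytes");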
index de6c3f18e40ab13f9f56daeeed9b6d3c7a9fe17b..419647aacdf35a8e7399142ce986ac9e400b2426 100644 (file)
@@ -105,6 +105,7 @@ struct kvm_fpu {
 #define  KVM_LOONGARCH_VM_FEAT_PV_STEALTIME    7
 #define  KVM_LOONGARCH_VM_FEAT_PTW             8
 #define  KVM_LOONGARCH_VM_FEAT_MSGINT          9
+#define  KVM_LOONGARCH_VM_FEAT_PV_PREEMPT      10
 
 /* Device Control API on vcpu fd */
 #define KVM_LOONGARCH_VCPU_CPUCFG      0
index 76d802ef01ce3cb426fb8627a7aac7fb41cc6ec6..d28cbcadd276b93b149e873a606660e96d023640 100644 (file)
@@ -15,6 +15,7 @@
 #define CPUCFG_KVM_FEATURE             (CPUCFG_KVM_BASE + 4)
 #define  KVM_FEATURE_IPI               1
 #define  KVM_FEATURE_STEAL_TIME                2
+#define  KVM_FEATURE_PREEMPT           3
 /* BIT 24 - 31 are features configurable by user space vmm */
 #define  KVM_FEATURE_VIRT_EXTIOI       24
 #define  KVM_FEATURE_USER_HCALL                25
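
With the bit defined in the shared CPUCFG feature word above, the guest can
probe it with the regular CPUCFG accessor; a minimal detection sketch
(read_cpucfg() is the existing LoongArch accessor, the helper name here is
illustrative):

/*
 * Guest-side detection sketch: test the new bit in the KVM feature word
 * exposed at CPUCFG_KVM_FEATURE.
 */
static bool guest_has_pv_preempt(void)
{
	return !!(read_cpucfg(CPUCFG_KVM_FEATURE) & BIT(KVM_FEATURE_PREEMPT));
}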
index 4f0d10f52b99bdaab34139ef6e8105f71c36da67..550c0d05666a01b8820e9f6d139728c9c827a425 100644 (file)
@@ -181,6 +181,11 @@ static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
        }
 
        st = (struct kvm_steal_time __user *)ghc->hva;
+       if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT)) {
+               unsafe_put_user(0, &st->preempted, out);
+               vcpu->arch.st.preempted = 0;
+       }
+
        unsafe_get_user(version, &st->version, out);
        if (version & 1)
                version += 1; /* first time write, random junk */
@@ -1795,11 +1800,57 @@ out:
        return 0;
 }
 
+static void kvm_vcpu_set_pv_preempted(struct kvm_vcpu *vcpu)
+{
+       gpa_t gpa;
+       struct gfn_to_hva_cache *ghc;
+       struct kvm_memslots *slots;
+       struct kvm_steal_time __user *st;
+
+       gpa = vcpu->arch.st.guest_addr;
+       if (!(gpa & KVM_STEAL_PHYS_VALID))
+               return;
+
+       /* vCPU may be preempted for many times */
+       if (vcpu->arch.st.preempted)
+               return;
+
+       /* This happens on process exit */
+       if (unlikely(current->mm != vcpu->kvm->mm))
+               return;
+
+       gpa &= KVM_STEAL_PHYS_MASK;
+       ghc = &vcpu->arch.st.cache;
+       slots = kvm_memslots(vcpu->kvm);
+       if (slots->generation != ghc->generation || gpa != ghc->gpa) {
+               if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
+                       ghc->gpa = INVALID_GPA;
+                       return;
+               }
+       }
+
+       st = (struct kvm_steal_time __user *)ghc->hva;
+       unsafe_put_user(KVM_VCPU_PREEMPTED, &st->preempted, out);
+       vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
+out:
+       mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
+}
+
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
-       int cpu;
+       int cpu, idx;
        unsigned long flags;
 
+       if (vcpu->preempted && kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT)) {
+               /*
+                * Take the srcu lock as memslots will be accessed to check
+                * the gfn cache generation against the memslots generation.
+                */
+               idx = srcu_read_lock(&vcpu->kvm->srcu);
+               kvm_vcpu_set_pv_preempted(vcpu);
+               srcu_read_unlock(&vcpu->kvm->srcu, idx);
+       }
+
        local_irq_save(flags);
        cpu = smp_processor_id();
        vcpu->arch.last_sched_cpu = cpu;
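
For reference, the vcpu->preempted flag checked in kvm_arch_vcpu_put() above is
set by generic KVM code in the preempt-notifier sched-out path before the arch
hook runs; a simplified paraphrase of virt/kvm/kvm_main.c (details vary by
kernel version):

/*
 * Simplified paraphrase of the generic sched-out notifier: the flag is
 * only set when the task is still runnable, i.e. it was preempted
 * rather than having blocked voluntarily.
 */
static void kvm_sched_out(struct preempt_notifier *pn, struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	if (current->on_rq)
		WRITE_ONCE(vcpu->preempted, true);

	kvm_arch_vcpu_put(vcpu);
}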
index d3ff6d6966f8803abb51082e6fcafe4d1832e5a7..9681ade890c600e16fc2e682b1b5b2839bdb8300 100644 (file)
@@ -52,7 +52,9 @@ static void kvm_vm_init_features(struct kvm *kvm)
        kvm->arch.pv_features = BIT(KVM_FEATURE_IPI);
        kvm->arch.kvm_features = BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI);
        if (kvm_pvtime_supported()) {
+               kvm->arch.pv_features |= BIT(KVM_FEATURE_PREEMPT);
                kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
+               kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_PREEMPT);
                kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_STEALTIME);
        }
 }
@@ -154,6 +156,7 @@ static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr
        case KVM_LOONGARCH_VM_FEAT_MSGINT:
        case KVM_LOONGARCH_VM_FEAT_PMU:
        case KVM_LOONGARCH_VM_FEAT_PV_IPI:
+       case KVM_LOONGARCH_VM_FEAT_PV_PREEMPT:
        case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME:
                if (kvm_vm_support(&kvm->arch, attr->attr))
                        return 0;