From: Bibo Mao
Date: Fri, 6 Feb 2026 01:28:01 +0000 (+0800)
Subject: LoongArch: KVM: Add paravirt preempt feature in hypervisor side
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=9b486cdd032a90032c6b567ea723595205ca2626;p=thirdparty%2Flinux.git

LoongArch: KVM: Add paravirt preempt feature in hypervisor side

Add feature KVM_FEATURE_PREEMPT to report whether a vCPU is preempted
or not, which helps the guest OS make better scheduling and lock-waiter
decisions. Advertise the new feature and use one byte of the steal time
structure as the preempted flag.

Signed-off-by: Bibo Mao
Signed-off-by: Huacai Chen
---

diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index 4a7e816fae84f..19eb5e5c39841 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -165,6 +165,7 @@ enum emulation_result {
 
 #define LOONGARCH_PV_FEAT_UPDATED	BIT_ULL(63)
 #define LOONGARCH_PV_FEAT_MASK		(BIT(KVM_FEATURE_IPI) |	\
+					 BIT(KVM_FEATURE_PREEMPT) |	\
 					 BIT(KVM_FEATURE_STEAL_TIME) |	\
 					 BIT(KVM_FEATURE_USER_HCALL) |	\
 					 BIT(KVM_FEATURE_VIRT_EXTIOI))
@@ -254,6 +255,7 @@ struct kvm_vcpu_arch {
 		u64 guest_addr;
 		u64 last_steal;
 		struct gfn_to_hva_cache cache;
+		u8 preempted;
 	} st;
 };
 
diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h
index 3e4b397f423f4..fb17ba0fa101b 100644
--- a/arch/loongarch/include/asm/kvm_para.h
+++ b/arch/loongarch/include/asm/kvm_para.h
@@ -37,8 +37,10 @@ struct kvm_steal_time {
 	__u64 steal;
 	__u32 version;
 	__u32 flags;
-	__u32 pad[12];
+	__u8 preempted;
+	__u8 pad[47];
 };
+#define KVM_VCPU_PREEMPTED	(1 << 0)
 
 /*
  * Hypercall interface for KVM hypervisor
diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
index de6c3f18e40ab..419647aacdf35 100644
--- a/arch/loongarch/include/uapi/asm/kvm.h
+++ b/arch/loongarch/include/uapi/asm/kvm.h
@@ -105,6 +105,7 @@ struct kvm_fpu {
 #define KVM_LOONGARCH_VM_FEAT_PV_STEALTIME	7
 #define KVM_LOONGARCH_VM_FEAT_PTW		8
 #define KVM_LOONGARCH_VM_FEAT_MSGINT		9
+#define KVM_LOONGARCH_VM_FEAT_PV_PREEMPT	10
 
 /* Device Control API on vcpu fd */
 #define KVM_LOONGARCH_VCPU_CPUCFG	0
diff --git a/arch/loongarch/include/uapi/asm/kvm_para.h b/arch/loongarch/include/uapi/asm/kvm_para.h
index 76d802ef01ce3..d28cbcadd276b 100644
--- a/arch/loongarch/include/uapi/asm/kvm_para.h
+++ b/arch/loongarch/include/uapi/asm/kvm_para.h
@@ -15,6 +15,7 @@
 #define CPUCFG_KVM_FEATURE	(CPUCFG_KVM_BASE + 4)
 #define KVM_FEATURE_IPI		1
 #define KVM_FEATURE_STEAL_TIME	2
+#define KVM_FEATURE_PREEMPT	3
 /* BIT 24 - 31 are features configurable by user space vmm */
 #define KVM_FEATURE_VIRT_EXTIOI	24
 #define KVM_FEATURE_USER_HCALL	25
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 4f0d10f52b99b..550c0d05666a0 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -181,6 +181,11 @@ static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
 	}
 
 	st = (struct kvm_steal_time __user *)ghc->hva;
+	if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT)) {
+		unsafe_put_user(0, &st->preempted, out);
+		vcpu->arch.st.preempted = 0;
+	}
+
 	unsafe_get_user(version, &st->version, out);
 	if (version & 1)
 		version += 1; /* first time write, random junk */
@@ -1795,11 +1800,57 @@ out:
 	return 0;
 }
 
+static void kvm_vcpu_set_pv_preempted(struct kvm_vcpu *vcpu)
+{
+	gpa_t gpa;
+	struct gfn_to_hva_cache *ghc;
+	struct kvm_memslots *slots;
+	struct kvm_steal_time __user *st;
+
+	gpa = vcpu->arch.st.guest_addr;
+	if (!(gpa & KVM_STEAL_PHYS_VALID))
+		return;
+
+	/* vCPU may be preempted many times */
+	if (vcpu->arch.st.preempted)
+		return;
+
+	/* This happens on process exit */
+	if (unlikely(current->mm != vcpu->kvm->mm))
+		return;
+
+	gpa &= KVM_STEAL_PHYS_MASK;
+	ghc = &vcpu->arch.st.cache;
+	slots = kvm_memslots(vcpu->kvm);
+	if (slots->generation != ghc->generation || gpa != ghc->gpa) {
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
+			ghc->gpa = INVALID_GPA;
+			return;
+		}
+	}
+
+	st = (struct kvm_steal_time __user *)ghc->hva;
+	unsafe_put_user(KVM_VCPU_PREEMPTED, &st->preempted, out);
+	vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
+out:
+	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
+}
+
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	int cpu;
+	int cpu, idx;
 	unsigned long flags;
 
+	if (vcpu->preempted && kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT)) {
+		/*
+		 * Take the srcu lock as memslots will be accessed to check
+		 * the gfn cache generation against the memslots generation.
+		 */
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
+		kvm_vcpu_set_pv_preempted(vcpu);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
+	}
+
 	local_irq_save(flags);
 	cpu = smp_processor_id();
 	vcpu->arch.last_sched_cpu = cpu;
diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c
index d3ff6d6966f88..9681ade890c60 100644
--- a/arch/loongarch/kvm/vm.c
+++ b/arch/loongarch/kvm/vm.c
@@ -52,7 +52,9 @@ static void kvm_vm_init_features(struct kvm *kvm)
 	kvm->arch.pv_features = BIT(KVM_FEATURE_IPI);
 	kvm->arch.kvm_features = BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI);
 	if (kvm_pvtime_supported()) {
+		kvm->arch.pv_features |= BIT(KVM_FEATURE_PREEMPT);
 		kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
+		kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_PREEMPT);
 		kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_STEALTIME);
 	}
 }
@@ -154,6 +156,7 @@ static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr
 	case KVM_LOONGARCH_VM_FEAT_MSGINT:
 	case KVM_LOONGARCH_VM_FEAT_PMU:
 	case KVM_LOONGARCH_VM_FEAT_PV_IPI:
+	case KVM_LOONGARCH_VM_FEAT_PV_PREEMPT:
 	case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME:
 		if (kvm_vm_support(&kvm->arch, attr->attr))
 			return 0;
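
For context, the guest-side consumer of this flag is not part of this patch.
The sketch below shows how a guest might read the preempted byte, modeled on
the x86 vcpu_is_preempted() helper; the per-CPU steal_time area and the
pv_vcpu_is_preempted() name are illustrative assumptions, while
struct kvm_steal_time and KVM_VCPU_PREEMPTED come from the hunks above.

	/* Illustrative guest-side sketch, not part of this patch. */
	static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);

	static bool pv_vcpu_is_preempted(int cpu)
	{
		struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

		/*
		 * The hypervisor sets the byte in kvm_arch_vcpu_put() and
		 * clears it in kvm_update_stolen_time(); a racy read is
		 * acceptable, since a stale value only costs a suboptimal
		 * scheduling hint, not correctness.
		 */
		return !!(READ_ONCE(src->preempted) & KVM_VCPU_PREEMPTED);
	}

A guest would presumably register the per-CPU area through the same steal-time
setup it already uses for KVM_FEATURE_STEAL_TIME and wire the helper into
vcpu_is_preempted(), so spinlock slowpaths and the scheduler stop spinning on,
or steering work toward, a vCPU the host has scheduled out.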