RISC-V: KVM: Move copy_sbi_ext_reg_indices() to SBI implementation
author    Anup Patel <apatel@ventanamicro.com>
          Sat, 23 Aug 2025 15:59:45 +0000 (21:29 +0530)
committer Anup Patel <anup@brainfault.org>
          Tue, 16 Sep 2025 05:24:18 +0000 (10:54 +0530)

The ONE_REG handling of the SBI extension enable/disable registers and
the SBI extension state registers already lives under the SBI
implementation. Along the same lines, move copy_sbi_ext_reg_indices()
under the SBI implementation as well.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Link: https://lore.kernel.org/r/20250823155947.1354229-5-apatel@ventanamicro.com
Signed-off-by: Anup Patel <anup@brainfault.org>
arch/riscv/include/asm/kvm_vcpu_sbi.h
arch/riscv/kvm/vcpu_onereg.c
arch/riscv/kvm/vcpu_sbi.c
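
For context (illustrative, not part of the patch): the indices built by this
helper are what userspace enumerates through KVM_GET_REG_LIST. A minimal
userspace sketch, assuming a RISC-V host with KVM enabled and the kernel uapi
headers installed, that counts the SBI extension "single" registers reported
for a vCPU:

/*
 * Illustrative sketch only -- not part of this patch. Counts the SBI
 * extension enable/disable (single) registers that KVM_GET_REG_LIST
 * reports, i.e. the indices produced by kvm_riscv_vcpu_reg_indices_sbi_ext().
 * Assumes a RISC-V host with /dev/kvm available.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;
	unsigned long sbi_ext = 0;

	if (kvm < 0 || vm < 0 || vcpu < 0) {
		perror("kvm setup");
		return 1;
	}

	/* A first call with n = 0 fails with E2BIG but fills in the count. */
	ioctl(vcpu, KVM_GET_REG_LIST, &probe);

	list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
	if (!list)
		return 1;
	list->n = probe.n;
	if (ioctl(vcpu, KVM_GET_REG_LIST, list) < 0) {
		perror("KVM_GET_REG_LIST");
		return 1;
	}

	/* Keep only KVM_REG_RISCV_SBI_EXT registers of the SINGLE subtype. */
	for (__u64 i = 0; i < list->n; i++) {
		__u64 reg = list->reg[i];

		if ((reg & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_SBI_EXT &&
		    (reg & KVM_REG_RISCV_SUBTYPE_MASK) == KVM_REG_RISCV_SBI_SINGLE)
			sbi_ext++;
	}

	printf("SBI extension single registers: %lu\n", sbi_ext);
	return 0;
}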

diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
index 8970cc7530c4169e7fd1a27bea95dedef1bc67da..d75ca45c01529de13646cb04fd412c5a0229d76e 100644
--- a/arch/riscv/include/asm/kvm_vcpu_sbi.h
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -77,6 +77,7 @@ void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
                                      unsigned long pc, unsigned long a1);
 void kvm_riscv_vcpu_sbi_load_reset_state(struct kvm_vcpu *vcpu);
 int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_riscv_vcpu_reg_indices_sbi_ext(struct kvm_vcpu *vcpu, u64 __user *uindices);
 int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
                                   const struct kvm_one_reg *reg);
 int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
@@ -86,7 +87,6 @@ int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu, const struct kvm_one_reg *
 int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
                                struct kvm_vcpu *vcpu, unsigned long extid);
-bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx);
 int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);
 void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_sbi_deinit(struct kvm_vcpu *vcpu);
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index 0f4e444e5e100c565208672da8167817966a304a..865dae903aa0f66cea8483576437f2402e9a7711 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -1082,34 +1082,9 @@ static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
        return copy_isa_ext_reg_indices(vcpu, NULL);
 }
 
-static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
-{
-       unsigned int n = 0;
-
-       for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
-               u64 size = IS_ENABLED(CONFIG_32BIT) ?
-                          KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
-               u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
-                         KVM_REG_RISCV_SBI_SINGLE | i;
-
-               if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
-                       continue;
-
-               if (uindices) {
-                       if (put_user(reg, uindices))
-                               return -EFAULT;
-                       uindices++;
-               }
-
-               n++;
-       }
-
-       return n;
-}
-
 static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
 {
-       return copy_sbi_ext_reg_indices(vcpu, NULL);
+       return kvm_riscv_vcpu_reg_indices_sbi_ext(vcpu, NULL);
 }
 
 static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
@@ -1237,7 +1212,7 @@ int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
                return ret;
        uindices += ret;
 
-       ret = copy_sbi_ext_reg_indices(vcpu, uindices);
+       ret = kvm_riscv_vcpu_reg_indices_sbi_ext(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += ret;
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index 04903e5012d641a069a29ea9b25eb97abdc9b8a4..1b13623380e158cb15fc9beb1062aacaf3416899 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -110,7 +110,7 @@ riscv_vcpu_get_sbi_ext(struct kvm_vcpu *vcpu, unsigned long idx)
        return sext;
 }
 
-bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx)
+static bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx)
 {
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *sext;
@@ -288,6 +288,31 @@ static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
        return 0;
 }
 
+int kvm_riscv_vcpu_reg_indices_sbi_ext(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+       unsigned int n = 0;
+
+       for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
+               u64 size = IS_ENABLED(CONFIG_32BIT) ?
+                          KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+               u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+                         KVM_REG_RISCV_SBI_SINGLE | i;
+
+               if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
+                       continue;
+
+               if (uindices) {
+                       if (put_user(reg, uindices))
+                               return -EFAULT;
+                       uindices++;
+               }
+
+               n++;
+       }
+
+       return n;
+}
+
 int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
                                   const struct kvm_one_reg *reg)
 {