unsigned long reg_size, const void *reg_val);
};
-void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_riscv_vcpu_sbi_forward_handler(struct kvm_vcpu *vcpu,
+ struct kvm_run *run,
+ struct kvm_vcpu_sbi_return *retdata);
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
struct kvm_run *run,
u32 type, u64 flags);
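The new prototype is not arbitrary: `struct kvm_vcpu_sbi_extension` dispatches every SBI call through a `handler` callback taking exactly these three arguments, so renaming the helper and giving it this signature lets it be installed in extension tables directly (as the later hunks do). For reference, a paraphrased sketch of the types involved; member names follow arch/riscv/include/asm/kvm_vcpu_sbi.h, but unrelated members are elided:

	struct kvm_vcpu_sbi_return {
		unsigned long out_val;		/* SBI return value (guest a1) */
		unsigned long err_val;		/* SBI error code (guest a0) */
		struct kvm_cpu_trap *utrap;
		bool uexit;			/* exit to userspace to finish the call */
	};

	struct kvm_vcpu_sbi_extension {
		unsigned long extid_start;
		unsigned long extid_end;
		bool default_disabled;
		/* Handlers return Linux error codes, not SBI error codes */
		int (*handler)(struct kvm_vcpu *vcpu, struct kvm_run *run,
			       struct kvm_vcpu_sbi_return *retdata);
		unsigned long (*probe)(struct kvm_vcpu *vcpu);
		/* ... */
	};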
return sext && scontext->ext_status[sext->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
}
-void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_riscv_vcpu_sbi_forward_handler(struct kvm_vcpu *vcpu,
+ struct kvm_run *run,
+ struct kvm_vcpu_sbi_return *retdata)
{
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
run->riscv_sbi.args[5] = cp->a5;
run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
run->riscv_sbi.ret[1] = 0;
+ retdata->uexit = true;
+ return 0;
}
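With `retdata->uexit` set, the vCPU exits to userspace with `run->exit_reason == KVM_EXIT_RISCV_SBI` and the `riscv_sbi` fields filled in above. A minimal, hypothetical VMM-side sketch of completing such a call (not part of the patch; it assumes only the uapi layout from <linux/kvm.h>):

	#include <stdio.h>
	#include <linux/kvm.h>

	#define SBI_EXT_DBCN		0x4442434E	/* "DBCN" */
	#define SBI_DBCN_WRITE_BYTE	2

	static void handle_riscv_sbi_exit(struct kvm_run *run)
	{
		if (run->riscv_sbi.extension_id == SBI_EXT_DBCN &&
		    run->riscv_sbi.function_id == SBI_DBCN_WRITE_BYTE) {
			putchar((int)(run->riscv_sbi.args[0] & 0xff));
			run->riscv_sbi.ret[0] = 0;	/* SBI_SUCCESS */
		}
		/* Anything left untouched keeps the SBI_ERR_NOT_SUPPORTED
		 * value pre-seeded by the kernel above. */
	}

When the VMM resumes the vCPU, kvm_riscv_vcpu_sbi_return() copies ret[0]/ret[1] back into the guest's a0/a1 and advances sepc past the ecall.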
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
 * For experimental/vendor extensions,
 * forward them to userspace
 */
- kvm_riscv_vcpu_sbi_forward(vcpu, run);
- retdata->uexit = true;
+ return kvm_riscv_vcpu_sbi_forward_handler(vcpu, run, retdata);
} else {
sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a0);
*out_val = sbi_ext && sbi_ext->probe ?
.handler = kvm_sbi_ext_base_handler,
};
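One subtlety in the probe branch above: returning the forward handler's result discards `*out_val`, but that is fine. Once `retdata->uexit` is set, the common ecall path skips the usual write-back of err_val/out_val into the guest's a0/a1 and exits to userspace instead; a0/a1 are later filled from run->riscv_sbi.ret[0..1]. Roughly (a paraphrase of the kvm_riscv_vcpu_sbi_ecall() tail, not part of this patch):

	/* tail of kvm_riscv_vcpu_sbi_ecall(), paraphrased */
	if (sbi_ret.uexit)
		return 0;		/* to userspace; sepc not advanced yet */

	cp->a0 = sbi_ret.err_val;	/* normal in-kernel completion */
	cp->a1 = sbi_ret.out_val;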
-static int kvm_sbi_ext_forward_handler(struct kvm_vcpu *vcpu,
- struct kvm_run *run,
- struct kvm_vcpu_sbi_return *retdata)
-{
- /*
- * Both SBI experimental and vendor extensions are
- * unconditionally forwarded to userspace.
- */
- kvm_riscv_vcpu_sbi_forward(vcpu, run);
- retdata->uexit = true;
- return 0;
-}
-
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental = {
.extid_start = SBI_EXT_EXPERIMENTAL_START,
.extid_end = SBI_EXT_EXPERIMENTAL_END,
- .handler = kvm_sbi_ext_forward_handler,
+ .handler = kvm_riscv_vcpu_sbi_forward_handler,
};
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor = {
.extid_start = SBI_EXT_VENDOR_START,
.extid_end = SBI_EXT_VENDOR_END,
- .handler = kvm_sbi_ext_forward_handler,
+ .handler = kvm_riscv_vcpu_sbi_forward_handler,
};
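Both tables span whole extension-ID ranges rather than single IDs: the SBI specification reserves 0x08000000-0x08FFFFFF for experimental and 0x09000000-0x09FFFFFF for vendor-specific extensions. Dispatch still works because extension lookup is range-based; conceptually it behaves like the sketch below (an illustration reusing the struct sketched earlier, not the kernel's kvm_vcpu_sbi_find_ext()):

	#include <stddef.h>

	static const struct kvm_vcpu_sbi_extension *
	find_ext(const struct kvm_vcpu_sbi_extension *table, size_t n,
		 unsigned long extid)
	{
		for (size_t i = 0; i < n; i++) {
			if (extid >= table[i].extid_start &&
			    extid <= table[i].extid_end)
				return &table[i];
		}
		return NULL;
	}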
.handler = kvm_sbi_ext_srst_handler,
};
-static int kvm_sbi_ext_dbcn_handler(struct kvm_vcpu *vcpu,
- struct kvm_run *run,
- struct kvm_vcpu_sbi_return *retdata)
-{
- struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
- unsigned long funcid = cp->a6;
-
- switch (funcid) {
- case SBI_EXT_DBCN_CONSOLE_WRITE:
- case SBI_EXT_DBCN_CONSOLE_READ:
- case SBI_EXT_DBCN_CONSOLE_WRITE_BYTE:
- /*
- * The SBI debug console functions are unconditionally
- * forwarded to the userspace.
- */
- kvm_riscv_vcpu_sbi_forward(vcpu, run);
- retdata->uexit = true;
- break;
- default:
- retdata->err_val = SBI_ERR_NOT_SUPPORTED;
- }
-
- return 0;
-}
-
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn = {
.extid_start = SBI_EXT_DBCN,
.extid_end = SBI_EXT_DBCN,
.default_disabled = true,
- .handler = kvm_sbi_ext_dbcn_handler,
+ .handler = kvm_riscv_vcpu_sbi_forward_handler,
};
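A behavioral nuance worth noting: the deleted DBCN handler rejected unknown function IDs in-kernel, while the common handler now forwards every DBCN call. The guest-visible result is unchanged for a VMM that ignores unknown function IDs, since ret[0] arrives pre-seeded with SBI_ERR_NOT_SUPPORTED. For reference, the guest side of such a call is a plain ecall with the extension ID in a7 and the function ID in a6; a sketch assuming GCC-style inline assembly (not code from the patch):

	/* Guest-side sketch: SBI DBCN console_write_byte (funcid 2). */
	static inline long sbi_dbcn_write_byte(unsigned char b)
	{
		register unsigned long a0 asm("a0") = b;	/* arg0 */
		register unsigned long a1 asm("a1");		/* value out */
		register unsigned long a6 asm("a6") = 2;	/* function ID */
		register unsigned long a7 asm("a7") = 0x4442434E; /* "DBCN" */

		asm volatile("ecall"
			     : "+r"(a0), "=r"(a1)
			     : "r"(a6), "r"(a7)
			     : "memory");
		(void)a1;		/* no value returned for write_byte */
		return (long)a0;	/* SBI error code */
	}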
kvm_riscv_vcpu_sbi_request_reset(vcpu, cp->a1, cp->a2);
/* userspace provides the suspend implementation */
- kvm_riscv_vcpu_sbi_forward(vcpu, run);
- retdata->uexit = true;
- break;
+ return kvm_riscv_vcpu_sbi_forward_handler(vcpu, run, retdata);
default:
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
break;
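The `return kvm_riscv_vcpu_sbi_forward_handler(...)` form is a drop-in for the old call/uexit/break sequence because the helper itself returns 0, a Linux status code. By convention these handlers report host-side failures through their return value and SBI-level failures through retdata->err_val, as the default case above does. An illustrative non-forwarding handler for contrast; feature_available() is invented for the example:

	static int example_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   struct kvm_vcpu_sbi_return *retdata)
	{
		if (!feature_available(vcpu))	/* hypothetical check */
			retdata->err_val = SBI_ERR_NOT_SUPPORTED; /* guest a0 */
		return 0;	/* host-side success either way */
	}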
 * The CONSOLE_GETCHAR/CONSOLE_PUTCHAR SBI calls cannot be
 * handled in the kernel, so we forward them to userspace
 */
- kvm_riscv_vcpu_sbi_forward(vcpu, run);
- retdata->uexit = true;
+ ret = kvm_riscv_vcpu_sbi_forward_handler(vcpu, run, retdata);
break;
case SBI_EXT_0_1_SET_TIMER:
#if __riscv_xlen == 32