*/
enum sbi_fwft_feature_t id;
+ /**
+ * @first_reg_num: ONE_REG index of the feature's first register
+ */
+ unsigned long first_reg_num;
+
/**
* @supported: Check if the feature is supported on the vcpu
*
* This callback is mandatory
*/
- long (*set)(struct kvm_vcpu *vcpu, struct kvm_sbi_fwft_config *conf, unsigned long value);
+ long (*set)(struct kvm_vcpu *vcpu, struct kvm_sbi_fwft_config *conf,
+ bool one_reg_access, unsigned long value);
/**
* @get: Get the feature current value
*
* This callback is mandatory
*/
- long (*get)(struct kvm_vcpu *vcpu, struct kvm_sbi_fwft_config *conf, unsigned long *value);
+ long (*get)(struct kvm_vcpu *vcpu, struct kvm_sbi_fwft_config *conf,
+ bool one_reg_access, unsigned long *value);
};
static const enum sbi_fwft_feature_t kvm_fwft_defined_features[] = {
static long kvm_sbi_fwft_set_misaligned_delegation(struct kvm_vcpu *vcpu,
struct kvm_sbi_fwft_config *conf,
- unsigned long value)
+ bool one_reg_access, unsigned long value)
{
struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
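+ /*
+ * On a ONE_REG access the vCPU may not be loaded on this host hart,
+ * so only the cached config is updated here; hedeleg is written to
+ * the CSR when the vCPU is next loaded.
+ */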
if (value == 1) {
cfg->hedeleg |= MIS_DELEG;
- csr_set(CSR_HEDELEG, MIS_DELEG);
+ if (!one_reg_access)
+ csr_set(CSR_HEDELEG, MIS_DELEG);
} else if (value == 0) {
cfg->hedeleg &= ~MIS_DELEG;
- csr_clear(CSR_HEDELEG, MIS_DELEG);
+ if (!one_reg_access)
+ csr_clear(CSR_HEDELEG, MIS_DELEG);
} else {
return SBI_ERR_INVALID_PARAM;
}
static long kvm_sbi_fwft_get_misaligned_delegation(struct kvm_vcpu *vcpu,
struct kvm_sbi_fwft_config *conf,
- unsigned long *value)
+ bool one_reg_access, unsigned long *value)
{
- *value = (csr_read(CSR_HEDELEG) & MIS_DELEG) == MIS_DELEG;
+ struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
+
+ *value = (cfg->hedeleg & MIS_DELEG) == MIS_DELEG;
return SBI_SUCCESS;
}
static long kvm_sbi_fwft_set_pointer_masking_pmlen(struct kvm_vcpu *vcpu,
struct kvm_sbi_fwft_config *conf,
- unsigned long value)
+ bool one_reg_access, unsigned long value)
{
struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu);
unsigned long pmm;
* update here so that the VCPU sees the pointer masking mode change
* immediately.
*/
- csr_write(CSR_HENVCFG, vcpu->arch.cfg.henvcfg);
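+ /*
+ * For ONE_REG access only the cached henvcfg is updated; it is
+ * applied to the CSR when the vCPU is loaded.
+ */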
+ if (!one_reg_access)
+ csr_write(CSR_HENVCFG, vcpu->arch.cfg.henvcfg);
return SBI_SUCCESS;
}
static long kvm_sbi_fwft_get_pointer_masking_pmlen(struct kvm_vcpu *vcpu,
struct kvm_sbi_fwft_config *conf,
- unsigned long *value)
+ bool one_reg_access, unsigned long *value)
{
switch (vcpu->arch.cfg.henvcfg & ENVCFG_PMM) {
case ENVCFG_PMM_PMLEN_0:
static const struct kvm_sbi_fwft_feature features[] = {
{
.id = SBI_FWFT_MISALIGNED_EXC_DELEG,
+ .first_reg_num = offsetof(struct kvm_riscv_sbi_fwft, misaligned_deleg.enable) /
+ sizeof(unsigned long),
.supported = kvm_sbi_fwft_misaligned_delegation_supported,
.reset = kvm_sbi_fwft_reset_misaligned_delegation,
.set = kvm_sbi_fwft_set_misaligned_delegation,
#ifndef CONFIG_32BIT
{
.id = SBI_FWFT_POINTER_MASKING_PMLEN,
+ .first_reg_num = offsetof(struct kvm_riscv_sbi_fwft, pointer_masking.enable) /
+ sizeof(unsigned long),
.supported = kvm_sbi_fwft_pointer_masking_pmlen_supported,
.reset = kvm_sbi_fwft_reset_pointer_masking_pmlen,
.set = kvm_sbi_fwft_set_pointer_masking_pmlen,
#endif
};
+static const struct kvm_sbi_fwft_feature *kvm_sbi_fwft_regnum_to_feature(unsigned long reg_num)
+{
+ const struct kvm_sbi_fwft_feature *feature;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(features); i++) {
+ feature = &features[i];
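+ /* Each feature spans three consecutive regs: enable, flags, value */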
+ if (feature->first_reg_num <= reg_num && reg_num < (feature->first_reg_num + 3))
+ return feature;
+ }
+
+ return NULL;
+}
+
static struct kvm_sbi_fwft_config *
kvm_sbi_fwft_get_config(struct kvm_vcpu *vcpu, enum sbi_fwft_feature_t feature)
{
return SBI_ERR_DENIED;
}
- if (!tconf->supported)
+ if (!tconf->supported || !tconf->enabled)
return SBI_ERR_NOT_SUPPORTED;
*conf = tconf;
conf->flags = flags;
- return conf->feature->set(vcpu, conf, value);
+ return conf->feature->set(vcpu, conf, false, value);
}
static int kvm_sbi_fwft_get(struct kvm_vcpu *vcpu, unsigned long feature,
if (ret)
return ret;
- return conf->feature->get(vcpu, conf, value);
+ return conf->feature->get(vcpu, conf, false, value);
}
static int kvm_sbi_ext_fwft_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
else
conf->supported = true;
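+ /* Supported features start out enabled; the enable reg can clear this */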
+ conf->enabled = conf->supported;
conf->feature = feature;
}
}
}
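+/*
+ * Only regs backed by a supported feature are exposed, so the reg count
+ * can be smaller than sizeof(struct kvm_riscv_sbi_fwft) in ulongs.
+ */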
+static unsigned long kvm_sbi_ext_fwft_get_reg_count(struct kvm_vcpu *vcpu)
+{
+ unsigned long max_reg_count = sizeof(struct kvm_riscv_sbi_fwft) / sizeof(unsigned long);
+ const struct kvm_sbi_fwft_feature *feature;
+ struct kvm_sbi_fwft_config *conf;
+ unsigned long reg, ret = 0;
+
+ for (reg = 0; reg < max_reg_count; reg++) {
+ feature = kvm_sbi_fwft_regnum_to_feature(reg);
+ if (!feature)
+ continue;
+
+ conf = kvm_sbi_fwft_get_config(vcpu, feature->id);
+ if (!conf || !conf->supported)
+ continue;
+
+ ret++;
+ }
+
+ return ret;
+}
+
+static int kvm_sbi_ext_fwft_get_reg_id(struct kvm_vcpu *vcpu, int index, u64 *reg_id)
+{
+ int reg, max_reg_count = sizeof(struct kvm_riscv_sbi_fwft) / sizeof(unsigned long);
+ const struct kvm_sbi_fwft_feature *feature;
+ struct kvm_sbi_fwft_config *conf;
+ int idx = 0;
+
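+ /* The index counts only supported regs, in struct order */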
+ for (reg = 0; reg < max_reg_count; reg++) {
+ feature = kvm_sbi_fwft_regnum_to_feature(reg);
+ if (!feature)
+ continue;
+
+ conf = kvm_sbi_fwft_get_config(vcpu, feature->id);
+ if (!conf || !conf->supported)
+ continue;
+
+ if (index == idx) {
+ *reg_id = KVM_REG_RISCV |
+ (IS_ENABLED(CONFIG_32BIT) ?
+ KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64) |
+ KVM_REG_RISCV_SBI_STATE |
+ KVM_REG_RISCV_SBI_FWFT | reg;
+ return 0;
+ }
+
+ idx++;
+ }
+
+ return -ENOENT;
+}
+
+static int kvm_sbi_ext_fwft_get_reg(struct kvm_vcpu *vcpu, unsigned long reg_num,
+ unsigned long reg_size, void *reg_val)
+{
+ const struct kvm_sbi_fwft_feature *feature;
+ struct kvm_sbi_fwft_config *conf;
+ unsigned long *value;
+ int ret = 0;
+
+ if (reg_size != sizeof(unsigned long))
+ return -EINVAL;
+ value = reg_val;
+
+ feature = kvm_sbi_fwft_regnum_to_feature(reg_num);
+ if (!feature)
+ return -ENOENT;
+
+ conf = kvm_sbi_fwft_get_config(vcpu, feature->id);
+ if (!conf || !conf->supported)
+ return -ENOENT;
+
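+ /* Per-feature reg layout: 0 = enable, 1 = flags, 2 = value */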
+ switch (reg_num - feature->first_reg_num) {
+ case 0:
+ *value = conf->enabled;
+ break;
+ case 1:
+ *value = conf->flags;
+ break;
+ case 2:
+ ret = conf->feature->get(vcpu, conf, true, value);
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return sbi_err_map_linux_errno(ret);
+}
+
+static int kvm_sbi_ext_fwft_set_reg(struct kvm_vcpu *vcpu, unsigned long reg_num,
+ unsigned long reg_size, const void *reg_val)
+{
+ const struct kvm_sbi_fwft_feature *feature;
+ struct kvm_sbi_fwft_config *conf;
+ unsigned long value;
+ int ret = 0;
+
+ if (reg_size != sizeof(unsigned long))
+ return -EINVAL;
+ value = *(const unsigned long *)reg_val;
+
+ feature = kvm_sbi_fwft_regnum_to_feature(reg_num);
+ if (!feature)
+ return -ENOENT;
+
+ conf = kvm_sbi_fwft_get_config(vcpu, feature->id);
+ if (!conf || !conf->supported)
+ return -ENOENT;
+
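+ /* Same per-feature reg layout as kvm_sbi_ext_fwft_get_reg() */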
+ switch (reg_num - feature->first_reg_num) {
+ case 0:
+ switch (value) {
+ case 0:
+ conf->enabled = false;
+ break;
+ case 1:
+ conf->enabled = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case 1:
+ conf->flags = value & SBI_FWFT_SET_FLAG_LOCK;
+ break;
+ case 2:
+ ret = conf->feature->set(vcpu, conf, true, value);
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return sbi_err_map_linux_errno(ret);
+}
+
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_fwft = {
.extid_start = SBI_EXT_FWFT,
.extid_end = SBI_EXT_FWFT,
.init = kvm_sbi_ext_fwft_init,
.deinit = kvm_sbi_ext_fwft_deinit,
.reset = kvm_sbi_ext_fwft_reset,
+ .state_reg_subtype = KVM_REG_RISCV_SBI_FWFT,
+ .get_state_reg_count = kvm_sbi_ext_fwft_get_reg_count,
+ .get_state_reg_id = kvm_sbi_ext_fwft_get_reg_id,
+ .get_state_reg = kvm_sbi_ext_fwft_get_reg,
+ .set_state_reg = kvm_sbi_ext_fwft_set_reg,
};
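
For reference, a minimal userspace sketch of reading one of these state
regs through the standard ONE_REG ioctl. The fwft_get_reg() helper and
the vcpu_fd handle are illustrative assumptions, not part of this patch;
the ID encoding mirrors what kvm_sbi_ext_fwft_get_reg_id() builds above,
and RV64 (KVM_REG_SIZE_U64) is assumed, matching the kernel side's
IS_ENABLED(CONFIG_32BIT) check which would select KVM_REG_SIZE_U32 instead.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: read one SBI FWFT state reg from a vCPU fd. */
static int fwft_get_reg(int vcpu_fd, uint64_t reg_num, uint64_t *val)
{
	struct kvm_one_reg reg = {
		/* Same encoding as kvm_sbi_ext_fwft_get_reg_id() above */
		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_SBI_STATE |
		      KVM_REG_RISCV_SBI_FWFT | reg_num,
		.addr = (uint64_t)(unsigned long)val,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}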