{
    ARMISARegisters host_isar = {};
    static const struct isar_regs {
-        int reg;
+        hv_feature_reg_t reg;
        ARMIDRegisterIdx index;
    } regs[] = {
-        { HV_SYS_REG_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_IDX },
-        { HV_SYS_REG_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_IDX },
+        { HV_FEATURE_REG_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_IDX },
+        { HV_FEATURE_REG_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_IDX },
        /* Add ID_AA64PFR2_EL1 here when HVF supports it */
-        { HV_SYS_REG_ID_AA64DFR0_EL1, ID_AA64DFR0_EL1_IDX },
-        { HV_SYS_REG_ID_AA64DFR1_EL1, ID_AA64DFR1_EL1_IDX },
-        { HV_SYS_REG_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_IDX },
-        { HV_SYS_REG_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_IDX },
+        { HV_FEATURE_REG_ID_AA64DFR0_EL1, ID_AA64DFR0_EL1_IDX },
+        { HV_FEATURE_REG_ID_AA64DFR1_EL1, ID_AA64DFR1_EL1_IDX },
+        { HV_FEATURE_REG_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_IDX },
+        { HV_FEATURE_REG_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_IDX },
        /* Add ID_AA64ISAR2_EL1 here when HVF supports it */
-        { HV_SYS_REG_ID_AA64MMFR0_EL1, ID_AA64MMFR0_EL1_IDX },
-        { HV_SYS_REG_ID_AA64MMFR1_EL1, ID_AA64MMFR1_EL1_IDX },
-        { HV_SYS_REG_ID_AA64MMFR2_EL1, ID_AA64MMFR2_EL1_IDX },
+        { HV_FEATURE_REG_ID_AA64MMFR0_EL1, ID_AA64MMFR0_EL1_IDX },
+        { HV_FEATURE_REG_ID_AA64MMFR1_EL1, ID_AA64MMFR1_EL1_IDX },
+        { HV_FEATURE_REG_ID_AA64MMFR2_EL1, ID_AA64MMFR2_EL1_IDX },
        /* Add ID_AA64MMFR3_EL1 here when HVF supports it */
    };
-    hv_vcpu_t fd;
    hv_return_t r = HV_SUCCESS;
-    hv_vcpu_exit_t *exit;
+    hv_vcpu_config_t config = hv_vcpu_config_create();
    uint64_t t;
    int i;
                     (1ULL << ARM_FEATURE_PMU) |
                     (1ULL << ARM_FEATURE_GENERIC_TIMER);
-    /* We set up a small vcpu to extract host registers */
-
-    if (hv_vcpu_create(&fd, &exit, NULL) != HV_SUCCESS) {
-        return false;
-    }
-
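+    /* Read the host ID registers from a default vCPU configuration */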
    for (i = 0; i < ARRAY_SIZE(regs); i++) {
-        r |= hv_vcpu_get_sys_reg(fd, regs[i].reg,
-                                  &host_isar.idregs[regs[i].index]);
+        r |= hv_vcpu_config_get_feature_reg(config, regs[i].reg,
+                                            &host_isar.idregs[regs[i].index]);
    }
-    r |= hv_vcpu_destroy(fd);
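+    /* The config is an os_object; release our reference */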
+    os_release(config);
    /*
     * Hardcode MIDR because Apple deliberately doesn't expose a divergent