int ret;
ms->accelerator = accel;
*(acc->allowed) = true;
- ret = acc->init_machine(ms);
+ ret = acc->init_machine(accel, ms);
if (ret < 0) {
ms->accelerator = NULL;
*(acc->allowed) = false;
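
Reduced to a minimal, self-contained sketch (plain C with hypothetical Demo/demo names, not the actual QEMU types), the caller-side change above amounts to the dispatcher handing its AccelState to the hook alongside the MachineState:

    #include <stdio.h>

    typedef struct AccelState { const char *name; } AccelState;
    typedef struct MachineState { int smp_cpus; } MachineState;

    typedef struct AccelClass {
        /* new shape: the accelerator instance is an explicit first argument */
        int (*init_machine)(AccelState *as, MachineState *ms);
    } AccelClass;

    static int demo_init_machine(AccelState *as, MachineState *ms)
    {
        printf("initializing '%s' for %d vCPUs\n", as->name, ms->smp_cpus);
        return 0;
    }

    int main(void)
    {
        AccelClass acc = { .init_machine = demo_init_machine };
        AccelState accel = { .name = "demo" };
        MachineState ms = { .smp_cpus = 2 };

        /* was: acc.init_machine(&ms); the accelerator is now passed explicitly */
        return acc.init_machine(&accel, &ms) < 0 ? 1 : 0;
    }
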
.log_sync = hvf_log_sync,
};
-static int hvf_accel_init(MachineState *ms)
+static int hvf_accel_init(AccelState *as, MachineState *ms)
{
int x;
hv_return_t ret;
return 0;
}
-static int kvm_init(MachineState *ms)
+static int kvm_init(AccelState *as, MachineState *ms)
{
MachineClass *mc = MACHINE_GET_CLASS(ms);
static const char upgrade_note[] =
qatomic_set_i64(&qtest_clock_counter, count);
}
-static int qtest_init_accel(MachineState *ms)
+static int qtest_init_accel(AccelState *as, MachineState *ms)
{
return 0;
}
bool one_insn_per_tb;
-static int tcg_init_machine(MachineState *ms)
+static int tcg_init_machine(AccelState *as, MachineState *ms)
{
TCGState *s = TCG_STATE(current_accel());
unsigned max_threads = 1;
}
}
-static int xen_init(MachineState *ms)
+static int xen_init(AccelState *as, MachineState *ms)
{
MachineClass *mc = MACHINE_GET_CLASS(ms);
opt_one_insn_per_tb, &error_abort);
object_property_set_int(OBJECT(accel), "tb-size",
opt_tb_size, &error_abort);
- ac->init_machine(NULL);
+ ac->init_machine(accel, NULL);
}
/*
/* Cached by accel_init_ops_interfaces() when created */
AccelOpsClass *ops;
- int (*init_machine)(MachineState *ms);
+ int (*init_machine)(AccelState *as, MachineState *ms);
bool (*cpu_common_realize)(CPUState *cpu, Error **errp);
void (*cpu_common_unrealize)(CPUState *cpu);
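
On the callee side, the point of the extra parameter is that a hook receives its own state from the caller instead of reaching for it through a global accessor such as current_accel(). A stand-alone illustration of the before/after, again with hypothetical names rather than real QEMU code:

    #include <assert.h>

    typedef struct AccelState { int ready; } AccelState;
    typedef struct MachineState { int unused; } MachineState;

    /* stand-in for a current_accel()-style process-global lookup */
    static AccelState *the_accel;
    static AccelState *current_accel_stub(void) { return the_accel; }

    /* old shape: the hook must fetch its own state from the global */
    static int init_machine_old(MachineState *ms)
    {
        AccelState *as = current_accel_stub();
        (void)ms;
        as->ready = 1;
        return 0;
    }

    /* new shape: the caller hands the state in, no global lookup needed */
    static int init_machine_new(AccelState *as, MachineState *ms)
    {
        (void)ms;
        as->ready = 1;
        return 0;
    }

    int main(void)
    {
        AccelState a = { 0 };
        MachineState m = { 0 };

        the_accel = &a;
        assert(init_machine_old(&m) == 0 && a.ready);

        a.ready = 0;
        assert(init_machine_new(&a, &m) == 0 && a.ready);
        return 0;
    }
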
opt_one_insn_per_tb, &error_abort);
object_property_set_int(OBJECT(accel), "tb-size",
opt_tb_size, &error_abort);
- ac->init_machine(NULL);
+ ac->init_machine(accel, NULL);
}
/*
/* -------------------------------------------------------------------------- */
static int
-nvmm_accel_init(MachineState *ms)
+nvmm_accel_init(AccelState *as, MachineState *ms)
{
int ret, err;
* Partition support
*/
-static int whpx_accel_init(MachineState *ms)
+static int whpx_accel_init(AccelState *as, MachineState *ms)
{
struct whpx_state *whpx;
int ret;