struct x86_model *models;
};
+/* Process-wide CPU map, loaded lazily on first use and shared (read-only)
+ * by all callers for the lifetime of the process; it is intentionally
+ * never freed.
+ * NOTE(review): VIR_ONCE_GLOBAL_INIT(virCPUx86Map) presumably generates the
+ * virCPUx86MapInitialize() wrapper (calling virCPUx86MapOnceInit exactly
+ * once) that virCPUx86GetMap relies on — confirm against the macro
+ * definition in virthread.h. */
+static struct x86_map* virCPUx86Map = NULL;
+int virCPUx86MapOnceInit(void);
+VIR_ONCE_GLOBAL_INIT(virCPUx86Map);
+
enum compare_result {
SUBSET,
static struct x86_map *
-x86LoadMap(void)
+virCPUx86LoadMap(void)
{
struct x86_map *map;
}
+/* One-time loader for the global virCPUx86Map. Must not be called
+ * directly: it is invoked exactly once through the initializer wrapper
+ * generated by VIR_ONCE_GLOBAL_INIT(virCPUx86Map).
+ *
+ * Returns 0 on success, -1 if the CPU map could not be loaded. */
+int
+virCPUx86MapOnceInit(void)
+{
+ if (!(virCPUx86Map = virCPUx86LoadMap()))
+ return -1;
+
+ return 0;
+}
+
+
+/* Accessor for the lazily-initialized global CPU map. Triggers the
+ * one-time initialization on first call and returns the shared map,
+ * or NULL if initialization failed (an error will have been reported
+ * by the loader).
+ *
+ * The returned map is owned by this module; callers must NOT free it
+ * (note the former x86MapFree() calls are removed at every call site
+ * elsewhere in this patch). */
+static const struct x86_map *
+virCPUx86GetMap(void)
+{
+ if (virCPUx86MapInitialize() < 0)
+ return NULL;
+
+ return virCPUx86Map;
+}
+
+
static char *
x86CPUDataFormat(const virCPUData *data)
{
virCPUDataPtr *guest,
char **message)
{
- struct x86_map *map = NULL;
+ const struct x86_map *map = NULL;
struct x86_model *host_model = NULL;
struct x86_model *cpu_force = NULL;
struct x86_model *cpu_require = NULL;
return VIR_CPU_COMPARE_INCOMPATIBLE;
}
- if (!(map = x86LoadMap()) ||
+ if (!(map = virCPUx86GetMap()) ||
!(host_model = x86ModelFromCPU(host, map, VIR_CPU_FEATURE_REQUIRE)) ||
!(cpu_force = x86ModelFromCPU(cpu, map, VIR_CPU_FEATURE_FORCE)) ||
!(cpu_require = x86ModelFromCPU(cpu, map, VIR_CPU_FEATURE_REQUIRE)) ||
}
cleanup:
- x86MapFree(map);
x86ModelFree(host_model);
x86ModelFree(diff);
x86ModelFree(cpu_force);
static int
x86AddFeatures(virCPUDefPtr cpu,
- struct x86_map *map)
+ const struct x86_map *map)
{
const struct x86_model *candidate;
const struct x86_feature *feature = map->features;
unsigned int flags)
{
int ret = -1;
- struct x86_map *map;
+ const struct x86_map *map;
const struct x86_model *candidate;
virCPUDefPtr cpuCandidate;
virCPUDefPtr cpuModel = NULL;
virCheckFlags(VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES, -1);
- if (data == NULL || (map = x86LoadMap()) == NULL)
+ if (!data || !(map = virCPUx86GetMap()))
return -1;
candidate = map->models;
ret = 0;
out:
- x86MapFree(map);
virCPUDefFree(cpuModel);
return ret;
virCPUDataPtr *forbidden,
virCPUDataPtr *vendor)
{
- struct x86_map *map = NULL;
+ const struct x86_map *map = NULL;
virCPUx86Data *data_forced = NULL;
virCPUx86Data *data_required = NULL;
virCPUx86Data *data_optional = NULL;
virCPUx86Data *data_disabled = NULL;
virCPUx86Data *data_forbidden = NULL;
virCPUx86Data *data_vendor = NULL;
- int ret = -1;
if (forced)
*forced = NULL;
if (vendor)
*vendor = NULL;
- if ((map = x86LoadMap()) == NULL)
+ if ((map = virCPUx86GetMap()) == NULL)
goto error;
if (forced) {
!(*vendor = virCPUx86MakeData(arch, &data_vendor)))
goto error;
- ret = 0;
-
-cleanup:
- x86MapFree(map);
-
- return ret;
+ return 0;
error:
virCPUx86DataFree(data_forced);
x86FreeCPUData(*forbidden);
if (vendor)
x86FreeCPUData(*vendor);
- goto cleanup;
+ return -1;
}
unsigned int nmodels,
unsigned int flags)
{
- struct x86_map *map = NULL;
+ const struct x86_map *map = NULL;
struct x86_model *base_model = NULL;
virCPUDefPtr cpu = NULL;
size_t i;
struct x86_model *model = NULL;
bool outputVendor = true;
- if (!(map = x86LoadMap()))
+ if (!(map = virCPUx86GetMap()))
goto error;
if (!(base_model = x86ModelFromCPU(cpus[0], map, VIR_CPU_FEATURE_REQUIRE)))
cleanup:
x86ModelFree(base_model);
- x86MapFree(map);
return cpu;
{
int ret = -1;
size_t i;
- struct x86_map *map;
+ const struct x86_map *map;
struct x86_model *host_model = NULL;
- if (!(map = x86LoadMap()) ||
+ if (!(map = virCPUx86GetMap()) ||
!(host_model = x86ModelFromCPU(host, map, VIR_CPU_FEATURE_REQUIRE)))
goto cleanup;
ret = 0;
cleanup:
- x86MapFree(map);
x86ModelFree(host_model);
return ret;
}
x86HasFeature(const virCPUData *data,
const char *name)
{
- struct x86_map *map;
+ const struct x86_map *map;
struct x86_feature *feature;
int ret = -1;
- if (!(map = x86LoadMap()))
+ if (!(map = virCPUx86GetMap()))
return -1;
if (!(feature = x86FeatureFind(map, name)))
ret = x86DataIsSubset(data->data.x86, feature->data) ? 1 : 0;
cleanup:
- x86MapFree(map);
return ret;
}