CPUX86State *env = &cpu->env;
MachineState *ms = MACHINE(hotplug_dev);
X86MachineState *x86ms = X86_MACHINE(hotplug_dev);
- X86CPUTopoInfo topo_info;
+ X86CPUTopoInfo *topo_info = &env->topo_info;
if (!object_dynamic_cast(OBJECT(cpu), ms->cpu_type)) {
error_setg(errp, "Invalid CPU type, expected cpu type: '%s'",
           ms->cpu_type);
return;
}
}
- init_topo_info(&topo_info, x86ms);
+ init_topo_info(topo_info, x86ms);
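/*
 * init_topo_info() copies the machine's -smp geometry into the topology
 * struct. With topo_info now pointing at the cached env->topo_info, this
 * one call populates the per-CPU copy directly, which is why the manual
 * env->nr_modules/env->nr_dies assignments below can be dropped. A rough
 * sketch of the helper, assuming the field layout used elsewhere in this
 * patch:
 *
 *     topo_info->dies_per_pkg     = ms->smp.dies;
 *     topo_info->modules_per_die  = ms->smp.modules;
 *     topo_info->cores_per_module = ms->smp.cores;
 *     topo_info->threads_per_core = ms->smp.threads;
 */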
if (ms->smp.modules > 1) {
- env->nr_modules = ms->smp.modules;
set_bit(CPU_TOPOLOGY_LEVEL_MODULE, env->avail_cpu_topo);
}
if (ms->smp.dies > 1) {
- env->nr_dies = ms->smp.dies;
set_bit(CPU_TOPOLOGY_LEVEL_DIE, env->avail_cpu_topo);
}
topo_ids.module_id = cpu->module_id;
topo_ids.core_id = cpu->core_id;
topo_ids.smt_id = cpu->thread_id;
- cpu->apic_id = x86_apicid_from_topo_ids(&topo_info, &topo_ids);
+ cpu->apic_id = x86_apicid_from_topo_ids(topo_info, &topo_ids);
}
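/*
 * x86_apicid_from_topo_ids() packs each topology ID into its bit field of
 * the APIC ID, with field widths derived from the topology counts (each
 * width is the ceiling log2 of the count at that level). For example, with
 * 2 threads per core and 4 cores: the SMT field is 1 bit and the core
 * field 2 bits, so core_id 3 / smt_id 1 in package 0 yields
 * apic_id = (3 << 1) | 1 = 7.
 */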
cpu_slot = x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, &idx);
if (!cpu_slot) {
- x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
+ x86_topo_ids_from_apicid(cpu->apic_id, topo_info, &topo_ids);
error_setg(errp,
    "Invalid CPU [socket: %u, die: %u, module: %u, core: %u, thread: %u]"
    " with APIC ID %" PRIu32 ", valid index range 0:%d",
    topo_ids.pkg_id, topo_ids.die_id, topo_ids.module_id,
    topo_ids.core_id, topo_ids.smt_id, cpu->apic_id,
    ms->possible_cpus->len - 1);
return;
}
/* TODO: move socket_id/core_id/thread_id checks into x86_cpu_realizefn()
* once -smp refactoring is complete and there will be CPU private
* CPUState::nr_cores and CPUState::nr_threads fields instead of globals */
- x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
+ x86_topo_ids_from_apicid(cpu->apic_id, topo_info, &topo_ids);
if (cpu->socket_id != -1 && cpu->socket_id != topo_ids.pkg_id) {
error_setg(errp, "property socket-id: %u doesn't match set apic-id:"
" 0x%x (socket-id: %u)", cpu->socket_id, cpu->apic_id,
CPUState *cs = env_cpu(env);
uint32_t limit;
uint32_t signature[3];
- X86CPUTopoInfo topo_info;
+ X86CPUTopoInfo *topo_info = &env->topo_info;
uint32_t threads_per_pkg;
- topo_info.dies_per_pkg = env->nr_dies;
- topo_info.modules_per_die = env->nr_modules;
- topo_info.cores_per_module = cs->nr_cores / env->nr_dies / env->nr_modules;
- topo_info.threads_per_core = cs->nr_threads;
-
- threads_per_pkg = x86_threads_per_pkg(&topo_info);
+ threads_per_pkg = x86_threads_per_pkg(topo_info);
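/*
 * x86_threads_per_pkg() folds the whole hierarchy into one count, i.e.
 * dies_per_pkg * modules_per_die * cores_per_module * threads_per_core.
 * E.g. 1 die * 2 modules * 4 cores * 2 threads gives a 16-thread package,
 * replacing the open-coded computation removed above.
 */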
/* Calculate & apply limits for different index ranges */
if (index >= 0xC0000000) {
int host_vcpus_per_cache = 1 + ((*eax & 0x3FFC000) >> 14);
*eax &= ~0xFC000000;
- *eax |= max_core_ids_in_package(&topo_info) << 26;
+ *eax |= max_core_ids_in_package(topo_info) << 26;
if (host_vcpus_per_cache > threads_per_pkg) {
*eax &= ~0x3FFC000;
/* Share the cache at package level. */
- *eax |= max_thread_ids_for_cache(&topo_info,
+ *eax |= max_thread_ids_for_cache(topo_info,
CPU_TOPOLOGY_LEVEL_SOCKET) << 14;
}
}
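/*
 * CPUID 4's EAX[25:14] holds "maximum addressable IDs sharing this cache"
 * minus one. If the host claims more vCPUs per cache than the guest
 * package even has, the field is re-encoded so the cache appears shared by
 * the whole package instead; e.g. a host value of 16 with an 8-thread
 * guest package is clamped to the package-level ID count.
 */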
switch (count) {
case 0: /* L1 dcache info */
encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
- &topo_info,
+ topo_info,
eax, ebx, ecx, edx);
if (!cpu->l1_cache_per_core) {
*eax &= ~MAKE_64BIT_MASK(14, 12);
}
break;
case 1: /* L1 icache info */
encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
- &topo_info,
+ topo_info,
eax, ebx, ecx, edx);
if (!cpu->l1_cache_per_core) {
*eax &= ~MAKE_64BIT_MASK(14, 12);
}
break;
case 2: /* L2 cache info */
encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
- &topo_info,
+ topo_info,
eax, ebx, ecx, edx);
break;
case 3: /* L3 cache info */
if (cpu->enable_l3_cache) {
encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
- &topo_info,
+ topo_info,
eax, ebx, ecx, edx);
break;
}
switch (count) {
case 0:
- *eax = apicid_core_offset(&topo_info);
- *ebx = topo_info.threads_per_core;
+ *eax = apicid_core_offset(topo_info);
+ *ebx = topo_info->threads_per_core;
*ecx |= CPUID_B_ECX_TOPO_LEVEL_SMT << 8;
break;
case 1:
- *eax = apicid_pkg_offset(&topo_info);
+ *eax = apicid_pkg_offset(topo_info);
*ebx = threads_per_pkg;
*ecx |= CPUID_B_ECX_TOPO_LEVEL_CORE << 8;
break;
default:
    *eax = 0;
    *ebx = 0;
    *ecx |= CPUID_B_ECX_TOPO_LEVEL_INVALID << 8;
    break;
}
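/*
 * For CPUID 0xB, each subleaf reports one topology level: EAX[4:0] is the
 * number of bits to shift the x2APIC ID right to reach the next level, and
 * EBX is the number of logical processors at this level. E.g. with 2
 * threads per core and 4 cores per package, subleaf 0 (SMT) reports
 * EAX=1, EBX=2 and subleaf 1 (core) reports EAX=3, EBX=8.
 */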
- encode_topo_cpuid1f(env, count, &topo_info, eax, ebx, ecx, edx);
+ encode_topo_cpuid1f(env, count, topo_info, eax, ebx, ecx, edx);
break;
case 0xD: {
/* Processor Extended State */
/*
 * Bits 15:12 is "The number of bits in the initial
 * Core::X86::Apic::ApicId[ApicId] value that indicate
 * thread ID within a package".
 * Bits 7:0 is "The number of threads in the package is NC+1"
 */
- *ecx = (apicid_pkg_offset(&topo_info) << 12) |
+ *ecx = (apicid_pkg_offset(topo_info) << 12) |
(threads_per_pkg - 1);
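/*
 * Worked example: with apicid_pkg_offset() == 3 and an 8-thread package,
 * ECX = (3 << 12) | (8 - 1) = 0x3007, i.e. bits 15:12 = 3 and
 * NC (bits 7:0) = 7.
 */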
} else {
*ecx = 0;
switch (count) {
case 0: /* L1 dcache info */
encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache,
- &topo_info, eax, ebx, ecx, edx);
+ topo_info, eax, ebx, ecx, edx);
break;
case 1: /* L1 icache info */
encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache,
- &topo_info, eax, ebx, ecx, edx);
+ topo_info, eax, ebx, ecx, edx);
break;
case 2: /* L2 cache info */
encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache,
- &topo_info, eax, ebx, ecx, edx);
+ topo_info, eax, ebx, ecx, edx);
break;
case 3: /* L3 cache info */
encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache,
- &topo_info, eax, ebx, ecx, edx);
+ topo_info, eax, ebx, ecx, edx);
break;
default: /* end of info */
*eax = *ebx = *ecx = *edx = 0;
break;
case 0x8000001E:
if (cpu->core_id <= 255) {
- encode_topo_cpuid8000001e(cpu, &topo_info, eax, ebx, ecx, edx);
+ encode_topo_cpuid8000001e(cpu, topo_info, eax, ebx, ecx, edx);
} else {
*eax = 0;
*ebx = 0;
*ecx = 0;
*edx = 0;
}
/*
 * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
 * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
 * based on inputs (sockets,cores,threads), it is still better to give
 * users a warning.
- *
- * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
- * cs->nr_threads hasn't be populated yet and the checking is incorrect.
*/
if (IS_AMD_CPU(env) &&
!(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
- cs->nr_threads > 1) {
+ env->topo_info.threads_per_core > 1) {
warn_report_once("This family of AMD CPU doesn't support "
"hyperthreading(%d). Please configure -smp "
"options properly or try enabling topoext "
- "feature.", cs->nr_threads);
+ "feature.", env->topo_info.threads_per_core);
}
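/*
 * For instance, an AMD model without TOPOEXT started with something like
 * "-smp 4,sockets=1,cores=2,threads=2" would trip this warning, since
 * threads_per_core is 2; adding "+topoext" to -cpu (where the model allows
 * it) or dropping threads=2 avoids it.
 */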
#ifndef CONFIG_USER_ONLY
{
CPUX86State *env = &cpu->env;
- env->nr_modules = 1;
- env->nr_dies = 1;
+ env->topo_info = (X86CPUTopoInfo) {1, 1, 1, 1};
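/*
 * The positional initializer follows the field order seen in the removed
 * assignments above: dies_per_pkg, modules_per_die, cores_per_module and
 * threads_per_core all default to 1, i.e. a single-thread, single-core,
 * single-die package until init_topo_info() overwrites it from -smp.
 */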
/* thread, core and socket levels are set by default. */
set_bit(CPU_TOPOLOGY_LEVEL_THREAD, env->avail_cpu_topo);