{
assert(l2->size % 1024 == 0);
assert(l2->associativity > 0);
- assert(l2->lines_per_tag > 0);
assert(l2->line_size > 0);
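/*
 * ECX layout (CPUID Fn8000_0006): L2 size in KB [31:16], encoded
 * associativity [15:12], lines per tag [11:8], line size in bytes [7:0].
 */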
*ecx = ((l2->size / 1024) << 16) |
(X86_ENC_ASSOC(l2->associativity) << 12) |
(l2->lines_per_tag << 8) | (l2->line_size);
+ /* For Intel, EDX is reserved. */
if (l3) {
assert(l3->size % (512 * 1024) == 0);
assert(l3->associativity > 0);
- assert(l3->lines_per_tag > 0);
assert(l3->line_size > 0);
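/*
 * EDX layout (CPUID Fn8000_0006): L3 size in 512 KB units [31:18],
 * encoded associativity [15:12], lines per tag [11:8], line size [7:0].
 */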
*edx = ((l3->size / (512 * 1024)) << 18) |
(X86_ENC_ASSOC(l3->associativity) << 12) |
.share_level = CPU_TOPOLOGY_LEVEL_CORE,
};
-/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
.type = UNIFIED_CACHE,
.level = 2,
*edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
break;
case 0x80000006:
- /* cache info (L2 cache) */
+ /* cache info (L2 cache/TLB/L3 cache) */
if (cpu->cache_info_passthrough) {
x86_cpu_get_cache_cpuid(index, 0, eax, ebx, ecx, edx);
break;
}
+
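+ /*
+  * Intel (and Zhaoxin, which follows Intel here) only defines ECX for
+  * this leaf: EAX/EBX are reserved, and the L2 data comes from the
+  * leaf-4 style cache model rather than the AMD one.
+  */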
+ if (cpu->vendor_cpuid_only_v2 &&
+ (IS_INTEL_CPU(env) || IS_ZHAOXIN_CPU(env))) {
+ *eax = *ebx = 0;
+ encode_cache_cpuid80000006(env->cache_info_cpuid4.l2_cache,
+ NULL, ecx, edx);
+ break;
+ }
+
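/*
 * AMD layout: EAX reports the L2 TLB for 2 MB/4 MB pages and EBX the
 * L2 TLB for 4 KB pages, each with DTLB associativity/entries in the
 * upper half and ITLB associativity/entries in the lower half.
 */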
*eax = (X86_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) |
(L2_DTLB_2M_ENTRIES << 16) |
(X86_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) |
(L2_ITLB_2M_ENTRIES);
*ebx = (X86_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) |
(L2_DTLB_4K_ENTRIES << 16) |
(X86_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) |
(L2_ITLB_4K_ENTRIES);
+
encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
cpu->enable_l3_cache ?
env->cache_info_amd.l3_cache : NULL,