The x86_ext_save_areas[] array is expected to be well initialized by
accelerators, and its xstate detail information cannot be changed by
the user. So use x86_ext_save_areas[] to encode the CPUID.0xD subleaves
directly, without additional hardcoding and masking.
And for arch LBR, KVM fills its xstate info into x86_ext_save_areas[]
via host_cpuid(). The info obtained this way matches what would be
retrieved from x86_cpu_get_supported_cpuid() (since KVM simply fills
the CPUID leaf with the host xstate info directly anyway). So just use
the initialized x86_ext_save_areas[] instead of calling
x86_cpu_get_supported_cpuid().
Tested-by: Farrah Chen <farrah.chen@intel.com>
Signed-off-by: Zhao Liu <zhao1.liu@intel.com>
Link: https://lore.kernel.org/r/20251211060801.3600039-7-zhao1.liu@intel.com
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
}
} else if (count == 0xf && cpu->enable_pmu
&& (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
- x86_cpu_get_supported_cpuid(0xD, count, eax, ebx, ecx, edx);
+ const ExtSaveArea *esa = &x86_ext_save_areas[count];
+
+ *eax = esa->size;
+ *ebx = esa->offset;
+ *ecx = esa->ecx;
} else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
const ExtSaveArea *esa = &x86_ext_save_areas[count];
- if (x86_cpu_xsave_xcr0_components(cpu) & (1ULL << count)) {
- *eax = esa->size;
- *ebx = esa->offset;
- *ecx = esa->ecx &
- (ESA_FEATURE_ALIGN64_MASK | ESA_FEATURE_XFD_MASK);
- } else if (x86_cpu_xsave_xss_components(cpu) & (1ULL << count)) {
- *eax = esa->size;
- *ebx = 0;
- *ecx = 1;
- }
+ *eax = esa->size;
+ *ebx = esa->offset;
+ *ecx = esa->ecx;
}
break;
}