unsigned long size;
};
-/* AMD doesn't have CPUID4. Emulate it here to report the same
-   information to the user. This makes some assumptions about the machine:
-   L2 not shared, no SMT etc. that is currently true on AMD CPUs.
-
-   In theory the TLBs could be reported as fake type (they are in "dummy").
-   Maybe later */
+/*
+ * Fallback AMD CPUID(4) emulation
+ * AMD CPUs with TOPOEXT can just use CPUID(0x8000001d)
+ */
union l1_cache {
struct {
unsigned line_size:8;
[CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
};
-static void
-amd_cpuid4(int index, union _cpuid4_leaf_eax *eax,
- union _cpuid4_leaf_ebx *ebx, union _cpuid4_leaf_ecx *ecx)
+static void legacy_amd_cpuid4(int index, union _cpuid4_leaf_eax *eax,
+ union _cpuid4_leaf_ebx *ebx, union _cpuid4_leaf_ecx *ecx)
{
unsigned int dummy, line_size, lines_per_tag, assoc, size_in_kb;
union l1_cache l1i, l1d;
(ebx->split.ways_of_associativity + 1) - 1;
}
-/*
- * Fill passed _cpuid4_info_regs structure.
- * Intel-only code paths should pass NULL for the amd_northbridge
- * return pointer.
- */
-static int cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *id4,
- struct amd_northbridge **nb)
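+/*
+ * Check the validity of the passed CPUID(4)-style output registers and,
+ * if valid, use them to fill the given _cpuid4_info_regs structure.
+ */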
+static int cpuid4_info_fill_done(struct _cpuid4_info_regs *id4, union _cpuid4_leaf_eax eax,
+ union _cpuid4_leaf_ebx ebx, union _cpuid4_leaf_ecx ecx)
{
- u8 cpu_vendor = boot_cpu_data.x86_vendor;
- union _cpuid4_leaf_eax eax;
- union _cpuid4_leaf_ebx ebx;
- union _cpuid4_leaf_ecx ecx;
- u32 edx;
-
- if (cpu_vendor == X86_VENDOR_AMD || cpu_vendor == X86_VENDOR_HYGON) {
- if (boot_cpu_has(X86_FEATURE_TOPOEXT) || cpu_vendor == X86_VENDOR_HYGON) {
- /* AMD with TOPOEXT, or HYGON */
- cpuid_count(0x8000001d, index, &eax.full, &ebx.full, &ecx.full, &edx);
- } else {
- /* Legacy AMD fallback */
- amd_cpuid4(index, &eax, &ebx, &ecx);
- }
-
- if (nb)
- *nb = amd_init_l3_cache(index);
- } else {
- /* Intel */
- cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
- }
-
	if (eax.split.type == CTYPE_NULL)
		return -EIO;

	id4->eax = eax;
	id4->ebx = ebx;
	id4->ecx = ecx;
	id4->size = (ecx.split.number_of_sets          + 1) *
		    (ebx.split.coherency_line_size     + 1) *
		    (ebx.split.physical_line_partition + 1) *
		    (ebx.split.ways_of_associativity   + 1);

	return 0;
}
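+/*
+ * AMD/Hygon CPUID(4) code path: CPUID(0x8000001d) provides a
+ * CPUID(4)-compatible cache properties leaf. It is always used on Hygon,
+ * and on AMD when TOPOEXT is advertised; otherwise fall back to the
+ * legacy CPUID(4) emulation above.
+ */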
+static int amd_fill_cpuid4_info(int index, struct _cpuid4_info_regs *id4)
+{
+ union _cpuid4_leaf_eax eax;
+ union _cpuid4_leaf_ebx ebx;
+ union _cpuid4_leaf_ecx ecx;
+ u32 ignored;
+
+ if (boot_cpu_has(X86_FEATURE_TOPOEXT) || boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+ cpuid_count(0x8000001d, index, &eax.full, &ebx.full, &ecx.full, &ignored);
+ else
+ legacy_amd_cpuid4(index, &eax, &ebx, &ecx);
+
+ return cpuid4_info_fill_done(id4, eax, ebx, ecx);
+}
+
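+/* Intel CPUID(4) code path: the Deterministic Cache Parameters leaf is native. */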
+static int intel_fill_cpuid4_info(int index, struct _cpuid4_info_regs *id4)
+{
+ union _cpuid4_leaf_eax eax;
+ union _cpuid4_leaf_ebx ebx;
+ union _cpuid4_leaf_ecx ecx;
+ u32 ignored;
+
+ cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &ignored);
+
+ return cpuid4_info_fill_done(id4, eax, ebx, ecx);
+}
+
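+/*
+ * Fill the _cpuid4_info_regs structure for the cache leaf @index,
+ * dispatching to the vendor-specific code path. Returns -EIO if the
+ * indexed cache leaf does not exist (CTYPE_NULL).
+ */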
+static int fill_cpuid4_info(int index, struct _cpuid4_info_regs *id4)
+{
+ u8 cpu_vendor = boot_cpu_data.x86_vendor;
+
+ return (cpu_vendor == X86_VENDOR_AMD || cpu_vendor == X86_VENDOR_HYGON) ?
+ amd_fill_cpuid4_info(index, id4) :
+ intel_fill_cpuid4_info(index, id4);
+}
+
static int find_num_cache_leaves(struct cpuinfo_x86 *c)
{
unsigned int eax, ebx, ecx, edx, op;
struct _cpuid4_info_regs id4 = {};
int retval;
- retval = cpuid4_cache_lookup_regs(i, &id4, NULL);
+ retval = intel_fill_cpuid4_info(i, &id4);
if (retval < 0)
continue;
int populate_cache_leaves(unsigned int cpu)
{
- unsigned int idx, ret;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *ci = this_cpu_ci->info_list;
+ u8 cpu_vendor = boot_cpu_data.x86_vendor;
struct _cpuid4_info_regs id4 = {};
- struct amd_northbridge *nb;
+ struct amd_northbridge *nb = NULL;
+ int idx, ret;
for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
- ret = cpuid4_cache_lookup_regs(idx, &id4, &nb);
+ ret = fill_cpuid4_info(idx, &id4);
if (ret)
return ret;
+
get_cache_id(cpu, &id4);
+
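+		/*
+		 * The AMD northbridge is passed down so ci_info_init() can
+		 * expose the L3 cache index disable attributes, if supported.
+		 */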
+ if (cpu_vendor == X86_VENDOR_AMD || cpu_vendor == X86_VENDOR_HYGON)
+ nb = amd_init_l3_cache(idx);
+
ci_info_init(ci++, &id4, nb);
__cache_cpumap_setup(cpu, idx, &id4);
}