git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
x86/cacheinfo: Separate Intel CPUID leaf 0x4 handling
author: Ahmed S. Darwish <darwi@linutronix.de>
Mon, 24 Mar 2025 13:33:19 +0000 (14:33 +0100)
committer: Ingo Molnar <mingo@kernel.org>
Tue, 25 Mar 2025 09:23:18 +0000 (10:23 +0100)
init_intel_cacheinfo() was overly complex.  It parsed leaf 0x4 data,
leaf 0x2 data, and performed post-processing, all within one function.
Parent commit moved leaf 0x2 parsing and the post-processing logic into
their own functions.

Continue the refactoring by extracting leaf 0x4 parsing into its own
function.  Initialize local L2/L3 topology ID variables to BAD_APICID by
default, thus ensuring they can be used unconditionally.

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ahmed S. Darwish <darwi@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20250324133324.23458-25-darwi@linutronix.de
arch/x86/kernel/cpu/cacheinfo.c

index b39aad1ecf9cc5c672014fde4d90f6c4330f7505..72cc32d22c4d4d836ea614e444f7cb45cba1ee87 100644 (file)
@@ -400,73 +400,71 @@ static void intel_cacheinfo_0x2(struct cpuinfo_x86 *c)
        intel_cacheinfo_done(c, l3, l2, l1i, l1d);
 }
 
-void init_intel_cacheinfo(struct cpuinfo_x86 *c)
+static bool intel_cacheinfo_0x4(struct cpuinfo_x86 *c)
 {
        struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
-       unsigned int l1i = 0, l1d = 0, l2 = 0, l3 = 0;
-       unsigned int l2_id = 0, l3_id = 0;
-
-       if (c->cpuid_level > 3) {
-               /*
-                * There should be at least one leaf. A non-zero value means
-                * that the number of leaves has been initialized.
-                */
-               if (!ci->num_leaves)
-                       ci->num_leaves = find_num_cache_leaves(c);
+       unsigned int l2_id = BAD_APICID, l3_id = BAD_APICID;
+       unsigned int l1d = 0, l1i = 0, l2 = 0, l3 = 0;
 
-               /*
-                * Whenever possible use cpuid(4), deterministic cache
-                * parameters cpuid leaf to find the cache details
-                */
-               for (int i = 0; i < ci->num_leaves; i++) {
-                       unsigned int num_threads_sharing, index_msb;
-                       struct _cpuid4_info id4 = {};
-                       int retval;
+       if (c->cpuid_level < 4)
+               return false;
 
-                       retval = intel_fill_cpuid4_info(i, &id4);
-                       if (retval < 0)
-                               continue;
+       /*
+        * There should be at least one leaf. A non-zero value means
+        * that the number of leaves has been previously initialized.
+        */
+       if (!ci->num_leaves)
+               ci->num_leaves = find_num_cache_leaves(c);
 
-                       switch (id4.eax.split.level) {
-                       case 1:
-                               if (id4.eax.split.type == CTYPE_DATA)
-                                       l1d = id4.size / 1024;
-                               else if (id4.eax.split.type == CTYPE_INST)
-                                       l1i = id4.size / 1024;
-                               break;
-                       case 2:
-                               l2 = id4.size / 1024;
-                               num_threads_sharing = 1 + id4.eax.split.num_threads_sharing;
-                               index_msb = get_count_order(num_threads_sharing);
-                               l2_id = c->topo.apicid & ~((1 << index_msb) - 1);
-                               break;
-                       case 3:
-                               l3 = id4.size / 1024;
-                               num_threads_sharing = 1 + id4.eax.split.num_threads_sharing;
-                               index_msb = get_count_order(num_threads_sharing);
-                               l3_id = c->topo.apicid & ~((1 << index_msb) - 1);
-                               break;
-                       default:
-                               break;
-                       }
+       if (!ci->num_leaves)
+               return false;
+
+       for (int i = 0; i < ci->num_leaves; i++) {
+               unsigned int num_threads_sharing, index_msb;
+               struct _cpuid4_info id4 = {};
+               int ret;
+
+               ret = intel_fill_cpuid4_info(i, &id4);
+               if (ret < 0)
+                       continue;
+
+               switch (id4.eax.split.level) {
+               case 1:
+                       if (id4.eax.split.type == CTYPE_DATA)
+                               l1d = id4.size / 1024;
+                       else if (id4.eax.split.type == CTYPE_INST)
+                               l1i = id4.size / 1024;
+                       break;
+               case 2:
+                       l2 = id4.size / 1024;
+                       num_threads_sharing = 1 + id4.eax.split.num_threads_sharing;
+                       index_msb = get_count_order(num_threads_sharing);
+                       l2_id = c->topo.apicid & ~((1 << index_msb) - 1);
+                       break;
+               case 3:
+                       l3 = id4.size / 1024;
+                       num_threads_sharing = 1 + id4.eax.split.num_threads_sharing;
+                       index_msb = get_count_order(num_threads_sharing);
+                       l3_id = c->topo.apicid & ~((1 << index_msb) - 1);
+                       break;
+               default:
+                       break;
                }
        }
 
+       c->topo.l2c_id = l2_id;
+       c->topo.llc_id = (l3_id == BAD_APICID) ? l2_id : l3_id;
+       intel_cacheinfo_done(c, l3, l2, l1i, l1d);
+       return true;
+}
+
+void init_intel_cacheinfo(struct cpuinfo_x86 *c)
+{
        /* Don't use CPUID(2) if CPUID(4) is supported. */
-       if (!ci->num_leaves && c->cpuid_level > 1) {
-               intel_cacheinfo_0x2(c);
+       if (intel_cacheinfo_0x4(c))
                return;
-       }
-
-       if (l2) {
-               c->topo.llc_id = l2_id;
-               c->topo.l2c_id = l2_id;
-       }
-
-       if (l3)
-               c->topo.llc_id = l3_id;
 
-       intel_cacheinfo_done(c, l3, l2, l1i, l1d);
+       intel_cacheinfo_0x2(c);
 }
 
 static int __cache_amd_cpumap_setup(unsigned int cpu, int index,