{ 0xea, LVL_3, MB(12) }, /* 24-way set assoc, 64 byte line size */
{ 0xeb, LVL_3, MB(18) }, /* 24-way set assoc, 64 byte line size */
{ 0xec, LVL_3, MB(24) }, /* 24-way set assoc, 64 byte line size */
- { 0x00, 0, 0}
};
@@ ... @@
ci->num_leaves = find_num_cache_leaves(c);
}

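+/*
+ * Look up the CPUID(0x2) cache descriptor @desc in cache_table.
+ * Return the matching entry, or NULL if the descriptor is unknown.
+ */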
+static const struct _cache_table *cache_table_get(u8 desc)
+{
+ for (int i = 0; i < ARRAY_SIZE(cache_table); i++) {
+ if (cache_table[i].descriptor == desc)
+ return &cache_table[i];
+ }
+
+ return NULL;
+}
+
void init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
/* Cache sizes */
@@ ... @@
/* Don't use CPUID(2) if CPUID(4) is supported. */
if (!ci->num_leaves && c->cpuid_level > 1) {
+ const struct _cache_table *entry;
union leaf_0x2_regs regs;
u8 *desc;

cpuid_get_leaf_0x2_regs(&regs);
for_each_leaf_0x2_desc(regs, desc) {
- u8 k = 0;
-
- /* look up this descriptor in the table */
- while (cache_table[k].descriptor != 0) {
- if (cache_table[k].descriptor == *desc) {
- switch (cache_table[k].cache_type) {
- case LVL_1_INST:
- l1i += cache_table[k].size;
- break;
- case LVL_1_DATA:
- l1d += cache_table[k].size;
- break;
- case LVL_2:
- l2 += cache_table[k].size;
- break;
- case LVL_3:
- l3 += cache_table[k].size;
- break;
- }
-
- break;
- }
- k++;
+ entry = cache_table_get(*desc);
+ if (!entry)
+ continue;
+
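+ /* Accumulate the descriptor's size into the matching level total */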
+ switch (entry->cache_type) {
+ case LVL_1_INST: l1i += entry->size; break;
+ case LVL_1_DATA: l1d += entry->size; break;
+ case LVL_2: l2 += entry->size; break;
+ case LVL_3: l3 += entry->size; break;
}
}
}