return -1;
}
-static ssize_t show_cache_disable(struct cacheinfo *this_leaf, char *buf,
- unsigned int slot)
+static ssize_t show_cache_disable(struct cacheinfo *ci, char *buf, unsigned int slot)
{
int index;
- struct amd_northbridge *nb = this_leaf->priv;
+ struct amd_northbridge *nb = ci->priv;
index = amd_get_l3_disable_slot(nb, slot);
if (index >= 0)
cache_disable_##slot##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
- struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
- return show_cache_disable(this_leaf, buf, slot); \
+ struct cacheinfo *ci = dev_get_drvdata(dev); \
+ return show_cache_disable(ci, buf, slot); \
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)
return 0;
}
-static ssize_t store_cache_disable(struct cacheinfo *this_leaf,
- const char *buf, size_t count,
- unsigned int slot)
+static ssize_t store_cache_disable(struct cacheinfo *ci, const char *buf,
+ size_t count, unsigned int slot)
{
unsigned long val = 0;
int cpu, err = 0;
- struct amd_northbridge *nb = this_leaf->priv;
+ struct amd_northbridge *nb = ci->priv;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- cpu = cpumask_first(&this_leaf->shared_cpu_map);
+ cpu = cpumask_first(&ci->shared_cpu_map);
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
- struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
- return store_cache_disable(this_leaf, buf, count, slot); \
+ struct cacheinfo *ci = dev_get_drvdata(dev); \
+ return store_cache_disable(ci, buf, count, slot); \
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)
static ssize_t subcaches_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct cacheinfo *this_leaf = dev_get_drvdata(dev);
- int cpu = cpumask_first(&this_leaf->shared_cpu_map);
+ struct cacheinfo *ci = dev_get_drvdata(dev);
+ int cpu = cpumask_first(&ci->shared_cpu_map);
return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
}
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct cacheinfo *this_leaf = dev_get_drvdata(dev);
- int cpu = cpumask_first(&this_leaf->shared_cpu_map);
+ struct cacheinfo *ci = dev_get_drvdata(dev);
+ int cpu = cpumask_first(&ci->shared_cpu_map);
unsigned long val;
if (!capable(CAP_SYS_ADMIN))
struct attribute *attr, int unused)
{
struct device *dev = kobj_to_dev(kobj);
- struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ struct cacheinfo *ci = dev_get_drvdata(dev);
umode_t mode = attr->mode;
- if (!this_leaf->priv)
+ if (!ci->priv)
return 0;
if ((attr == &dev_attr_subcaches.attr) &&
}
const struct attribute_group *
-cache_get_priv_group(struct cacheinfo *this_leaf)
+cache_get_priv_group(struct cacheinfo *ci)
{
- struct amd_northbridge *nb = this_leaf->priv;
+ struct amd_northbridge *nb = ci->priv;
- if (this_leaf->level < 3 || !nb)
+ if (ci->level < 3 || !nb)
return NULL;
if (nb && nb->l3_cache.indices)
struct _cpuid4_info_regs *base)
{
struct cpu_cacheinfo *this_cpu_ci;
- struct cacheinfo *this_leaf;
+ struct cacheinfo *ci;
int i, sibling;
/*
this_cpu_ci = get_cpu_cacheinfo(i);
if (!this_cpu_ci->info_list)
continue;
- this_leaf = this_cpu_ci->info_list + index;
+ ci = this_cpu_ci->info_list + index;
for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
if (!cpu_online(sibling))
continue;
cpumask_set_cpu(sibling,
- &this_leaf->shared_cpu_map);
+ &ci->shared_cpu_map);
}
}
} else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
if ((apicid < first) || (apicid > last))
continue;
- this_leaf = this_cpu_ci->info_list + index;
+ ci = this_cpu_ci->info_list + index;
for_each_online_cpu(sibling) {
apicid = cpu_data(sibling).topo.apicid;
if ((apicid < first) || (apicid > last))
continue;
cpumask_set_cpu(sibling,
- &this_leaf->shared_cpu_map);
+ &ci->shared_cpu_map);
}
}
} else
struct _cpuid4_info_regs *base)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
- struct cacheinfo *this_leaf, *sibling_leaf;
+ struct cacheinfo *ci, *sibling_ci;
unsigned long num_threads_sharing;
int index_msb, i;
struct cpuinfo_x86 *c = &cpu_data(cpu);
return;
}
- this_leaf = this_cpu_ci->info_list + index;
+ ci = this_cpu_ci->info_list + index;
num_threads_sharing = 1 + base->eax.split.num_threads_sharing;
- cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
+ cpumask_set_cpu(cpu, &ci->shared_cpu_map);
if (num_threads_sharing == 1)
return;
if (i == cpu || !sib_cpu_ci->info_list)
			continue; /* skip if itself or no cacheinfo */
- sibling_leaf = sib_cpu_ci->info_list + index;
- cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
- cpumask_set_cpu(cpu, &sibling_leaf->shared_cpu_map);
+ sibling_ci = sib_cpu_ci->info_list + index;
+ cpumask_set_cpu(i, &ci->shared_cpu_map);
+ cpumask_set_cpu(cpu, &sibling_ci->shared_cpu_map);
}
}
-static void ci_leaf_init(struct cacheinfo *this_leaf,
- struct _cpuid4_info_regs *base)
+static void ci_info_init(struct cacheinfo *ci, struct _cpuid4_info_regs *base)
{
- this_leaf->id = base->id;
- this_leaf->attributes = CACHE_ID;
- this_leaf->level = base->eax.split.level;
- this_leaf->type = cache_type_map[base->eax.split.type];
- this_leaf->coherency_line_size =
+ ci->id = base->id;
+ ci->attributes = CACHE_ID;
+ ci->level = base->eax.split.level;
+ ci->type = cache_type_map[base->eax.split.type];
+ ci->coherency_line_size =
base->ebx.split.coherency_line_size + 1;
- this_leaf->ways_of_associativity =
+ ci->ways_of_associativity =
base->ebx.split.ways_of_associativity + 1;
- this_leaf->size = base->size;
- this_leaf->number_of_sets = base->ecx.split.number_of_sets + 1;
- this_leaf->physical_line_partition =
+ ci->size = base->size;
+ ci->number_of_sets = base->ecx.split.number_of_sets + 1;
+ ci->physical_line_partition =
base->ebx.split.physical_line_partition + 1;
- this_leaf->priv = base->nb;
+ ci->priv = base->nb;
}
int populate_cache_leaves(unsigned int cpu)
{
unsigned int idx, ret;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
- struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+ struct cacheinfo *ci = this_cpu_ci->info_list;
struct _cpuid4_info_regs id4_regs = {};
for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
if (ret)
return ret;
get_cache_id(cpu, &id4_regs);
- ci_leaf_init(this_leaf++, &id4_regs);
+ ci_info_init(ci++, &id4_regs);
__cache_cpumap_setup(cpu, idx, &id4_regs);
}
this_cpu_ci->cpu_map_populated = true;