cacheinfo: Set cache 'id' based on DT data
author     Rob Herring <robh@kernel.org>
           Fri, 11 Jul 2025 18:27:41 +0000 (18:27 +0000)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 16 Jul 2025 13:04:16 +0000 (15:04 +0200)
Use the minimum CPU h/w id of the CPUs associated with the cache for the
cache 'id'. This will provide a stable id value for a given system. As
we need to check all possible CPUs, we can't use the shared_cpu_map
which is just online CPUs. As there's not a cache to CPUs mapping in DT,
we have to walk all CPU nodes and then walk cache levels.
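
A condensed, illustrative sketch of that walk (the patch below factors it
into match_cache_node() and cache_of_set_id(); the helper name here is
hypothetical, and the 32-bit guard discussed next is omitted for brevity):

#include <linux/minmax.h>
#include <linux/of.h>
#include <linux/types.h>

/* Smallest CPU h/w id among all possible CPUs that reference cache_node. */
static u64 min_hwid_sharing_cache(const struct device_node *cache_node)
{
        struct device_node *cpu;
        u64 min_id = ~0ULL;

        for_each_of_cpu_node(cpu) {
                u64 id = of_get_cpu_hwid(cpu, 0);
                struct device_node *cache = of_find_next_cache_node(cpu);

                while (cache) {
                        struct device_node *next;

                        if (cache == cache_node)
                                min_id = min(min_id, id);

                        /* of_find_next_cache_node() does not put its argument. */
                        next = of_find_next_cache_node(cache);
                        of_node_put(cache);
                        cache = next;
                }
        }

        return min_id; /* ~0ULL means no CPU node references this cache */
}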

The cache_id exposed to user-space has historically been 32 bits, and it
is too late to change. This value is parsed into a u32 by user-space
libraries such as libvirt:
https://github.com/libvirt/libvirt/blob/master/src/util/virresctrl.c#L1588

Give up on assigning cache IDs if a CPU h/w id that does not fit in
32 bits is found.
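
As a minimal sketch of that guard (the helper name is hypothetical; the
actual check lives inline in cache_of_set_id() in the hunk below): any bit
set above bit 31 means the h/w id cannot survive the u32 truncation, so id
assignment is abandoned for the whole system.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* True if 'hwid' can be exposed as a 32-bit cache id. */
static bool hwid_fits_cache_id(u64 hwid)
{
        return FIELD_GET(GENMASK_ULL(63, 32), hwid) == 0;
}

For example, a h/w id of 0x100000000 fails this test, and cache_of_set_id()
below returns early without setting CACHE_ID.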

match_cache_node() does not make use of the __free() cleanup helpers
because of_find_next_cache_node(prev) does not drop a reference to prev,
and it is too easy to accidentally drop the reference on cpu, which belongs
to for_each_of_cpu_node().
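
For contrast, a minimal sketch of where __free(device_node) does fit: a
single scoped lookup whose one reference is dropped automatically (the
helper below is hypothetical, not part of the patch). The chained walk in
match_cache_node() has a different shape: each of_find_next_cache_node()
call returns a new referenced node without putting the previous one, and
the reference on cpu is owned by for_each_of_cpu_node(), so explicit
of_node_put() calls stay clearer there.

#include <linux/cleanup.h>
#include <linux/of.h>
#include <linux/types.h>

/* Hypothetical example of the usual __free(device_node) idiom. */
static bool cpu_has_next_level_cache(struct device_node *cpu)
{
        struct device_node *cache __free(device_node) =
                of_find_next_cache_node(cpu);

        return cache != NULL; /* of_node_put(cache) runs at scope exit */
}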

Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Signed-off-by: Rob Herring <robh@kernel.org>
[ ben: converted to use the __free cleanup idiom ]
Signed-off-by: Ben Horgan <ben.horgan@arm.com>
[ morse: Add checks to give up if a value larger than 32 bits is seen. ]
Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Link: https://lore.kernel.org/r/20250711182743.30141-2-james.morse@arm.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index cf0d455209d7fc3d6dd0d4856ecd850ae7fb1ed9..4e2f60c85e741970bacb219c76b0132a8497d576 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -8,6 +8,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/acpi.h>
+#include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/cacheinfo.h>
 #include <linux/compiler.h>
@@ -183,6 +184,49 @@ static bool cache_node_is_unified(struct cacheinfo *this_leaf,
        return of_property_read_bool(np, "cache-unified");
 }
 
+static bool match_cache_node(struct device_node *cpu,
+                            const struct device_node *cache_node)
+{
+       struct device_node *prev, *cache = of_find_next_cache_node(cpu);
+
+       while (cache) {
+               if (cache == cache_node) {
+                       of_node_put(cache);
+                       return true;
+               }
+
+               prev = cache;
+               cache = of_find_next_cache_node(cache);
+               of_node_put(prev);
+       }
+
+       return false;
+}
+
+static void cache_of_set_id(struct cacheinfo *this_leaf,
+                           struct device_node *cache_node)
+{
+       struct device_node *cpu;
+       u32 min_id = ~0;
+
+       for_each_of_cpu_node(cpu) {
+               u64 id = of_get_cpu_hwid(cpu, 0);
+
+               if (FIELD_GET(GENMASK_ULL(63, 32), id)) {
+                       of_node_put(cpu);
+                       return;
+               }
+
+               if (match_cache_node(cpu, cache_node))
+                       min_id = min(min_id, id);
+       }
+
+       if (min_id != ~0) {
+               this_leaf->id = min_id;
+               this_leaf->attributes |= CACHE_ID;
+       }
+}
+
 static void cache_of_set_props(struct cacheinfo *this_leaf,
                               struct device_node *np)
 {
@@ -198,6 +242,7 @@ static void cache_of_set_props(struct cacheinfo *this_leaf,
        cache_get_line_size(this_leaf, np);
        cache_nr_sets(this_leaf, np);
        cache_associativity(this_leaf);
+       cache_of_set_id(this_leaf, np);
 }
 
 static int cache_setup_of_node(unsigned int cpu)