arch_topology: move parse_acpi_topology() to common code
author     Yunhui Cui <cuiyunhui@bytedance.com>
           Tue, 23 Sep 2025 01:54:09 +0000 (09:54 +0800)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 22 Oct 2025 06:10:57 +0000 (08:10 +0200)
Currently, RISC-V has no arch-specific registers describing CPU topology
and must obtain this information from ACPI. Move parse_acpi_topology()
from arch/arm64/ to drivers/base/ so that RISC-V can reuse it.

Signed-off-by: Yunhui Cui <cuiyunhui@bytedance.com>
Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
Link: https://patch.msgid.link/20250923015409.15983-2-cuiyunhui@bytedance.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
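
The move relies on the kernel's weak-symbol pattern: the copy of
parse_acpi_topology() that now lives in drivers/base/arch_topology.c is marked
__weak, so an architecture can still supply its own strong definition that
wins at link time. A minimal sketch of those semantics in plain C
(illustrative only, not kernel code):

#include <stdio.h>

/* Common code supplies a weak default, as drivers/base/arch_topology.c does
 * for parse_acpi_topology() after this commit. */
__attribute__((weak)) int parse_acpi_topology(void)
{
	puts("generic parse_acpi_topology()");
	return 0;
}

/* An architecture could ship a strong definition in another object file:
 *   int parse_acpi_topology(void) { ... arch-specific parsing ... }
 * and the linker would pick it over the weak default. */

int main(void)
{
	return parse_acpi_topology();
}
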
arch/arm64/include/asm/topology.h
arch/arm64/kernel/topology.c
drivers/base/arch_topology.c
include/linux/arch_topology.h

diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 341174bf91063d3d56dde0a2bf7b52f1abcc6ee5..b9eaf4ad7085036fbf0ea7310a425e8d7e554a95 100644
@@ -36,6 +36,9 @@ void update_freq_counters_refs(void);
 #define arch_scale_hw_pressure topology_get_hw_pressure
 #define arch_update_hw_pressure        topology_update_hw_pressure
 
+#undef arch_cpu_is_threaded
+#define arch_cpu_is_threaded() (read_cpuid_mpidr() & MPIDR_MT_BITMASK)
+
 #include <asm-generic/topology.h>
 
#endif /* __ASM_TOPOLOGY_H */
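
On arm64, the new arch_cpu_is_threaded() hook reads MPIDR_EL1 and tests the
MT bit, which indicates that affinity level 0 enumerates SMT threads rather
than independent cores. A standalone illustration of the test (bit 24 per
arm64's cputype.h; the MPIDR value below is made up):

#include <stdio.h>

#define MPIDR_MT_BITMASK (1UL << 24)	/* MPIDR_EL1.MT */

static int mpidr_is_threaded(unsigned long mpidr)
{
	return !!(mpidr & MPIDR_MT_BITMASK);
}

int main(void)
{
	unsigned long mpidr = 1UL << 24;	/* hypothetical value with MT set */
	printf("threaded: %d\n", mpidr_is_threaded(mpidr));	/* prints 1 */
	return 0;
}
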
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 5d07ee85bdae47db194d2da689f8eebc368f890c..5d24dc53799b7687d4aea5c5d0f49e7a5580be28 100644
 #include <asm/cputype.h>
 #include <asm/topology.h>
 
-#ifdef CONFIG_ACPI
-static bool __init acpi_cpu_is_threaded(int cpu)
-{
-       int is_threaded = acpi_pptt_cpu_is_thread(cpu);
-
-       /*
-        * if the PPTT doesn't have thread information, assume a homogeneous
-        * machine and return the current CPU's thread state.
-        */
-       if (is_threaded < 0)
-               is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
-
-       return !!is_threaded;
-}
-
-struct cpu_smt_info {
-       unsigned int thread_num;
-       int core_id;
-};
-
-/*
- * Propagate the topology information of the processor_topology_node tree to the
- * cpu_topology array.
- */
-int __init parse_acpi_topology(void)
-{
-       unsigned int max_smt_thread_num = 1;
-       struct cpu_smt_info *entry;
-       struct xarray hetero_cpu;
-       unsigned long hetero_id;
-       int cpu, topology_id;
-
-       if (acpi_disabled)
-               return 0;
-
-       xa_init(&hetero_cpu);
-
-       for_each_possible_cpu(cpu) {
-               topology_id = find_acpi_cpu_topology(cpu, 0);
-               if (topology_id < 0)
-                       return topology_id;
-
-               if (acpi_cpu_is_threaded(cpu)) {
-                       cpu_topology[cpu].thread_id = topology_id;
-                       topology_id = find_acpi_cpu_topology(cpu, 1);
-                       cpu_topology[cpu].core_id   = topology_id;
-
-                       /*
-                        * In the PPTT, CPUs below a node with the 'identical
-                        * implementation' flag have the same number of threads.
-                        * Count the number of threads for only one CPU (i.e.
-                        * one core_id) among those with the same hetero_id.
-                        * See the comment of find_acpi_cpu_topology_hetero_id()
-                        * for more details.
-                        *
-                        * One entry is created for each node having:
-                        * - the 'identical implementation' flag
-                        * - its parent not having the flag
-                        */
-                       hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
-                       entry = xa_load(&hetero_cpu, hetero_id);
-                       if (!entry) {
-                               entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-                               WARN_ON_ONCE(!entry);
-
-                               if (entry) {
-                                       entry->core_id = topology_id;
-                                       entry->thread_num = 1;
-                                       xa_store(&hetero_cpu, hetero_id,
-                                                entry, GFP_KERNEL);
-                               }
-                       } else if (entry->core_id == topology_id) {
-                               entry->thread_num++;
-                       }
-               } else {
-                       cpu_topology[cpu].thread_id  = -1;
-                       cpu_topology[cpu].core_id    = topology_id;
-               }
-               topology_id = find_acpi_cpu_topology_cluster(cpu);
-               cpu_topology[cpu].cluster_id = topology_id;
-               topology_id = find_acpi_cpu_topology_package(cpu);
-               cpu_topology[cpu].package_id = topology_id;
-       }
-
-       /*
-        * This is a short loop since the number of XArray elements is the
-        * number of heterogeneous CPU clusters. On a homogeneous system
-        * there's only one entry in the XArray.
-        */
-       xa_for_each(&hetero_cpu, hetero_id, entry) {
-               max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
-               xa_erase(&hetero_cpu, hetero_id);
-               kfree(entry);
-       }
-
-       cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
-       xa_destroy(&hetero_cpu);
-       return 0;
-}
-#endif
-
 #ifdef CONFIG_ARM64_AMU_EXTN
 #define read_corecnt() read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0)
 #define read_constcnt()        read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1037169abb4598f830d59aa17fa522133f8a5700..1ccb1eda4ce8b599416d26ebe8a66a6041c32766 100644
@@ -823,12 +823,106 @@ void remove_cpu_topology(unsigned int cpu)
        clear_cpu_topology(cpu);
 }
 
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+struct cpu_smt_info {
+       unsigned int thread_num;
+       int core_id;
+};
+
+static bool __init acpi_cpu_is_threaded(int cpu)
+{
+       int is_threaded = acpi_pptt_cpu_is_thread(cpu);
+
+       /*
+        * if the PPTT doesn't have thread information, check for architecture
+        * specific fallback if available
+        */
+       if (is_threaded < 0)
+               is_threaded = arch_cpu_is_threaded();
+
+       return !!is_threaded;
+}
+
+/*
+ * Propagate the topology information of the processor_topology_node tree to the
+ * cpu_topology array.
+ */
 __weak int __init parse_acpi_topology(void)
 {
+       unsigned int max_smt_thread_num = 1;
+       struct cpu_smt_info *entry;
+       struct xarray hetero_cpu;
+       unsigned long hetero_id;
+       int cpu, topology_id;
+
+       if (acpi_disabled)
+               return 0;
+
+       xa_init(&hetero_cpu);
+
+       for_each_possible_cpu(cpu) {
+               topology_id = find_acpi_cpu_topology(cpu, 0);
+               if (topology_id < 0)
+                       return topology_id;
+
+               if (acpi_cpu_is_threaded(cpu)) {
+                       cpu_topology[cpu].thread_id = topology_id;
+                       topology_id = find_acpi_cpu_topology(cpu, 1);
+                       cpu_topology[cpu].core_id   = topology_id;
+
+                       /*
+                        * In the PPTT, CPUs below a node with the 'identical
+                        * implementation' flag have the same number of threads.
+                        * Count the number of threads for only one CPU (i.e.
+                        * one core_id) among those with the same hetero_id.
+                        * See the comment of find_acpi_cpu_topology_hetero_id()
+                        * for more details.
+                        *
+                        * One entry is created for each node having:
+                        * - the 'identical implementation' flag
+                        * - its parent not having the flag
+                        */
+                       hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
+                       entry = xa_load(&hetero_cpu, hetero_id);
+                       if (!entry) {
+                               entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+                               WARN_ON_ONCE(!entry);
+
+                               if (entry) {
+                                       entry->core_id = topology_id;
+                                       entry->thread_num = 1;
+                                       xa_store(&hetero_cpu, hetero_id,
+                                                entry, GFP_KERNEL);
+                               }
+                       } else if (entry->core_id == topology_id) {
+                               entry->thread_num++;
+                       }
+               } else {
+                       cpu_topology[cpu].thread_id  = -1;
+                       cpu_topology[cpu].core_id    = topology_id;
+               }
+               topology_id = find_acpi_cpu_topology_cluster(cpu);
+               cpu_topology[cpu].cluster_id = topology_id;
+               topology_id = find_acpi_cpu_topology_package(cpu);
+               cpu_topology[cpu].package_id = topology_id;
+       }
+
+       /*
+        * This is a short loop since the number of XArray elements is the
+        * number of heterogeneous CPU clusters. On a homogeneous system
+        * there's only one entry in the XArray.
+        */
+       xa_for_each(&hetero_cpu, hetero_id, entry) {
+               max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
+               xa_erase(&hetero_cpu, hetero_id);
+               kfree(entry);
+       }
+
+       cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
+       xa_destroy(&hetero_cpu);
        return 0;
 }
 
-#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
 void __init init_cpu_topology(void)
 {
        int cpu, ret;
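
The per-hetero_id bookkeeping above is easier to follow outside the XArray
API. This standalone sketch (invented topology, plain arrays instead of the
kernel's XArray) runs the same scheme: count threads for one representative
core per 'identical implementation' group, then take the maximum across
groups:

#include <stdio.h>

struct cpu { int hetero_id; int core_id; };

int main(void)
{
	/* Invented topology: group 0 = 2-thread cores, group 1 = 4-thread cores */
	struct cpu cpus[] = {
		{0, 10}, {0, 10}, {0, 11}, {0, 11},
		{1, 20}, {1, 20}, {1, 20}, {1, 20},
	};
	enum { NGROUPS = 2 };
	int rep_core[NGROUPS];
	int nthreads[NGROUPS] = { 0 };

	for (int g = 0; g < NGROUPS; g++)
		rep_core[g] = -1;

	for (unsigned i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++) {
		int g = cpus[i].hetero_id;

		if (rep_core[g] < 0)
			rep_core[g] = cpus[i].core_id;	/* first core seen becomes the representative */
		if (rep_core[g] == cpus[i].core_id)
			nthreads[g]++;			/* count threads of that one core only */
	}

	int max_smt = 1;
	for (int g = 0; g < NGROUPS; g++)
		if (nthreads[g] > max_smt)
			max_smt = nthreads[g];

	printf("max SMT threads per core: %d\n", max_smt);	/* prints 4 */
	return 0;
}
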
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index d72d6e5aa2002260f04d76071fa14f35120bb4d8..766ed9cf0e5495334c82b009e1a96a3779609d1a 100644
@@ -80,6 +80,11 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
 #define topology_sibling_cpumask(cpu)  (&cpu_topology[cpu].thread_sibling)
 #define topology_cluster_cpumask(cpu)  (&cpu_topology[cpu].cluster_sibling)
 #define topology_llc_cpumask(cpu)      (&cpu_topology[cpu].llc_sibling)
+
+#ifndef arch_cpu_is_threaded
+#define arch_cpu_is_threaded() (0)
+#endif
+
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
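
The #ifndef fallback added here pairs with the arm64 definition in the first
hunk: an arch header that defines arch_cpu_is_threaded() before this point
keeps its own version, while every other architecture gets a constant 0, i.e.
CPUs are treated as non-threaded unless the PPTT says otherwise. A minimal
sketch of the pattern (illustrative only, not the kernel headers themselves):

#include <stdio.h>

/* An arch header (e.g. arm64's asm/topology.h) may define the hook first:
 *   #define arch_cpu_is_threaded() (read_cpuid_mpidr() & MPIDR_MT_BITMASK)
 * The generic header then only fills in a default: */
#ifndef arch_cpu_is_threaded
#define arch_cpu_is_threaded() (0)
#endif

int main(void)
{
	printf("threaded: %d\n", arch_cpu_is_threaded());	/* prints 0: no arch override */
	return 0;
}
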