MIPS: CPS: Boot CPUs in secondary clusters
author    Paul Burton <paulburton@kernel.org>
          Wed, 29 Jan 2025 12:32:50 +0000 (13:32 +0100)
committer Thomas Bogendoerfer <tsbogend@alpha.franken.de>
          Fri, 21 Feb 2025 09:19:37 +0000 (10:19 +0100)
Probe for & boot CPUs (cores & VPs) in secondary clusters (i.e. not the
cluster that began booting Linux) when they are present in systems with
CM 3.5 or higher.
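
All cross-cluster accesses below follow one pattern: lock the CM's
redirect ("other") region onto a target cluster/core/VP, access the
redirect copies of the registers, then unlock. A minimal sketch of that
pattern, using only accessors that appear in this diff (illustrative,
not additional code introduced by the patch):

    /* Let 'core' in a remote 'cluster' access its cluster's GCRs */
    mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
    write_gcr_redir_access(read_gcr_redir_access() | BIT(core));
    mips_cm_unlock_other();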

Signed-off-by: Paul Burton <paulburton@kernel.org>
Signed-off-by: Chao-ying Fu <cfu@wavecomp.com>
Signed-off-by: Dragan Mladjenovic <dragan.mladjenovic@syrmia.com>
Signed-off-by: Aleksandar Rikalo <arikalo@gmail.com>
Tested-by: Serge Semin <fancer.lancer@gmail.com>
Tested-by: Gregory CLEMENT <gregory.clement@bootlin.com>
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
arch/mips/include/asm/mips-cm.h
arch/mips/include/asm/smp-cps.h
arch/mips/kernel/mips-cm.c
arch/mips/kernel/smp-cps.c

diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index 23ce951f445bb0d48e5b6abf0418d733b7b7be0e..1afa85db1fb37d1017fbe7d6b7a2b7d2470e8257 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -255,6 +255,12 @@ GCR_ACCESSOR_RW(32, 0x130, l2_config)
 GCR_ACCESSOR_RO(32, 0x150, sys_config2)
 #define CM_GCR_SYS_CONFIG2_MAXVPW              GENMASK(3, 0)
 
+/* GCR_L2_RAM_CONFIG - Configuration & status of L2 cache RAMs */
+GCR_ACCESSOR_RW(64, 0x240, l2_ram_config)
+#define CM_GCR_L2_RAM_CONFIG_PRESENT           BIT(31)
+#define CM_GCR_L2_RAM_CONFIG_HCI_DONE          BIT(30)
+#define CM_GCR_L2_RAM_CONFIG_HCI_SUPPORTED     BIT(29)
+
 /* GCR_L2_PFT_CONTROL - Controls hardware L2 prefetching */
 GCR_ACCESSOR_RW(32, 0x300, l2_pft_control)
 #define CM_GCR_L2_PFT_CONTROL_PAGEMASK         GENMASK(31, 12)
@@ -266,6 +272,18 @@ GCR_ACCESSOR_RW(32, 0x308, l2_pft_control_b)
 #define CM_GCR_L2_PFT_CONTROL_B_CEN            BIT(8)
 #define CM_GCR_L2_PFT_CONTROL_B_PORTID         GENMASK(7, 0)
 
+/* GCR_L2_TAG_ADDR - Access addresses in L2 cache tags */
+GCR_ACCESSOR_RW(64, 0x600, l2_tag_addr)
+
+/* GCR_L2_TAG_STATE - Access L2 cache tag state */
+GCR_ACCESSOR_RW(64, 0x608, l2_tag_state)
+
+/* GCR_L2_DATA - Access data in L2 cache lines */
+GCR_ACCESSOR_RW(64, 0x610, l2_data)
+
+/* GCR_L2_ECC - Access ECC information from L2 cache lines */
+GCR_ACCESSOR_RW(64, 0x618, l2_ecc)
+
 /* GCR_L2SM_COP - L2 cache op state machine control */
 GCR_ACCESSOR_RW(32, 0x620, l2sm_cop)
 #define CM_GCR_L2SM_COP_PRESENT                        BIT(31)
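
Each GCR_ACCESSOR_RW() above generates both a direct accessor (e.g.
read_gcr_l2_ram_config()) and a redirect-block variant
(read_gcr_redir_l2_ram_config()) that reaches the same register in
whichever cluster is currently locked via mips_cm_lock_other(); the new
smp-cps.c code below relies on the redir_ forms. Roughly, assuming the
locked cluster supports HCI (hardware cache init):

    /* Has the remote cluster's L2 finished hardware cache init? */
    u32 l2_cfg = read_gcr_redir_l2_ram_config();
    bool hci_done = l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_DONE;
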
diff --git a/arch/mips/include/asm/smp-cps.h b/arch/mips/include/asm/smp-cps.h
index a629e948a6fdacbc780c109e11ff14b930c88dbb..10d3ebd890cb2f3ac2b1db5a541cfe9e7f1a0c99 100644
--- a/arch/mips/include/asm/smp-cps.h
+++ b/arch/mips/include/asm/smp-cps.h
@@ -23,6 +23,7 @@ struct core_boot_config {
 };
 
 struct cluster_boot_config {
+       unsigned long *core_power;
        struct core_boot_config *core_config;
 };
 
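The new per-cluster core_power bitmap replaces the single global
core_power bitmap removed from smp-cps.c below; it holds one bit per
core in the cluster, set while that core is powered up. A sketch of how
it is consulted, mirroring the cps_boot_secondary() change further down:

    /* Power the core (and, first time, its cluster) up if needed */
    if (!test_bit(core, cluster_cfg->core_power))
        boot_core(cluster, core, vpe_id);
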
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 3eb2cfb893e19c7260ff0cdc9113c7b3d6e92015..9854bc2b6895d4db67d216586f65e4810661d29b 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -308,7 +308,9 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core,
                      FIELD_PREP(CM3_GCR_Cx_OTHER_VP, vp);
 
                if (cm_rev >= CM_REV_CM3_5) {
-                       val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
+                       if (cluster != cpu_cluster(&current_cpu_data))
+                               val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
+                       val |= CM_GCR_Cx_OTHER_GIC_EN;
                        val |= FIELD_PREP(CM_GCR_Cx_OTHER_CLUSTER, cluster);
                        val |= FIELD_PREP(CM_GCR_Cx_OTHER_BLOCK, block);
                } else {
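
On CM 3.5 and later the redirect region can target other clusters. The
change above sets CM_GCR_Cx_OTHER_CLUSTER_EN only when the target
cluster differs from the caller's own, and always sets
CM_GCR_Cx_OTHER_GIC_EN so the GIC redirect region is enabled alongside
the GCRs. A sketch of the two resulting cases (remote_cluster is a
placeholder name, not from this patch):

    /* Local cluster: CLUSTER_EN left clear */
    mips_cm_lock_other(cpu_cluster(&current_cpu_data), core, 0,
                       CM_GCR_Cx_OTHER_BLOCK_LOCAL);
    mips_cm_unlock_other();

    /* Remote cluster: CLUSTER_EN set as well */
    mips_cm_lock_other(remote_cluster, core, 0,
                       CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
    mips_cm_unlock_other();
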
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 3d8cf3098656ce4b4218c2cd86ed6478e8b2732a..b20ea4048429e1aab2bffbada793ee594bee1e05 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -36,12 +36,56 @@ enum label_id {
 
 UASM_L_LA(_not_nmi)
 
-static DECLARE_BITMAP(core_power, NR_CPUS);
 static u64 core_entry_reg;
 static phys_addr_t cps_vec_pa;
 
 struct cluster_boot_config *mips_cps_cluster_bootcfg;
 
+static void power_up_other_cluster(unsigned int cluster)
+{
+       u32 stat, seq_state;
+       unsigned int timeout;
+
+       mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
+                          CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+       stat = read_cpc_co_stat_conf();
+       mips_cm_unlock_other();
+
+       seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
+       seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
+       if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
+               return;
+
+       /* Set endianness & power up the CM */
+       mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+       write_cpc_redir_sys_config(IS_ENABLED(CONFIG_CPU_BIG_ENDIAN));
+       write_cpc_redir_pwrup_ctl(1);
+       mips_cm_unlock_other();
+
+       /* Wait for the CM to start up */
+       timeout = 1000;
+       mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
+                          CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+       while (1) {
+               stat = read_cpc_co_stat_conf();
+               seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
+               seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
+               if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
+                       break;
+
+               if (timeout) {
+                       mdelay(1);
+                       timeout--;
+               } else {
+                       pr_warn("Waiting for cluster %u CM to power up... STAT_CONF=0x%x\n",
+                               cluster, stat);
+                       mdelay(1000);
+               }
+       }
+
+       mips_cm_unlock_other();
+}
+
 static unsigned __init core_vpe_count(unsigned int cluster, unsigned core)
 {
        return min(smp_max_threads, mips_cps_numvps(cluster, core));
@@ -178,6 +222,9 @@ static void __init cps_smp_setup(void)
                        pr_cont(",");
                pr_cont("{");
 
+               if (mips_cm_revision() >= CM_REV_CM3_5)
+                       power_up_other_cluster(cl);
+
                ncores = mips_cps_numcores(cl);
                for (c = 0; c < ncores; c++) {
                        core_vpes = core_vpe_count(cl, c);
@@ -205,8 +252,8 @@ static void __init cps_smp_setup(void)
 
        /* Indicate present CPUs (CPU being synonymous with VPE) */
        for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
-               set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
-               set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
+               set_cpu_possible(v, true);
+               set_cpu_present(v, true);
                __cpu_number_map[v] = v;
                __cpu_logical_map[v] = v;
        }
@@ -214,9 +261,6 @@ static void __init cps_smp_setup(void)
        /* Set a coherent default CCA (CWB) */
        change_c0_config(CONF_CM_CMASK, 0x5);
 
-       /* Core 0 is powered up (we're running on it) */
-       bitmap_set(core_power, 0, 1);
-
        /* Initialise core 0 */
        mips_cps_core_init();
 
@@ -298,6 +342,10 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
                        goto err_out;
                mips_cps_cluster_bootcfg[cl].core_config = core_bootcfg;
 
+               mips_cps_cluster_bootcfg[cl].core_power =
+                       kcalloc(BITS_TO_LONGS(ncores), sizeof(unsigned long),
+                               GFP_KERNEL);
+
                /* Allocate VPE boot configuration structs */
                for (c = 0; c < ncores; c++) {
                        core_vpes = core_vpe_count(cl, c);
@@ -309,11 +357,12 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
                }
        }
 
-       /* Mark this CPU as booted */
+       /* Mark this CPU as powered up & booted */
        cl = cpu_cluster(&current_cpu_data);
        c = cpu_core(&current_cpu_data);
        cluster_bootcfg = &mips_cps_cluster_bootcfg[cl];
        core_bootcfg = &cluster_bootcfg->core_config[c];
+       bitmap_set(cluster_bootcfg->core_power, cpu_core(&current_cpu_data), 1);
        atomic_set(&core_bootcfg->vpe_mask, 1 << cpu_vpe_id(&current_cpu_data));
 
        return;
@@ -341,13 +390,118 @@ err_out:
        }
 }
 
-static void boot_core(unsigned int core, unsigned int vpe_id)
+static void init_cluster_l2(void)
 {
-       u32 stat, seq_state;
-       unsigned timeout;
+       u32 l2_cfg, l2sm_cop, result;
+
+       while (1) {
+               l2_cfg = read_gcr_redir_l2_ram_config();
+
+               /* If HCI is not supported, use the state machine below */
+               if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_PRESENT))
+                       break;
+               if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_SUPPORTED))
+                       break;
+
+               /* If the HCI_DONE bit is set, we're finished */
+               if (l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_DONE)
+                       return;
+       }
+
+       l2sm_cop = read_gcr_redir_l2sm_cop();
+       if (WARN(!(l2sm_cop & CM_GCR_L2SM_COP_PRESENT),
+                "L2 init not supported on this system yet"))
+               return;
+
+       /* Clear L2 tag registers */
+       write_gcr_redir_l2_tag_state(0);
+       write_gcr_redir_l2_ecc(0);
+
+       /* Ensure the L2 tag writes complete before the state machine starts */
+       mb();
+
+       /* Wait for the L2 state machine to be idle */
+       do {
+               l2sm_cop = read_gcr_redir_l2sm_cop();
+       } while (l2sm_cop & CM_GCR_L2SM_COP_RUNNING);
+
+       /* Start a store tag operation */
+       l2sm_cop = CM_GCR_L2SM_COP_TYPE_IDX_STORETAG;
+       l2sm_cop <<= __ffs(CM_GCR_L2SM_COP_TYPE);
+       l2sm_cop |= CM_GCR_L2SM_COP_CMD_START;
+       write_gcr_redir_l2sm_cop(l2sm_cop);
+
+       /* Ensure the state machine starts before we poll for completion */
+       mb();
+
+       /* Wait for the operation to be complete */
+       do {
+               l2sm_cop = read_gcr_redir_l2sm_cop();
+               result = l2sm_cop & CM_GCR_L2SM_COP_RESULT;
+               result >>= __ffs(CM_GCR_L2SM_COP_RESULT);
+       } while (!result);
+
+       WARN(result != CM_GCR_L2SM_COP_RESULT_DONE_OK,
+            "L2 state machine failed cache init with error %u\n", result);
+}
+
+static void boot_core(unsigned int cluster, unsigned int core,
+                     unsigned int vpe_id)
+{
+       struct cluster_boot_config *cluster_cfg;
+       u32 access, stat, seq_state;
+       unsigned int timeout, ncores;
+
+       cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
+       ncores = mips_cps_numcores(cluster);
+
+       if ((cluster != cpu_cluster(&current_cpu_data)) &&
+           bitmap_empty(cluster_cfg->core_power, ncores)) {
+               power_up_other_cluster(cluster);
+
+               mips_cm_lock_other(cluster, core, 0,
+                                  CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+
+               /* Ensure cluster GCRs are where we expect */
+               write_gcr_redir_base(read_gcr_base());
+               write_gcr_redir_cpc_base(read_gcr_cpc_base());
+               write_gcr_redir_gic_base(read_gcr_gic_base());
+
+               init_cluster_l2();
+
+               /* Mirror L2 configuration */
+               write_gcr_redir_l2_only_sync_base(read_gcr_l2_only_sync_base());
+               write_gcr_redir_l2_pft_control(read_gcr_l2_pft_control());
+               write_gcr_redir_l2_pft_control_b(read_gcr_l2_pft_control_b());
+
+               /* Mirror ECC/parity setup */
+               write_gcr_redir_err_control(read_gcr_err_control());
+
+               /* Set BEV base */
+               write_gcr_redir_bev_base(core_entry_reg);
+
+               mips_cm_unlock_other();
+       }
+
+       if (cluster != cpu_cluster(&current_cpu_data)) {
+               mips_cm_lock_other(cluster, core, 0,
+                                  CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+
+               /* Ensure the core can access the GCRs */
+               access = read_gcr_redir_access();
+               access |= BIT(core);
+               write_gcr_redir_access(access);
+
+               mips_cm_unlock_other();
+       } else {
+               /* Ensure the core can access the GCRs */
+               access = read_gcr_access();
+               access |= BIT(core);
+               write_gcr_access(access);
+       }
 
        /* Select the appropriate core */
-       mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+       mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 
        /* Set its reset vector */
        if (mips_cm_is64)
@@ -416,7 +570,17 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
        mips_cm_unlock_other();
 
        /* The core is now powered up */
-       bitmap_set(core_power, core, 1);
+       bitmap_set(cluster_cfg->core_power, core, 1);
+
+       /*
+        * Restore CM_PWRUP=0 so that the CM can power down if all the cores in
+        * the cluster do (eg. if they're all removed via hotplug).
+        */
+       if (mips_cm_revision() >= CM_REV_CM3_5) {
+               mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+               write_cpc_redir_pwrup_ctl(0);
+               mips_cm_unlock_other();
+       }
 }
 
 static void remote_vpe_boot(void *dummy)
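
boot_core() now takes the target cluster. For the first core brought up
in a cluster other than the boot cluster it performs the one-time
cluster bring-up before releasing the core from reset; roughly, in order
(a summary of the code above, not additional logic):

    power_up_other_cluster(cluster);  /* CPC sequencer up to state U5 */
    /* mirror GCR/CPC/GIC bases into the cluster's redirect registers */
    init_cluster_l2();                /* HCI if supported, else L2SM  */
    /* mirror L2 prefetch/ECC setup, set BEV base, grant GCR access   */
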
@@ -442,10 +606,6 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle)
        unsigned int remote;
        int err;
 
-       /* We don't yet support booting CPUs in other clusters */
-       if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
-               return -ENOSYS;
-
        vpe_cfg->pc = (unsigned long)&smp_bootstrap;
        vpe_cfg->sp = __KSTK_TOS(idle);
        vpe_cfg->gp = (unsigned long)task_thread_info(idle);
@@ -454,14 +614,15 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle)
 
        preempt_disable();
 
-       if (!test_bit(core, core_power)) {
+       if (!test_bit(core, cluster_cfg->core_power)) {
                /* Boot a VPE on a powered down core */
-               boot_core(core, vpe_id);
+               boot_core(cluster, core, vpe_id);
                goto out;
        }
 
        if (cpu_has_vp) {
-               mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+               mips_cm_lock_other(cluster, core, vpe_id,
+                                  CM_GCR_Cx_OTHER_BLOCK_LOCAL);
                if (mips_cm_is64)
                        write_gcr_co_reset64_base(core_entry_reg);
                else
@@ -671,11 +832,15 @@ static void cps_cpu_die(unsigned int cpu) { }
 
 static void cps_cleanup_dead_cpu(unsigned cpu)
 {
+       unsigned int cluster = cpu_cluster(&cpu_data[cpu]);
        unsigned core = cpu_core(&cpu_data[cpu]);
        unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
        ktime_t fail_time;
        unsigned stat;
        int err;
+       struct cluster_boot_config *cluster_cfg;
+
+       cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
 
        /*
         * Now wait for the CPU to actually offline. Without doing this that
@@ -727,7 +892,7 @@ static void cps_cleanup_dead_cpu(unsigned cpu)
                } while (1);
 
                /* Indicate the core is powered off */
-               bitmap_clear(core_power, core, 1);
+               bitmap_clear(cluster_cfg->core_power, core, 1);
        } else if (cpu_has_mipsmt) {
                /*
                 * Have a CPU with access to the offlined CPUs registers wait