/**
* mips_cps_first_online_in_cluster() - Detect if CPU is first online in cluster
+ * @first_cpu: The first other online CPU in the cluster, or nr_cpu_ids if
+ * the function returns true.
*
* Determine whether the local CPU is the first to be brought online in its
* cluster - that is, whether no other CPU in the local cluster is already
* online.
*
* Returns true if this CPU is first online, else false.
*/
-extern unsigned int mips_cps_first_online_in_cluster(void);
+extern unsigned int mips_cps_first_online_in_cluster(int *first_cpu);
#endif /* __MIPS_ASM_MIPS_CPS_H__ */
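
For context, a caller of the updated API might look roughly like the sketch below. This is an illustrative assumption rather than code from this patch: the surrounding function and the work done in each branch are hypothetical.

	int first_cpu;

	if (mips_cps_first_online_in_cluster(&first_cpu)) {
		/* No other CPU in this cluster is online yet, so the
		 * one-time per-cluster bring-up work would go here.
		 */
	} else {
		/* first_cpu is the id of an already-online CPU in the
		 * same cluster.
		 */
	}
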
#include <linux/spinlock.h>
#include <asm/mips-cps.h>
+#include <asm/smp-cps.h>
#include <asm/mipsregs.h>
void __iomem *mips_gcr_base;
write_gcr_error_cause(cm_error);
}
-unsigned int mips_cps_first_online_in_cluster(void)
+unsigned int mips_cps_first_online_in_cluster(int *first_cpu)
{
- unsigned int local_cl;
- int i;
-
- local_cl = cpu_cluster(&current_cpu_data);
+ unsigned int local_cl = cpu_cluster(&current_cpu_data);
+ struct cpumask *local_cl_mask;
/*
- * We rely upon knowledge that CPUs are numbered sequentially by
- * cluster - ie. CPUs 0..X will be in cluster 0, CPUs X+1..Y in cluster
- * 1, CPUs Y+1..Z in cluster 2 etc. This means that CPUs in the same
- * cluster will immediately precede or follow one another.
- *
- * First we scan backwards, until we find an online CPU in the cluster
- * or we move on to another cluster.
+ * mips_cps_cluster_bootcfg is allocated in cps_prepare_cpus. If it has
+ * not been allocated yet, we are so early in boot that only one CPU is
+ * running, so it must be the first online CPU in the cluster.
*/
- for (i = smp_processor_id() - 1; i >= 0; i--) {
- if (cpu_cluster(&cpu_data[i]) != local_cl)
- break;
- if (!cpu_online(i))
- continue;
- return false;
- }
-
- /* Then do the same for higher numbered CPUs */
- for (i = smp_processor_id() + 1; i < nr_cpu_ids; i++) {
- if (cpu_cluster(&cpu_data[i]) != local_cl)
- break;
- if (!cpu_online(i))
- continue;
- return false;
- }
-
- /* We found no online CPUs in the local cluster */
- return true;
+ if (IS_ENABLED(CONFIG_MIPS_CPS) && mips_cps_cluster_bootcfg)
+ local_cl_mask = &mips_cps_cluster_bootcfg[local_cl].cpumask;
+ else
+ return true;
+
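+ /* Pick any other online CPU in this cluster's cpumask; the result is
+ * >= nr_cpu_ids if there is none.
+ */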
+ *first_cpu = cpumask_any_and_but(local_cl_mask,
+ cpu_online_mask,
+ smp_processor_id());
+ return (*first_cpu >= nr_cpu_ids);
}
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
- unsigned int nclusters, ncores, core_vpes, c, cl, cca;
+ unsigned int nclusters, ncores, core_vpes, nvpe = 0, c, cl, cca;
bool cca_unsuitable, cores_limited;
struct cluster_boot_config *cluster_bootcfg;
struct core_boot_config *core_bootcfg;
/* Allocate VPE boot configuration structs */
for (c = 0; c < ncores; c++) {
+ int v;
core_vpes = core_vpe_count(cl, c);
core_bootcfg[c].vpe_config = kcalloc(core_vpes,
sizeof(*core_bootcfg[c].vpe_config),
GFP_KERNEL);
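+ /* CPUs are numbered sequentially by cluster, core and VPE, so nvpe
+ * is the Linux CPU id of the VPE being recorded here.
+ */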
+ for (v = 0; v < core_vpes; v++)
+ cpumask_set_cpu(nvpe++, &mips_cps_cluster_bootcfg[cl].cpumask);
if (!core_bootcfg[c].vpe_config)
goto err_out;
}