/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/arch/mp.h>
#include <asm/arch/soc.h>
#include "cpu.h"
#include <asm/arch-fsl-layerscape/soc.h>

DECLARE_GLOBAL_DATA_PTR;
void *get_spin_tbl_addr(void)
{
	return &__spin_table;
}
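/*
 * Editorial note: __spin_table (above) and secondary_boot_code (below) are
 * assumed to be symbols exported by this platform's low-level secondary
 * startup code; only their use, not their definition, is visible here.
 */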
phys_addr_t determine_mp_bootpg(void)
{
	return (phys_addr_t)&secondary_boot_code;
}
void update_os_arch_secondary_cores(uint8_t os_arch)
{
	u64 *table = get_spin_tbl_addr();
	int i;

	for (i = 1; i < CONFIG_MAX_CPUS; i++) {
		if (os_arch == IH_ARCH_DEFAULT)
			table[i * WORDS_PER_SPIN_TABLE_ENTRY +
				SPIN_TABLE_ELEM_ARCH_COMP_IDX] = OS_ARCH_SAME;
		else
			table[i * WORDS_PER_SPIN_TABLE_ENTRY +
				SPIN_TABLE_ELEM_ARCH_COMP_IDX] = OS_ARCH_DIFF;
	}
}
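/*
 * Editorial sketch (assumption): the ARCH_COMP word gives the secondary
 * boot path a hint about the OS image architecture, e.g. so a core can
 * switch execution state when the image (os_arch) differs from the
 * architecture U-Boot itself runs; OS_ARCH_SAME/OS_ARCH_DIFF encode the
 * two outcomes.
 */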
#ifdef CONFIG_FSL_LSCH3
void wake_secondary_core_n(int cluster, int core, int cluster_cores)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct ccsr_reset __iomem *rst = (void *)(CONFIG_SYS_FSL_RST_ADDR);
	u32 mpidr = 0;

	mpidr = ((cluster << 8) | core);
	/*
	 * The mpidr_el1 register value of the core which needs to be
	 * released is written to the scratchrw[6] register.
	 */
	gur_out32(&gur->scratchrw[6], mpidr);
	asm volatile("dsb st" : : : "memory");
	rst->brrl |= 1 << ((cluster * cluster_cores) + core);
	asm volatile("dsb st" : : : "memory");
	/*
	 * The scratchrw[6] register is polled; when its value becomes zero,
	 * this core is up and running and the next core can be released.
	 */
	while (gur_in32(&gur->scratchrw[6]) != 0)
		;
}
#endif
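/*
 * Handshake summary (editorial, inferred from the code above): write the
 * target MPIDR to scratchrw[6], dsb, set the core's bit in rst->brrl to
 * lift it out of reset, dsb, then spin until the released core clears
 * scratchrw[6] from its own startup path to signal that it is running.
 */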
int fsl_layerscape_wake_seconday_cores(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#ifdef CONFIG_FSL_LSCH3
	struct ccsr_reset __iomem *rst = (void *)(CONFIG_SYS_FSL_RST_ADDR);
	u32 svr, ver, cluster, type;
	int j = 0, cluster_cores = 0;
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_scfg __iomem *scfg = (void *)(CONFIG_SYS_FSL_SCFG_ADDR);
#endif
	u32 cores, cpu_up_mask = 1;
	int i, timeout = 10;
	u64 *table = get_spin_tbl_addr();

#ifdef COUNTER_FREQUENCY_REAL
	/* update for secondary cores */
	__real_cntfrq = COUNTER_FREQUENCY_REAL;
	flush_dcache_range((unsigned long)&__real_cntfrq,
			   (unsigned long)&__real_cntfrq + 8);
#endif

	cores = cpu_mask();
	/* Clear spin table so that secondary processors
	 * observe the correct value after waking up from wfe.
	 */
	memset(table, 0, CONFIG_MAX_CPUS * SPIN_TABLE_ELEM_SIZE);
	flush_dcache_range((unsigned long)table,
			   (unsigned long)table +
			   (CONFIG_MAX_CPUS * SPIN_TABLE_ELEM_SIZE));
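	/*
	 * Editorial note: the flush above matters because the secondaries
	 * start with their data caches off and read the spin table straight
	 * from memory; a stale table would leave them spinning on garbage.
	 */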
98 printf("Waking secondary cores to start from %lx\n", gd
->relocaddr
);
100 #ifdef CONFIG_FSL_LSCH3
101 gur_out32(&gur
->bootlocptrh
, (u32
)(gd
->relocaddr
>> 32));
102 gur_out32(&gur
->bootlocptrl
, (u32
)gd
->relocaddr
);
104 svr
= gur_in32(&gur
->svr
);
105 ver
= SVR_SOC_VER(svr
);
106 if (ver
== SVR_LS2080A
|| ver
== SVR_LS2085A
) {
107 gur_out32(&gur
->scratchrw
[6], 1);
108 asm volatile("dsb st" : : : "memory");
110 asm volatile("dsb st" : : : "memory");
113 * Release the cores out of reset one-at-a-time to avoid
117 cluster
= in_le32(&gur
->tp_cluster
[i
].lower
);
118 for (j
= 0; j
< TP_INIT_PER_CLUSTER
; j
++) {
119 type
= initiator_type(cluster
, j
);
121 TP_ITYP_TYPE(type
) == TP_ITYP_TYPE_ARM
)
126 cluster
= in_le32(&gur
->tp_cluster
[i
].lower
);
127 for (j
= 0; j
< TP_INIT_PER_CLUSTER
; j
++) {
128 type
= initiator_type(cluster
, j
);
130 TP_ITYP_TYPE(type
) == TP_ITYP_TYPE_ARM
)
131 wake_secondary_core_n(i
, j
,
135 } while ((cluster
& TP_CLUSTER_EOC
) != TP_CLUSTER_EOC
);
#elif defined(CONFIG_FSL_LSCH2)
	scfg_out32(&scfg->scratchrw[0], (u32)(gd->relocaddr >> 32));
	scfg_out32(&scfg->scratchrw[1], (u32)gd->relocaddr);
	asm volatile("dsb st" : : : "memory");
	gur_out32(&gur->brrl, cores);
	asm volatile("dsb st" : : : "memory");

	/* Boot up the online cores */
	scfg_out32(&scfg->corebcr, cores);
#endif
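	/*
	 * Editorial note: on LSCH3 parts other than LS2080A/LS2085A the
	 * cores leave reset one at a time via wake_secondary_core_n(),
	 * trading boot time for a gentler power ramp.
	 */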
	/* This is needed as a precautionary measure.
	 * If some code before this has accidentally released the secondary
	 * cores, the pre-bootloader code will trap them in "wfe" unless
	 * scratchrw[6] is set. In that case we need a sev here to get these
	 * cores moving again.
	 */
	asm volatile("sev");

	while (timeout--) {
		flush_dcache_range((unsigned long)table, (unsigned long)table +
				   CONFIG_MAX_CPUS * 64);
		for (i = 1; i < CONFIG_MAX_CPUS; i++) {
			if (table[i * WORDS_PER_SPIN_TABLE_ENTRY +
					SPIN_TABLE_ELEM_STATUS_IDX])
				cpu_up_mask |= 1 << i;
		}
		if (hweight32(cpu_up_mask) == hweight32(cores))
			break;
		udelay(10);
	}
	if (timeout <= 0) {
		printf("Not all cores (0x%x) are up (0x%x)\n",
		       cores, cpu_up_mask);
		return 1;
	}
	printf("All (%d) cores are up.\n", hweight32(cores));

	return 0;
}
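/*
 * Editorial sketch: a secondary reports itself alive by storing a non-zero
 * value into its SPIN_TABLE_ELEM_STATUS_IDX word from the secondary boot
 * path (is_core_online() below tests for exactly 1); that is the value the
 * polling loop in fsl_layerscape_wake_seconday_cores() waits for.
 */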
int is_core_valid(unsigned int core)
{
	return !!((1 << core) & cpu_mask());
}
static int is_pos_valid(unsigned int pos)
{
	return !!((1 << pos) & cpu_pos_mask());
}
int is_core_online(u64 cpu_id)
{
	u64 *table;
	int pos = id_to_core(cpu_id);

	table = (u64 *)get_spin_tbl_addr() + pos * WORDS_PER_SPIN_TABLE_ENTRY;
	return table[SPIN_TABLE_ELEM_STATUS_IDX] == 1;
}
int cpu_reset(int nr)
{
	puts("Feature is not implemented.\n");

	return 0;
}
int cpu_disable(int nr)
{
	puts("Feature is not implemented.\n");

	return 0;
}
static int core_to_pos(int nr)
{
	u32 cores = cpu_pos_mask();
	int i, count = 0;

	if (nr == 0) {
		return 0;
	} else if (nr >= hweight32(cores)) {
		puts("Not a valid core number.\n");
		return -1;
	}

	for (i = 1; i < 32; i++) {
		if (is_pos_valid(i)) {
			count++;
			if (count == nr)
				break;
		}
	}

	if (count != nr)
		return -1;

	return i;
}
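/*
 * Editorial note: cpu_status(), cpu_reset(), cpu_disable() and
 * cpu_release() in this file are assumed to back U-Boot's generic "cpu"
 * shell command; core_to_pos() maps the user-visible core number to a
 * spin-table position, skipping holes in cpu_pos_mask().
 */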
int cpu_status(int nr)
{
	u64 *table;
	int pos;

	if (nr == 0) {
		table = (u64 *)get_spin_tbl_addr();
		printf("table base @ 0x%p\n", table);
	} else {
		pos = core_to_pos(nr);
		if (pos < 0)
			return -1;
		table = (u64 *)get_spin_tbl_addr() + pos *
			WORDS_PER_SPIN_TABLE_ENTRY;
		printf("table @ 0x%p\n", table);
		printf("   addr   - 0x%016llx\n",
		       table[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX]);
		printf("   status - 0x%016llx\n",
		       table[SPIN_TABLE_ELEM_STATUS_IDX]);
		printf("   lpid   - 0x%016llx\n",
		       table[SPIN_TABLE_ELEM_LPID_IDX]);
	}

	return 0;
}
int cpu_release(int nr, int argc, char * const argv[])
{
	u64 boot_addr;
	u64 *table = (u64 *)get_spin_tbl_addr();
	int pos;

	pos = core_to_pos(nr);
	if (pos <= 0)
		return -1;

	table += pos * WORDS_PER_SPIN_TABLE_ENTRY;
	boot_addr = simple_strtoull(argv[0], NULL, 16);
	table[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX] = boot_addr;
	flush_dcache_range((unsigned long)table,
			   (unsigned long)table + SPIN_TABLE_ELEM_SIZE);
	asm volatile("dsb st");
	smp_kick_all_cpus();	/* only those with entry addr set will run */
	/*
	 * When the first release command runs, all cores are set to go. Those
	 * without a valid entry address will be trapped by "wfe". "sev" kicks
	 * them off to check the address again. When set, they continue to run.
	 */
	return 0;
}
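/*
 * Usage sketch (assuming the generic "cpu" command is wired to the hooks
 * above; the entry address is only an example value):
 *
 *   => cpu 1 status
 *   => cpu 1 release 0x80080000
 *
 * The release stores the entry address into core 1's spin-table slot and
 * issues sev so the waiting core re-reads its entry word and jumps to it.
 */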