/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/mmu.h>

/*
 * Layout of the CPU register save area.
 */
#define SL_SP		0
#define SL_PC		4
#define SL_MSR		8
#define SL_SDR1		0xc
#define SL_SPRG0	0x10	/* 4 sprg's */
#define SL_DBAT0	0x20
#define SL_IBAT0	0x28
#define SL_DBAT1	0x30
#define SL_IBAT1	0x38
#define SL_DBAT2	0x40
#define SL_IBAT2	0x48
#define SL_DBAT3	0x50
#define SL_IBAT3	0x58
#define SL_TB		0x60
#define SL_R2		0x68
#define SL_CR		0x6c
#define SL_LR		0x70
#define SL_R12		0x74	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)

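/*
 * For illustration only: the offsets above correspond roughly to this
 * (hypothetical) C struct.
 *
 *	struct swsusp_save_area {
 *		u32 sp, pc, msr, sdr1;		// 0x00
 *		u32 sprg[4];			// 0x10
 *		u32 dbat0[2], ibat0[2];		// 0x20, {upper, lower} pairs
 *		u32 dbat1[2], ibat1[2];		// 0x30
 *		u32 dbat2[2], ibat2[2];		// 0x40
 *		u32 dbat3[2], ibat3[2];		// 0x50
 *		u64 tb;				// 0x60
 *		u32 r2, cr, lr;			// 0x68
 *		u32 gpr[20];			// 0x74, r12..r31
 *	};					// sizeof == SL_SIZE == 0xc4
 */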
	.section .data
	.align	5

_GLOBAL(swsusp_save_area)
	.space	SL_SIZE


	.section .text
	.align	5

_GLOBAL(swsusp_arch_suspend)

	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l

	mflr	r0
	stw	r0,SL_LR(r11)
	mfcr	r0
	stw	r0,SL_CR(r11)
	stw	r1,SL_SP(r11)
	stw	r2,SL_R2(r11)
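	/*
	 * stmw stores r12 through r31 (20 words) with a single
	 * instruction, filling the 80 bytes reserved at SL_R12.
	 */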
	stmw	r12,SL_R12(r11)

	/* Save MSR & SDR1 */
	mfmsr	r4
	stw	r4,SL_MSR(r11)
	mfsdr1	r4
	stw	r4,SL_SDR1(r11)

	/* Get a stable timebase and save it */
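	/*
	 * The 64-bit timebase can't be read atomically on 32-bit
	 * PowerPC: read TBU, then TBL, then TBU again, and retry if
	 * TBU changed, i.e. if TBL carried over between the reads.
	 */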
1:	mftbu	r4
	stw	r4,SL_TB(r11)
	mftb	r5
	stw	r5,SL_TB+4(r11)
	mftbu	r3
	cmpw	r3,r4
	bne	1b

	/* Save SPRGs */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r11)
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r11)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r11)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r11)

	/* Save BATs */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r11)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r11)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r11)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r11)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r11)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r11)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r11)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r11)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r11)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r11)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r11)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r11)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r11)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r11)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r11)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r11)

#if 0
	/* Back up various CPU config state */
	bl	__save_cpu_setup
#endif
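	/*
	 * swsusp_save() is the generic C routine (kernel/power/snapshot.c)
	 * that builds the in-memory hibernation image; it returns 0 on
	 * success, and that return value is what we hand back in r3.
	 */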
	/* Call the low level suspend stuff (we should probably have made
	 * a stack frame...)
	 */
	bl	swsusp_save

	/* Restore LR from the save area */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	lwz	r0,SL_LR(r11)
	mtlr	r0

	blr

/* Resume code */
_GLOBAL(swsusp_arch_resume)

#ifdef CONFIG_ALTIVEC
	/* Stop pending altivec streams and memory accesses */
BEGIN_FTR_SECTION
	DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	sync

	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will be
	 * unusable for a while. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 at this
	 * point; G5 will need a better approach, possibly using
	 * a small temporary hash table filled with large mappings,
	 * as disabling the MMU completely isn't a good option for
	 * performance reasons.
	 * (Note that 750s may have the same performance issue as
	 * the G5 in this case; we should investigate moving
	 * BATs for these CPUs.)
	 */
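	/*
	 * The rlwinm below rotates by 0 and applies the wrapping mask
	 * MB=28, ME=26, i.e. it keeps every bit of the MSR except bit
	 * 27, which is MSR_DR (0x10) in IBM bit numbering.
	 */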
	mfmsr	r0
	sync
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	sync
	isync

	/* Load the pointer to the list of pages to copy into r10 */
	lis	r11,(restore_pblist - KERNELBASE)@h
	ori	r11,r11,restore_pblist@l
	lwz	r10,0(r11)

	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache-efficient */
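	/*
	 * Roughly equivalent C, for illustration only (field names as
	 * in struct pbe from <linux/suspend.h>):
	 *
	 *	for (pbe = restore_pblist; pbe; pbe = pbe->next)
	 *		memcpy(pbe->orig_address, pbe->address, PAGE_SIZE);
	 *
	 * The inner loop below copies one 4096-byte page as 256
	 * iterations of four-word loads and stores.
	 */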
1:
	tophys(r3,r10)
	li	r0,256
	mtctr	r0
	lwz	r11,pbe_address(r3)	/* source */
	tophys(r5,r11)
	lwz	r10,pbe_orig_address(r3)	/* destination */
	tophys(r6,r10)
2:
	lwz	r8,0(r5)
	lwz	r9,4(r5)
	lwz	r10,8(r5)
	lwz	r11,12(r5)
	addi	r5,r5,16
	stw	r8,0(r6)
	stw	r9,4(r6)
	stw	r10,8(r6)
	stw	r11,12(r6)
	addi	r6,r6,16
	bdnz	2b
	lwz	r10,pbe_next(r3)
	cmpwi	0,r10,0
	bne	1b

	/* Do a very simple cache flush/inval of the L1 to ensure
	 * coherency of the icache
	 */
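	/*
	 * r3 counts 0x20000 cache lines of 32 bytes: the loads below
	 * touch 4MB starting at physical address 0 (translation is off
	 * here), displacing the current L1 data cache contents.
	 */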
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines */
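	/*
	 * dcbf writes back and invalidates each of the same 0x20000
	 * 32-byte lines; the final sync waits for all of them to
	 * complete.
	 */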
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	dcbf	0,r3
	addi	r3,r3,0x0020
	bdnz	1b
	sync

	/* Ok, we are now running with the kernel data of the old
	 * kernel fully restored. We can get to the save area
	 * easily now. As for the rest of the code, it assumes the
	 * loader kernel and the booted one are exactly identical
	 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	tophys(r11,r11)

#if 0
	/* Restore various CPU config state */
	bl	__restore_cpu_setup
#endif
	/* Restore the BATs and SDR1. Then we can turn on the MMU.
	 * This is a bit hairy as we are running out of those BATs,
	 * but first, our code is probably in the icache, and we are
	 * writing the same values to the BATs, so that should be fine,
	 * though a better solution will have to be found long-term
	 */
	lwz	r4,SL_SDR1(r11)
	mtsdr1	r4
	lwz	r4,SL_SPRG0(r11)
	mtsprg	0,r4
	lwz	r4,SL_SPRG0+4(r11)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r11)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r11)
	mtsprg	3,r4

#if 0
	lwz	r4,SL_DBAT0(r11)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r11)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r11)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r11)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r11)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r11)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r11)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r11)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r11)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r11)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r11)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r11)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r11)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r11)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r11)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r11)
	mtibatl	3,r4
#endif

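	/*
	 * CPUs with MMU_FTR_USE_HIGH_BATS have four extra BAT pairs
	 * (IBAT4-7/DBAT4-7). The save area only covers BAT0-3, so just
	 * clear the high ones.
	 */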
BEGIN_MMU_FTR_SECTION
	li	r4,0
	mtspr	SPRN_DBAT4U,r4
	mtspr	SPRN_DBAT4L,r4
	mtspr	SPRN_DBAT5U,r4
	mtspr	SPRN_DBAT5L,r4
	mtspr	SPRN_DBAT6U,r4
	mtspr	SPRN_DBAT6L,r4
	mtspr	SPRN_DBAT7U,r4
	mtspr	SPRN_DBAT7L,r4
	mtspr	SPRN_IBAT4U,r4
	mtspr	SPRN_IBAT4L,r4
	mtspr	SPRN_IBAT5U,r4
	mtspr	SPRN_IBAT5L,r4
	mtspr	SPRN_IBAT6U,r4
	mtspr	SPRN_IBAT6L,r4
	mtspr	SPRN_IBAT7U,r4
	mtspr	SPRN_IBAT7L,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)

	/* Flush all TLBs */
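	/*
	 * Issue tlbie for every 4KB page in a 256MB range (0x10000
	 * iterations); stepping the EA by one page size should be
	 * enough to hit every TLB set on these CPUs.
	 */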
	lis	r4,0x1000
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	bgt	1b
	sync

	/* Restore the MSR and turn on the MMU */
	lwz	r3,SL_MSR(r11)
	bl	turn_on_mmu
	tovirt(r11,r11)

	/* Restore TB */
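	/*
	 * Zero TBL first so that a carry out of TBL can't increment
	 * TBU between the two writes below.
	 */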
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r11)
	lwz	r4,SL_TB+4(r11)
	mttbu	r3
	mttbl	r4

	/* Kick decrementer */
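	/*
	 * A count of 1 makes the decrementer expire (and raise an
	 * interrupt) almost as soon as interrupts are re-enabled.
	 */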
	li	r0,1
	mtdec	r0

	/* Restore the callee-saved registers and return */
	lwz	r0,SL_CR(r11)
	mtcr	r0
	lwz	r2,SL_R2(r11)
	lmw	r12,SL_R12(r11)
	lwz	r1,SL_SP(r11)
	lwz	r0,SL_LR(r11)
	mtlr	r0

	// XXX Note: we don't really need to call swsusp_resume

	li	r3,0
	blr

/* FIXME: This construct is actually not useful since we don't shut
 * down the instruction MMU; we could just flip MSR:DR back on.
 */
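/*
 * Called with the target MSR in r3: stash the return address in SRR0
 * and the new MSR in SRR1, so that rfi returns to the caller with
 * that MSR (and hence MMU translation) in effect.
 */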
turn_on_mmu:
	mflr	r4
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi