]> git.ipfire.org Git - thirdparty/qemu.git/blob - target/arm/machine.c
Include qemu-common.h exactly where needed
[thirdparty/qemu.git] / target / arm / machine.c
1 #include "qemu/osdep.h"
2 #include "cpu.h"
3 #include "hw/hw.h"
4 #include "hw/boards.h"
5 #include "qemu/error-report.h"
6 #include "sysemu/kvm.h"
7 #include "kvm_arm.h"
8 #include "internals.h"
9 #include "migration/cpu.h"
10
11 static bool vfp_needed(void *opaque)
12 {
13 ARMCPU *cpu = opaque;
14 CPUARMState *env = &cpu->env;
15
16 return arm_feature(env, ARM_FEATURE_VFP);
17 }
18
19 static int get_fpscr(QEMUFile *f, void *opaque, size_t size,
20 const VMStateField *field)
21 {
22 ARMCPU *cpu = opaque;
23 CPUARMState *env = &cpu->env;
24 uint32_t val = qemu_get_be32(f);
25
26 vfp_set_fpscr(env, val);
27 return 0;
28 }
29
30 static int put_fpscr(QEMUFile *f, void *opaque, size_t size,
31 const VMStateField *field, QJSON *vmdesc)
32 {
33 ARMCPU *cpu = opaque;
34 CPUARMState *env = &cpu->env;
35
36 qemu_put_be32(f, vfp_get_fpscr(env));
37 return 0;
38 }
39
/* VMStateInfo wiring FPSCR migration to the custom get/put accessors. */
static const VMStateInfo vmstate_fpscr = {
    .name = "fpscr",
    .get = get_fpscr,
    .put = put_fpscr,
};
45
/*
 * Subsection carrying the VFP/Neon register file, FPSID/xregs and FPSCR.
 * Present only when vfp_needed() is true. The field order and version
 * numbers are migration wire format: do not reorder or renumber.
 */
static const VMStateDescription vmstate_vfp = {
    .name = "cpu/vfp",
    .version_id = 3,
    .minimum_version_id = 3,
    .needed = vfp_needed,
    .fields = (VMStateField[]) {
        /* For compatibility, store Qn out of Zn here. */
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[0].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[1].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[2].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[3].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[4].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[5].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[6].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[7].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[8].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[9].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[10].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[11].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[12].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[13].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[14].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[15].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[16].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[17].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[18].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[19].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[20].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[21].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[22].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[23].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[24].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[25].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[26].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[27].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[28].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[29].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[30].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[31].d, ARMCPU, 0, 2),

        /* The xregs array is a little awkward because element 1 (FPSCR)
         * requires a specific accessor, so we have to split it up in
         * the vmstate:
         */
        VMSTATE_UINT32(env.vfp.xregs[0], ARMCPU),
        VMSTATE_UINT32_SUB_ARRAY(env.vfp.xregs, ARMCPU, 2, 14),
        {
            .name = "fpscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_fpscr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
103
104 static bool iwmmxt_needed(void *opaque)
105 {
106 ARMCPU *cpu = opaque;
107 CPUARMState *env = &cpu->env;
108
109 return arm_feature(env, ARM_FEATURE_IWMMXT);
110 }
111
/* Subsection for the iwMMXt data and control registers. */
static const VMStateDescription vmstate_iwmmxt = {
    .name = "cpu/iwmmxt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = iwmmxt_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16),
        VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16),
        VMSTATE_END_OF_LIST()
    }
};
123
124 #ifdef TARGET_AARCH64
125 /* The expression ARM_MAX_VQ - 2 is 0 for pure AArch32 build,
126 * and ARMPredicateReg is actively empty. This triggers errors
127 * in the expansion of the VMSTATE macros.
128 */
129
130 static bool sve_needed(void *opaque)
131 {
132 ARMCPU *cpu = opaque;
133
134 return cpu_isar_feature(aa64_sve, cpu);
135 }
136
137 /* The first two words of each Zreg is stored in VFP state. */
/* The first two words of each Zreg is stored in VFP state. */
/* Per-Zreg descriptor for the remaining (high) doublewords only. */
static const VMStateDescription vmstate_zreg_hi_reg = {
    .name = "cpu/sve/zreg_hi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        /* Elements 2..ARM_MAX_VQ*2-1; 0 and 1 travel in cpu/vfp. */
        VMSTATE_UINT64_SUB_ARRAY(d, ARMVectorReg, 2, ARM_MAX_VQ - 2),
        VMSTATE_END_OF_LIST()
    }
};
147
/* Per-predicate-register descriptor for SVE P registers. */
static const VMStateDescription vmstate_preg_reg = {
    .name = "cpu/sve/preg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(p, ARMPredicateReg, 2 * ARM_MAX_VQ / 8),
        VMSTATE_END_OF_LIST()
    }
};
157
/*
 * Subsection for SVE state: high parts of the 32 Z registers and the
 * 16 P registers plus FFR (hence 17 entries).
 */
static const VMStateDescription vmstate_sve = {
    .name = "cpu/sve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sve_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.vfp.zregs, ARMCPU, 32, 0,
                             vmstate_zreg_hi_reg, ARMVectorReg),
        VMSTATE_STRUCT_ARRAY(env.vfp.pregs, ARMCPU, 17, 0,
                             vmstate_preg_reg, ARMPredicateReg),
        VMSTATE_END_OF_LIST()
    }
};
171 #endif /* AARCH64 */
172
173 static bool serror_needed(void *opaque)
174 {
175 ARMCPU *cpu = opaque;
176 CPUARMState *env = &cpu->env;
177
178 return env->serror.pending != 0;
179 }
180
/* Subsection for a pending SError (virtual abort) and its syndrome. */
static const VMStateDescription vmstate_serror = {
    .name = "cpu/serror",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = serror_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(env.serror.pending, ARMCPU),
        VMSTATE_UINT8(env.serror.has_esr, ARMCPU),
        VMSTATE_UINT64(env.serror.esr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
193
194 static bool irq_line_state_needed(void *opaque)
195 {
196 return true;
197 }
198
/* Subsection recording the current state of the CPU's IRQ input lines. */
static const VMStateDescription vmstate_irq_line_state = {
    .name = "cpu/irq-line-state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = irq_line_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.irq_line_state, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
209
210 static bool m_needed(void *opaque)
211 {
212 ARMCPU *cpu = opaque;
213 CPUARMState *env = &cpu->env;
214
215 return arm_feature(env, ARM_FEATURE_M);
216 }
217
/*
 * Subsection for the M-profile FAULTMASK/PRIMASK registers (NS bank);
 * older QEMUs carried these in the CPSR I/F bits instead (see get_cpsr).
 */
static const VMStateDescription vmstate_m_faultmask_primask = {
    .name = "cpu/m/faultmask-primask",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
229
230 /* CSSELR is in a subsection because we didn't implement it previously.
231 * Migration from an old implementation will leave it at zero, which
232 * is OK since the only CPUs in the old implementation make the
233 * register RAZ/WI.
234 * Since there was no version of QEMU which implemented the CSSELR for
235 * just non-secure, we transfer both banks here rather than putting
236 * the secure banked version in the m-security subsection.
237 */
238 static bool csselr_vmstate_validate(void *opaque, int version_id)
239 {
240 ARMCPU *cpu = opaque;
241
242 return cpu->env.v7m.csselr[M_REG_NS] <= R_V7M_CSSELR_INDEX_MASK
243 && cpu->env.v7m.csselr[M_REG_S] <= R_V7M_CSSELR_INDEX_MASK;
244 }
245
246 static bool m_csselr_needed(void *opaque)
247 {
248 ARMCPU *cpu = opaque;
249
250 return !arm_v7m_csselr_razwi(cpu);
251 }
252
/* Subsection for both banks of CSSELR, with range validation on load. */
static const VMStateDescription vmstate_m_csselr = {
    .name = "cpu/m/csselr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_csselr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.csselr, ARMCPU, M_REG_NUM_BANKS),
        /* Validation runs after the array above has been loaded. */
        VMSTATE_VALIDATE("CSSELR is valid", csselr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};
264
/* Subsection for the M-profile SCR register (non-secure bank). */
static const VMStateDescription vmstate_m_scr = {
    .name = "cpu/m/scr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.scr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
275
/* Subsection for the inactive stack pointer (MSP or PSP, whichever
 * is not currently in use).
 */
static const VMStateDescription vmstate_m_other_sp = {
    .name = "cpu/m/other-sp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.other_sp, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
286
287 static bool m_v8m_needed(void *opaque)
288 {
289 ARMCPU *cpu = opaque;
290 CPUARMState *env = &cpu->env;
291
292 return arm_feature(env, ARM_FEATURE_M) && arm_feature(env, ARM_FEATURE_V8);
293 }
294
/* Subsection for v8M-only state: the stack pointer limit registers. */
static const VMStateDescription vmstate_m_v8m = {
    .name = "cpu/m/v8m",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_v8m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.msplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.psplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_END_OF_LIST()
    }
};
306
/* Subsection for the M-profile floating point control registers. */
static const VMStateDescription vmstate_m_fp = {
    .name = "cpu/m/fp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vfp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.fpcar, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpccr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.fpdscr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.cpacr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32(env.v7m.nsacr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
321
/*
 * Main M-profile subsection (non-secure bank of most registers).
 * Secure-banked copies live in vmstate_m_security; later additions
 * each got their own subsection for cross-version compatibility.
 */
static const VMStateDescription vmstate_m = {
    .name = "cpu/m",
    .version_id = 4,
    .minimum_version_id = 4,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.hfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.dfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.bfar, ARMCPU),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_NS], ARMCPU),
        VMSTATE_INT32(env.v7m.exception, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_m_faultmask_primask,
        &vmstate_m_csselr,
        &vmstate_m_scr,
        &vmstate_m_other_sp,
        &vmstate_m_v8m,
        &vmstate_m_fp,
        NULL
    }
};
351
352 static bool thumb2ee_needed(void *opaque)
353 {
354 ARMCPU *cpu = opaque;
355 CPUARMState *env = &cpu->env;
356
357 return arm_feature(env, ARM_FEATURE_THUMB2EE);
358 }
359
/* Subsection for the ThumbEE control and handler base registers. */
static const VMStateDescription vmstate_thumb2ee = {
    .name = "cpu/thumb2ee",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = thumb2ee_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.teecr, ARMCPU),
        VMSTATE_UINT32(env.teehbr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
371
372 static bool pmsav7_needed(void *opaque)
373 {
374 ARMCPU *cpu = opaque;
375 CPUARMState *env = &cpu->env;
376
377 return arm_feature(env, ARM_FEATURE_PMSA) &&
378 arm_feature(env, ARM_FEATURE_V7) &&
379 !arm_feature(env, ARM_FEATURE_V8);
380 }
381
382 static bool pmsav7_rgnr_vmstate_validate(void *opaque, int version_id)
383 {
384 ARMCPU *cpu = opaque;
385
386 return cpu->env.pmsav7.rnr[M_REG_NS] < cpu->pmsav7_dregion;
387 }
388
/*
 * Subsection for the v7 PMSA MPU region registers. The arrays are sized
 * at runtime by pmsav7_dregion; rnr itself arrives via the pmsav7-rnr
 * subsection, which is ordered before this one so the VALIDATE works.
 */
static const VMStateDescription vmstate_pmsav7 = {
    .name = "cpu/pmsav7",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav7.drbar, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.drsr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.dracr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VALIDATE("rgnr is valid", pmsav7_rgnr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};
405
406 static bool pmsav7_rnr_needed(void *opaque)
407 {
408 ARMCPU *cpu = opaque;
409 CPUARMState *env = &cpu->env;
410
411 /* For R profile cores pmsav7.rnr is migrated via the cpreg
412 * "RGNR" definition in helper.h. For M profile we have to
413 * migrate it separately.
414 */
415 return arm_feature(env, ARM_FEATURE_M);
416 }
417
/* Subsection for the MPU region number register on M-profile cores. */
static const VMStateDescription vmstate_pmsav7_rnr = {
    .name = "cpu/pmsav7-rnr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_rnr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
428
429 static bool pmsav8_needed(void *opaque)
430 {
431 ARMCPU *cpu = opaque;
432 CPUARMState *env = &cpu->env;
433
434 return arm_feature(env, ARM_FEATURE_PMSA) &&
435 arm_feature(env, ARM_FEATURE_V8);
436 }
437
/* Subsection for the v8 PMSA MPU registers (non-secure bank). */
static const VMStateDescription vmstate_pmsav8 = {
    .name = "cpu/pmsav8",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav8_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
453
454 static bool s_rnr_vmstate_validate(void *opaque, int version_id)
455 {
456 ARMCPU *cpu = opaque;
457
458 return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion;
459 }
460
461 static bool sau_rnr_vmstate_validate(void *opaque, int version_id)
462 {
463 ARMCPU *cpu = opaque;
464
465 return cpu->env.sau.rnr < cpu->sau_sregion;
466 }
467
468 static bool m_security_needed(void *opaque)
469 {
470 ARMCPU *cpu = opaque;
471 CPUARMState *env = &cpu->env;
472
473 return arm_feature(env, ARM_FEATURE_M_SECURITY);
474 }
475
/*
 * Subsection for all the secure-banked M-profile state, plus the SAU
 * registers and a few registers that only exist with the Security
 * Extension. Field order is migration wire format.
 */
static const VMStateDescription vmstate_m_security = {
    .name = "cpu/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_security_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.secure, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_msp, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_psp, ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_S], ARMCPU),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_S], ARMCPU),
        /* rnr is loaded just above, so the validation can use it. */
        VMSTATE_VALIDATE("secure MPU_RNR is valid", s_rnr_vmstate_validate),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.sfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.sfar, ARMCPU),
        VMSTATE_VARRAY_UINT32(env.sau.rbar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.sau.rlar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.sau.rnr, ARMCPU),
        VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate),
        VMSTATE_UINT32(env.sau.ctrl, ARMCPU),
        VMSTATE_UINT32(env.v7m.scr[M_REG_S], ARMCPU),
        /* AIRCR is not secure-only, but our implementation is R/O if the
         * security extension is unimplemented, so we migrate it here.
         */
        VMSTATE_UINT32(env.v7m.aircr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
519
/*
 * Load the "cpsr" field. This single wire slot carries three different
 * formats depending on the CPU state: XPSR for M profile, PSTATE for
 * AArch64, or CPSR for AArch32 A/R profile. It also converts CPSR-format
 * values sent by older QEMUs for M-profile CPUs into XPSR format.
 */
static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    if (arm_feature(env, ARM_FEATURE_M)) {
        if (val & XPSR_EXCP) {
            /* This is a CPSR format value from an older QEMU. (We can tell
             * because values transferred in XPSR format always have zero
             * for the EXCP field, and CPSR format will always have bit 4
             * set in CPSR_M.) Rearrange it into XPSR format. The significant
             * differences are that the T bit is not in the same place, the
             * primask/faultmask info may be in the CPSR I and F bits, and
             * we do not want the mode bits.
             * We know that this cleanup happened before v8M, so there
             * is no complication with banked primask/faultmask.
             */
            uint32_t newval = val;

            assert(!arm_feature(env, ARM_FEATURE_M_SECURITY));

            newval &= (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE);
            if (val & CPSR_T) {
                newval |= XPSR_T;
            }
            /* If the I or F bits are set then this is a migration from
             * an old QEMU which still stored the M profile FAULTMASK
             * and PRIMASK in env->daif. For a new QEMU, the data is
             * transferred using the vmstate_m_faultmask_primask subsection.
             */
            if (val & CPSR_F) {
                env->v7m.faultmask[M_REG_NS] = 1;
            }
            if (val & CPSR_I) {
                env->v7m.primask[M_REG_NS] = 1;
            }
            val = newval;
        }
        /* Ignore the low bits, they are handled by vmstate_m. */
        xpsr_write(env, val, ~XPSR_EXCP);
        return 0;
    }

    /* PSTATE_nRW clear means the saved state was AArch64. */
    env->aarch64 = ((val & PSTATE_nRW) == 0);

    if (is_a64(env)) {
        pstate_write(env, val);
        return 0;
    }

    /* Raw write: restore exactly what was saved, no side effects. */
    cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    return 0;
}
575
576 static int put_cpsr(QEMUFile *f, void *opaque, size_t size,
577 const VMStateField *field, QJSON *vmdesc)
578 {
579 ARMCPU *cpu = opaque;
580 CPUARMState *env = &cpu->env;
581 uint32_t val;
582
583 if (arm_feature(env, ARM_FEATURE_M)) {
584 /* The low 9 bits are v7m.exception, which is handled by vmstate_m. */
585 val = xpsr_read(env) & ~XPSR_EXCP;
586 } else if (is_a64(env)) {
587 val = pstate_read(env);
588 } else {
589 val = cpsr_read(env);
590 }
591
592 qemu_put_be32(f, val);
593 return 0;
594 }
595
/* VMStateInfo for the multi-format cpsr slot; see get_cpsr/put_cpsr. */
static const VMStateInfo vmstate_cpsr = {
    .name = "cpsr",
    .get = get_cpsr,
    .put = put_cpsr,
};
601
602 static int get_power(QEMUFile *f, void *opaque, size_t size,
603 const VMStateField *field)
604 {
605 ARMCPU *cpu = opaque;
606 bool powered_off = qemu_get_byte(f);
607 cpu->power_state = powered_off ? PSCI_OFF : PSCI_ON;
608 return 0;
609 }
610
611 static int put_power(QEMUFile *f, void *opaque, size_t size,
612 const VMStateField *field, QJSON *vmdesc)
613 {
614 ARMCPU *cpu = opaque;
615
616 /* Migration should never happen while we transition power states */
617
618 if (cpu->power_state == PSCI_ON ||
619 cpu->power_state == PSCI_OFF) {
620 bool powered_off = (cpu->power_state == PSCI_OFF) ? true : false;
621 qemu_put_byte(f, powered_off);
622 return 0;
623 } else {
624 return 1;
625 }
626 }
627
/* VMStateInfo for the power_state field; see get_power/put_power. */
static const VMStateInfo vmstate_powered_off = {
    .name = "powered_off",
    .get = get_power,
    .put = put_power,
};
633
634 static int cpu_pre_save(void *opaque)
635 {
636 ARMCPU *cpu = opaque;
637
638 if (!kvm_enabled()) {
639 pmu_op_start(&cpu->env);
640 }
641
642 if (kvm_enabled()) {
643 if (!write_kvmstate_to_list(cpu)) {
644 /* This should never fail */
645 abort();
646 }
647 } else {
648 if (!write_cpustate_to_list(cpu, false)) {
649 /* This should never fail. */
650 abort();
651 }
652 }
653
654 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
655 memcpy(cpu->cpreg_vmstate_indexes, cpu->cpreg_indexes,
656 cpu->cpreg_array_len * sizeof(uint64_t));
657 memcpy(cpu->cpreg_vmstate_values, cpu->cpreg_values,
658 cpu->cpreg_array_len * sizeof(uint64_t));
659
660 return 0;
661 }
662
663 static int cpu_post_save(void *opaque)
664 {
665 ARMCPU *cpu = opaque;
666
667 if (!kvm_enabled()) {
668 pmu_op_finish(&cpu->env);
669 }
670
671 return 0;
672 }
673
674 static int cpu_pre_load(void *opaque)
675 {
676 ARMCPU *cpu = opaque;
677 CPUARMState *env = &cpu->env;
678
679 /*
680 * Pre-initialize irq_line_state to a value that's never valid as
681 * real data, so cpu_post_load() can tell whether we've seen the
682 * irq-line-state subsection in the incoming migration state.
683 */
684 env->irq_line_state = UINT32_MAX;
685
686 if (!kvm_enabled()) {
687 pmu_op_start(&cpu->env);
688 }
689
690 return 0;
691 }
692
/*
 * post_load hook: reconcile the incoming coprocessor register list with
 * our own, push the values into CPU/KVM state, and fix up derived state
 * (irq lines, breakpoints/watchpoints, PMU). Returns 0 on success, -1
 * to fail the migration.
 */
static int cpu_post_load(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    int i, v;

    /*
     * Handle migration compatibility from old QEMU which didn't
     * send the irq-line-state subsection. A QEMU without it did not
     * implement the HCR_EL2.{VI,VF} bits as generating interrupts,
     * so for TCG the line state matches the bits set in cs->interrupt_request.
     * For KVM the line state is not stored in cs->interrupt_request
     * and so this will leave irq_line_state as 0, but this is OK because
     * we only need to care about it for TCG.
     */
    if (env->irq_line_state == UINT32_MAX) {
        CPUState *cs = CPU(cpu);

        env->irq_line_state = cs->interrupt_request &
            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ |
             CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VFIQ);
    }

    /* Update the values list from the incoming migration data.
     * Anything in the incoming data which we don't know about is
     * a migration failure; anything we know about but the incoming
     * data doesn't specify retains its current (reset) value.
     * The indexes list remains untouched -- we only inspect the
     * incoming migration index list so we can match the values array
     * entries with the right slots in our own values array.
     */

    /* Two-cursor merge over the sorted index lists: i walks our list,
     * v walks the incoming one; v only advances on a match.
     */
    for (i = 0, v = 0; i < cpu->cpreg_array_len
             && v < cpu->cpreg_vmstate_array_len; i++) {
        if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) {
            /* register in our list but not incoming : skip it */
            continue;
        }
        if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) {
            /* register in their list but not ours: fail migration */
            return -1;
        }
        /* matching register, copy the value over */
        cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v];
        v++;
    }

    if (kvm_enabled()) {
        if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
            return -1;
        }
        /* Note that it's OK for the TCG side not to know about
         * every register in the list; KVM is authoritative if
         * we're using it.
         */
        write_list_to_cpustate(cpu);
    } else {
        if (!write_list_to_cpustate(cpu)) {
            return -1;
        }
    }

    /* Re-derive debug state from the freshly loaded registers. */
    hw_breakpoint_update_all(cpu);
    hw_watchpoint_update_all(cpu);

    if (!kvm_enabled()) {
        pmu_op_finish(&cpu->env);
    }

    return 0;
}
764
/*
 * Top-level migration description for an ARM CPU. The main field list
 * carries the always-present core state; optional/feature-dependent
 * state travels in the subsections, each gated by its own .needed hook.
 * Field order and version numbers are wire format: do not reorder.
 */
const VMStateDescription vmstate_arm_cpu = {
    .name = "cpu",
    .version_id = 22,
    .minimum_version_id = 22,
    .pre_save = cpu_pre_save,
    .post_save = cpu_post_save,
    .pre_load = cpu_pre_load,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
        VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
        VMSTATE_UINT64(env.pc, ARMCPU),
        {
            /* Custom accessor: format depends on M/A64/A32 state. */
            .name = "cpsr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_cpsr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_UINT32(env.spsr, ARMCPU),
        VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
        VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
        VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
        /* The length-check must come before the arrays to avoid
         * incoming data possibly overflowing the array.
         */
        VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len, ARMCPU),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
        VMSTATE_UINT64(env.exclusive_val, ARMCPU),
        VMSTATE_UINT64(env.exclusive_high, ARMCPU),
        VMSTATE_UINT64(env.features, ARMCPU),
        VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
        VMSTATE_UINT32(env.exception.fsr, ARMCPU),
        VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
        {
            /* Custom accessor: fails migration mid power transition. */
            .name = "power_state",
            .version_id = 0,
            .size = sizeof(bool),
            .info = &vmstate_powered_off,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_vfp,
        &vmstate_iwmmxt,
        &vmstate_m,
        &vmstate_thumb2ee,
        /* pmsav7_rnr must come before pmsav7 so that we have the
         * region number before we test it in the VMSTATE_VALIDATE
         * in vmstate_pmsav7.
         */
        &vmstate_pmsav7_rnr,
        &vmstate_pmsav7,
        &vmstate_pmsav8,
        &vmstate_m_security,
#ifdef TARGET_AARCH64
        &vmstate_sve,
#endif
        &vmstate_serror,
        &vmstate_irq_line_state,
        NULL
    }
};