// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

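/*
 * Helpers to decode ICH_VTR_EL2: the index of the last implemented
 * List Register, the number of preemption (priority group) bits, and
 * the number of ICH_AP[01]Rn_EL2 registers that number of bits implies.
 */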
#define vtr_to_max_lr_idx(v)		((v) & 0xf)
#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)		(1 << (vtr_to_nr_pre_bits(v) - 5))

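/*
 * The ICH_LR<n>_EL2 registers cannot be indexed dynamically, so reads
 * and writes of a List Register by number go through these
 * switch-based accessors.
 */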
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}

static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}

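/*
 * Same trick for the active priority registers: ICH_AP0Rn_EL2 and
 * ICH_AP1Rn_EL2 accesses are dispatched by index through a switch.
 */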
static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}

static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}

static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP0R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP0R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP0R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP0R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP1R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP1R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP1R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP1R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

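/*
 * Save the state of the in-use List Registers into the shadow cpu_if
 * and clear ICH_HCR_EL2.EN on the way out of the guest.
 */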
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface when reading the
	 * LRs, and when reading back the VMCR on non-VHE systems.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			dsb(sy);
			isb();
		}
	}

	if (used_lrs || cpu_if->its_vpe.its_vm) {
		int i;
		u32 elrsr;

		elrsr = read_gicreg(ICH_ELRSR_EL2);

		write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++) {
			if (elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}
	}
}

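/*
 * Mirror image of __vgic_v3_save_state(): reprogram ICH_HCR_EL2 and the
 * List Registers from the shadow cpu_if before entering the guest.
 */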
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	if (used_lrs || cpu_if->its_vpe.its_vm) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++)
			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
	}

	/*
	 * Ensure that writes to the LRs, and on non-VHE systems ensure that
	 * the write to the VMCR in __vgic_v3_activate_traps(), will have
	 * reached the (re)distributors. This ensures the guest will read the
	 * correct values from the memory-mapped interface.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			isb();
			dsb(sy);
		}
	}
}

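/*
 * Set up the EL2 view of the CPU interface for the guest: program
 * ICC_SRE_EL1 and ICH_VMCR_EL2 for non-SRE guests, prevent the guest
 * from touching the GIC system registers via ICC_SRE_EL2, and write
 * ICH_HCR_EL2 when sysreg trapping or a directly-injected vPE requires
 * it.
 */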
void __hyp_text __vgic_v3_activate_traps(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC, and VMCR_EL2 in
	 * particular. This logic must be called before
	 * __vgic_v3_restore_state().
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

		if (has_vhe()) {
			/*
			 * Ensure that the write to the VMCR will have reached
			 * the (re)distributors. This ensures the guest will
			 * read the correct values from the memory-mapped
			 * interface.
			 */
			isb();
			dsb(sy);
		}
	}

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);

	/*
	 * If we need to trap system registers, we must write
	 * ICH_HCR_EL2 anyway, even if no interrupts are being
	 * injected.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}

void __hyp_text __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;

	if (!cpu_if->vgic_sre) {
		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
	}

	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}

	/*
	 * If we were trapping system registers, we enabled the VGIC even if
	 * no interrupts were being injected, and we disable it again here.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(0, ICH_HCR_EL2);
}

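/*
 * Save/restore the active priority registers. How many ICH_AP[01]Rn_EL2
 * registers exist depends on the number of implemented preemption bits,
 * hence the switches below, which deliberately fall through from the
 * larger configurations to the smaller ones.
 */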
void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if;
	u64 val;
	u32 nr_pre_bits;

	vcpu = kern_hyp_va(vcpu);
	cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
		cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
	case 6:
		cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
	default:
		cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
	}

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
		cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
	case 6:
		cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
	default:
		cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
	}
}

void __hyp_text __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if;
	u64 val;
	u32 nr_pre_bits;

	vcpu = kern_hyp_va(vcpu);
	cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
	case 6:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
	default:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
	}

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
	case 6:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
	default:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
	}
}

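/* Reset every implemented List Register to an empty value. */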
void __hyp_text __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}

u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}

u64 __hyp_text __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}

void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}

#ifdef CONFIG_ARM64

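/*
 * Everything below implements trap-and-emulate handling of the guest's
 * GICv3 CPU interface system registers, dispatched from
 * __vgic_v3_perform_cpuif_access() at the bottom of this file.
 */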
static int __hyp_text __vgic_v3_bpr_min(void)
{
	/* See Pseudocode for VPriorityGroup */
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}

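/*
 * Decode which group a trapped access targets: the Group-0 flavours of
 * the banked ICC registers handled here (IAR0, EOIR0, HPPIR0, BPR0,
 * AP0Rn) are encoded with CRm == 8, so anything else is treated as a
 * Group-1 access.
 */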
static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

	return crm != 8;
}

#define GICv3_IDLE_PRIORITY	0xff

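/*
 * Scan the in-use LRs for the highest-priority pending interrupt whose
 * group is enabled. Returns the LR index and its value, or -1 (and a
 * spurious INTID in *lr_val) if there is nothing to deliver.
 */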
static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
						    u32 vmcr,
						    u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u8 priority = GICv3_IDLE_PRIORITY;
	int i, lr = -1;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);
		u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

		/* Not pending in the state? */
		if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
			continue;

		/* Group-0 interrupt, but Group-0 disabled? */
		if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
			continue;

		/* Group-1 interrupt, but Group-1 disabled? */
		if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
			continue;

		/* Not the highest priority? */
		if (lr_prio >= priority)
			continue;

		/* This is a candidate */
		priority = lr_prio;
		*lr_val = val;
		lr = i;
	}

	if (lr == -1)
		*lr_val = ICC_IAR1_EL1_SPURIOUS;

	return lr;
}

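/*
 * Find the LR holding a given INTID in the active state. Returns the LR
 * index, or -1 (and a spurious INTID in *lr_val) if it isn't resident.
 */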
static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
					       int intid, u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);

		if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
		    (val & ICH_LR_ACTIVE_BIT)) {
			*lr_val = val;
			return i;
		}
	}

	*lr_val = ICC_IAR1_EL1_SPURIOUS;
	return -1;
}

static int __hyp_text __vgic_v3_get_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 val;

		/*
		 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
		 * contain the active priority levels for this VCPU
		 * for the maximum number of supported priority
		 * levels, and we return the full priority level only
		 * if the BPR is programmed to its minimum, otherwise
		 * we return a combination of the priority level and
		 * subpriority, as determined by the setting of the
		 * BPR, but without the full subpriority.
		 */
		val  = __vgic_v3_read_ap0rn(i);
		val |= __vgic_v3_read_ap1rn(i);
		if (!val) {
			hap += 32;
			continue;
		}

		return (hap + __ffs(val)) << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

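/*
 * Extract the BPRs from the shadow VMCR. When CBPR is set, BPR1 reads
 * as BPR0 + 1, saturated at 7.
 */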
static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr)
{
	return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr)
{
	unsigned int bpr;

	if (vmcr & ICH_VMCR_CBPR_MASK) {
		bpr = __vgic_v3_get_bpr0(vmcr);
		if (bpr < 7)
			bpr++;
	} else {
		bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	}

	return bpr;
}

/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
	unsigned int bpr;

	if (!grp)
		bpr = __vgic_v3_get_bpr0(vmcr) + 1;
	else
		bpr = __vgic_v3_get_bpr1(vmcr);

	return pri & (GENMASK(7, 0) << bpr);
}

/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __hyp_text __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
	u8 pre, ap;
	u32 val;
	int apr;

	pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
	ap = pre >> __vgic_v3_bpr_min();
	apr = ap / 32;

	if (!grp) {
		val = __vgic_v3_read_ap0rn(apr);
		__vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
	} else {
		val = __vgic_v3_read_ap1rn(apr);
		__vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
	}
}

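/*
 * Drop the highest active priority: clear the lowest set bit across the
 * AP0Rn/AP1Rn registers and return it, rescaled to an 8-bit priority.
 */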
static int __hyp_text __vgic_v3_clear_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 ap0, ap1;
		int c0, c1;

		ap0 = __vgic_v3_read_ap0rn(i);
		ap1 = __vgic_v3_read_ap1rn(i);
		if (!ap0 && !ap1) {
			hap += 32;
			continue;
		}

		c0 = ap0 ? __ffs(ap0) : 32;
		c1 = ap1 ? __ffs(ap1) : 32;

		/* Always clear the LSB, which is the highest priority */
		if (c0 < c1) {
			ap0 &= ~BIT(c0);
			__vgic_v3_write_ap0rn(ap0, i);
			hap += c0;
		} else {
			ap1 &= ~BIT(c1);
			__vgic_v3_write_ap1rn(ap1, i);
			hap += c1;
		}

		/* Rescale to 8 bits of priority */
		return hap << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

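/*
 * Emulate a read of ICC_IAR{0,1}_EL1: pick the highest-priority pending
 * interrupt, check it against the access group, the PMR and the current
 * preemption level, then mark it active and return its INTID (or report
 * a spurious interrupt).
 */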
static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	u8 lr_prio, pmr;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr < 0)
		goto spurious;

	if (grp != !!(lr_val & ICH_LR_GROUP))
		goto spurious;

	pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
	if (pmr <= lr_prio)
		goto spurious;

	if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	/* No active state for LPIs */
	if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
		lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
	return;

spurious:
	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}

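/*
 * Deactivate the interrupt held in an LR. For a HW-linked interrupt,
 * also propagate the deactivation to the physical distributor.
 */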
static void __hyp_text __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
	lr_val &= ~ICH_LR_ACTIVE_BIT;
	if (lr_val & ICH_LR_HW) {
		u32 pid;

		pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
		gic_write_dir(pid);
	}

	__gic_v3_set_lr(lr_val, lr);
}

static void __hyp_text __vgic_v3_bump_eoicount(void)
{
	u32 hcr;

	hcr = read_gicreg(ICH_HCR_EL2);
	hcr += 1 << ICH_HCR_EOIcount_SHIFT;
	write_gicreg(hcr, ICH_HCR_EL2);
}

static void __hyp_text __vgic_v3_write_dir(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	int lr;

	/* EOImode == 0, nothing to be done here */
	if (!(vmcr & ICH_VMCR_EOIM_MASK))
		return;

	/* No deactivate to be performed on an LPI */
	if (vid >= VGIC_MIN_LPI)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __hyp_text __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	u8 lr_prio, act_prio;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	/* Drop priority in any case */
	act_prio = __vgic_v3_clear_highest_active_priority();

	/* If EOIing an LPI, no deactivate to be performed */
	if (vid >= VGIC_MIN_LPI)
		return;

	/* EOImode == 1, nothing to be done here */
	if (vmcr & ICH_VMCR_EOIM_MASK)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	/* If priorities or group do not match, the guest has fscked-up. */
	if (grp != !!(lr_val & ICH_LR_GROUP) ||
	    __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
		return;

	/* Let's now perform the deactivation */
	__vgic_v3_clear_active_lr(lr, lr_val);
}

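/*
 * Handlers for the individual trapped ICC_* registers. Group enables
 * and BPRs are emulated by rewriting the shadow VMCR, while the
 * ICC_AP[01]Rn accesses operate directly on the ICH_AP[01]Rn_EL2
 * registers.
 */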
static void __hyp_text __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
}

static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __hyp_text __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG0_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG0_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG1_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG1_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __hyp_text __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min() - 1;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR0_SHIFT;
	val &= ICH_VMCR_BPR0_MASK;
	vmcr &= ~ICH_VMCR_BPR0_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min();

	if (vmcr & ICH_VMCR_CBPR_MASK)
		return;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR1_SHIFT;
	val &= ICH_VMCR_BPR1_MASK;
	vmcr &= ~ICH_VMCR_BPR1_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val;

	if (!__vgic_v3_get_group(vcpu))
		val = __vgic_v3_read_ap0rn(n);
	else
		val = __vgic_v3_read_ap1rn(n);

	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (!__vgic_v3_get_group(vcpu))
		__vgic_v3_write_ap0rn(val, n);
	else
		__vgic_v3_write_ap1rn(val, n);
}

static void __hyp_text __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_read_hppir(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u64 lr_val;
	int lr, lr_grp, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr == -1)
		goto spurious;

	lr_grp = !!(lr_val & ICH_LR_GROUP);
	if (lr_grp != grp)
		lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}

static void __hyp_text __vgic_v3_read_pmr(struct kvm_vcpu *vcpu,
					  u32 vmcr, int rt)
{
	vmcr &= ICH_VMCR_PMR_MASK;
	vmcr >>= ICH_VMCR_PMR_SHIFT;
	vcpu_set_reg(vcpu, rt, vmcr);
}

static void __hyp_text __vgic_v3_write_pmr(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	val <<= ICH_VMCR_PMR_SHIFT;
	val &= ICH_VMCR_PMR_MASK;
	vmcr &= ~ICH_VMCR_PMR_MASK;
	vmcr |= val;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

static void __hyp_text __vgic_v3_read_rpr(struct kvm_vcpu *vcpu,
					  u32 vmcr, int rt)
{
	u32 val = __vgic_v3_get_highest_active_priority();
	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vtr, val;

	vtr = read_gicreg(ICH_VTR_EL2);
	/* PRIbits */
	val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
	/* IDbits */
	val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
	/* SEIS */
	val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT;
	/* A3V */
	val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
	/* EOImode */
	val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
	/* CBPR */
	val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;

	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (val & ICC_CTLR_EL1_CBPR_MASK)
		vmcr |= ICH_VMCR_CBPR_MASK;
	else
		vmcr &= ~ICH_VMCR_CBPR_MASK;

	if (val & ICC_CTLR_EL1_EOImode_MASK)
		vmcr |= ICH_VMCR_EOIM_MASK;
	else
		vmcr &= ~ICH_VMCR_EOIM_MASK;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

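/*
 * Main dispatcher for trapped CPU interface accesses: decode the system
 * register and access direction from the ESR, run the matching handler
 * with the current VMCR, and skip the trapped instruction. Returns 1 if
 * the access was handled, 0 to defer to the rest of the sysreg handling.
 */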
int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u32 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	esr = kvm_vcpu_get_hsr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		if (!kvm_condition_valid(vcpu)) {
			__kvm_skip_instr(vcpu);
			return 1;
		}

		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	switch (sysreg) {
	case SYS_ICC_IAR0_EL1:
	case SYS_ICC_IAR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_iar;
		break;
	case SYS_ICC_EOIR0_EL1:
	case SYS_ICC_EOIR1_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_eoir;
		break;
	case SYS_ICC_IGRPEN1_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen1;
		else
			fn = __vgic_v3_write_igrpen1;
		break;
	case SYS_ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	case SYS_ICC_AP0Rn_EL1(0):
	case SYS_ICC_AP1Rn_EL1(0):
		if (is_read)
			fn = __vgic_v3_read_apxr0;
		else
			fn = __vgic_v3_write_apxr0;
		break;
	case SYS_ICC_AP0Rn_EL1(1):
	case SYS_ICC_AP1Rn_EL1(1):
		if (is_read)
			fn = __vgic_v3_read_apxr1;
		else
			fn = __vgic_v3_write_apxr1;
		break;
	case SYS_ICC_AP0Rn_EL1(2):
	case SYS_ICC_AP1Rn_EL1(2):
		if (is_read)
			fn = __vgic_v3_read_apxr2;
		else
			fn = __vgic_v3_write_apxr2;
		break;
	case SYS_ICC_AP0Rn_EL1(3):
	case SYS_ICC_AP1Rn_EL1(3):
		if (is_read)
			fn = __vgic_v3_read_apxr3;
		else
			fn = __vgic_v3_write_apxr3;
		break;
	case SYS_ICC_HPPIR0_EL1:
	case SYS_ICC_HPPIR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_hppir;
		break;
	case SYS_ICC_IGRPEN0_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen0;
		else
			fn = __vgic_v3_write_igrpen0;
		break;
	case SYS_ICC_BPR0_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr0;
		else
			fn = __vgic_v3_write_bpr0;
		break;
	case SYS_ICC_DIR_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_dir;
		break;
	case SYS_ICC_RPR_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_rpr;
		break;
	case SYS_ICC_CTLR_EL1:
		if (is_read)
			fn = __vgic_v3_read_ctlr;
		else
			fn = __vgic_v3_write_ctlr;
		break;
	case SYS_ICC_PMR_EL1:
		if (is_read)
			fn = __vgic_v3_read_pmr;
		else
			fn = __vgic_v3_write_pmr;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);
	fn(vcpu, vmcr, rt);

	__kvm_skip_instr(vcpu);

	return 1;
}

#endif