/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
18 | #include <linux/compiler.h> | |
19 | #include <linux/irqchip/arm-gic-v3.h> | |
20 | #include <linux/kvm_host.h> | |
21 | ||
59da1cbf | 22 | #include <asm/kvm_emulate.h> |
13720a56 | 23 | #include <asm/kvm_hyp.h> |
f68d2b1b MZ |
24 | |
25 | #define vtr_to_max_lr_idx(v) ((v) & 0xf) | |
d68356cc | 26 | #define vtr_to_nr_pre_bits(v) ((((u32)(v) >> 26) & 7) + 1) |
132a324a | 27 | #define vtr_to_nr_apr_regs(v) (1 << (vtr_to_nr_pre_bits(v) - 5)) |
f68d2b1b | 28 | |
/*
 * Read back a single List Register (ICH_LR<n>_EL2).
 *
 * The LRs are banked system registers that can only be addressed by
 * name, hence the switch on the (masked) index. @lr is masked with 0xf
 * so any input maps onto one of the 16 architecturally possible LRs,
 * making the switch exhaustive.
 */
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	/* All 16 values of (lr & 0xf) are handled above */
	unreachable();
}
68 | ||
/*
 * Write a single List Register (ICH_LR<n>_EL2).
 *
 * Mirror of __gic_v3_get_lr(): the banked LRs can only be named
 * explicitly, so dispatch on the masked index.
 */
static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}
122 | ||
/*
 * Write active priority register ICH_AP0R<n>_EL2 (Group-0).
 * @n outside 0-3 is silently ignored; callers derive it from the
 * number of implemented APR registers.
 */
static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}
140 | ||
/*
 * Write active priority register ICH_AP1R<n>_EL2 (Group-1).
 * Same addressing scheme as __vgic_v3_write_ap0rn().
 */
static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}
158 | ||
/*
 * Read active priority register ICH_AP0R<n>_EL2 (Group-0).
 * @n must be 0-3; anything else is a caller bug (unreachable()).
 */
static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP0R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP0R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP0R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP0R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}
182 | ||
/*
 * Read active priority register ICH_AP1R<n>_EL2 (Group-1).
 * @n must be 0-3; anything else is a caller bug (unreachable()).
 */
static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP1R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP1R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP1R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP1R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}
206 | ||
/*
 * Save the guest's GICv3 CPU interface state (VMCR, LRs, active
 * priority registers) into the shadow vgic_v3_cpu_if, and hand the
 * CPU interface back to the host. Runs at EL2 (__hyp_text); the
 * ordering of barriers and register accesses below is significant.
 */
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u64 val;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface.
	 */
	if (!cpu_if->vgic_sre) {
		dsb(st);
		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
	}

	if (used_lrs) {
		int i;
		u32 nr_pre_bits;

		/* Snapshot which LRs are empty before we start clearing them */
		cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);

		/* Disable the vCPU interface while tearing things down */
		write_gicreg(0, ICH_HCR_EL2);
		val = read_gicreg(ICH_VTR_EL2);
		nr_pre_bits = vtr_to_nr_pre_bits(val);

		for (i = 0; i < used_lrs; i++) {
			/* An LR flagged empty in ELRSR carries no live state */
			if (cpu_if->vgic_elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}

		/*
		 * Only the APR registers backing the implemented
		 * preemption bits exist; deliberate fallthrough saves
		 * the high-numbered ones first.
		 */
		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
			cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
			/* Fall through */
		case 6:
			cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
			/* Fall through */
		default:
			cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
		}

		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
			cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
			/* Fall through */
		case 6:
			cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
			/* Fall through */
		default:
			cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
		}
	} else {
		/* No LRs in use: shadow state is "all LRs empty, no APRs" */
		cpu_if->vgic_elrsr = 0xffff;
		cpu_if->vgic_ap0r[0] = 0;
		cpu_if->vgic_ap0r[1] = 0;
		cpu_if->vgic_ap0r[2] = 0;
		cpu_if->vgic_ap0r[3] = 0;
		cpu_if->vgic_ap1r[0] = 0;
		cpu_if->vgic_ap1r[1] = 0;
		cpu_if->vgic_ap1r[2] = 0;
		cpu_if->vgic_ap1r[3] = 0;
	}

	/* Re-enable the host's system register access at EL2 */
	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}
}
281 | ||
/*
 * Restore the guest's GICv3 CPU interface state from the shadow
 * vgic_v3_cpu_if (VMCR, APRs, LRs) on the way into the guest.
 * Runs at EL2 (__hyp_text); barrier ordering below is significant.
 */
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u64 val;
	u32 nr_pre_bits;
	int i;

	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC, and VMCR_EL2 in
	 * particular.
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
	}

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	if (used_lrs) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		/*
		 * Only the APR registers backing the implemented
		 * preemption bits exist; deliberate fallthrough.
		 */
		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
			/* Fall through */
		case 6:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
			/* Fall through */
		default:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
		}

		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
			/* Fall through */
		case 6:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
			/* Fall through */
		default:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
		}

		for (i = 0; i < used_lrs; i++)
			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
	}

	/*
	 * Ensures that the above will have reached the
	 * (re)distributors. This ensure the guest will read the
	 * correct values from the memory-mapped interface.
	 */
	if (!cpu_if->vgic_sre) {
		isb();
		dsb(sy);
	}

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);
}
352 | ||
0d98d00b MZ |
353 | void __hyp_text __vgic_v3_init_lrs(void) |
354 | { | |
355 | int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2)); | |
356 | int i; | |
357 | ||
358 | for (i = 0; i <= max_lr_idx; i++) | |
359 | __gic_v3_set_lr(0, i); | |
360 | } | |
361 | ||
/* Expose ICH_VTR_EL2 (VGIC type register) to the non-hyp host code. */
u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}
/* Read the current virtual machine control register (ICH_VMCR_EL2). */
u64 __hyp_text __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}
371 | ||
/* Write the virtual machine control register (ICH_VMCR_EL2). */
void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}
59da1cbf MZ |
376 | |
377 | #ifdef CONFIG_ARM64 | |
378 | ||
d70c7b31 MZ |
379 | static int __hyp_text __vgic_v3_bpr_min(void) |
380 | { | |
381 | /* See Pseudocode for VPriorityGroup */ | |
382 | return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2)); | |
383 | } | |
384 | ||
132a324a MZ |
385 | static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu) |
386 | { | |
387 | u32 esr = kvm_vcpu_get_hsr(vcpu); | |
388 | u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT; | |
389 | ||
390 | return crm != 8; | |
391 | } | |
392 | ||
393 | #define GICv3_IDLE_PRIORITY 0xff | |
394 | ||
395 | static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu, | |
396 | u32 vmcr, | |
397 | u64 *lr_val) | |
398 | { | |
399 | unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs; | |
400 | u8 priority = GICv3_IDLE_PRIORITY; | |
401 | int i, lr = -1; | |
402 | ||
403 | for (i = 0; i < used_lrs; i++) { | |
404 | u64 val = __gic_v3_get_lr(i); | |
405 | u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT; | |
406 | ||
407 | /* Not pending in the state? */ | |
408 | if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT) | |
409 | continue; | |
410 | ||
411 | /* Group-0 interrupt, but Group-0 disabled? */ | |
412 | if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK)) | |
413 | continue; | |
414 | ||
415 | /* Group-1 interrupt, but Group-1 disabled? */ | |
416 | if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK)) | |
417 | continue; | |
418 | ||
419 | /* Not the highest priority? */ | |
420 | if (lr_prio >= priority) | |
421 | continue; | |
422 | ||
423 | /* This is a candidate */ | |
424 | priority = lr_prio; | |
425 | *lr_val = val; | |
426 | lr = i; | |
427 | } | |
428 | ||
429 | if (lr == -1) | |
430 | *lr_val = ICC_IAR1_EL1_SPURIOUS; | |
431 | ||
432 | return lr; | |
433 | } | |
434 | ||
435 | static int __hyp_text __vgic_v3_get_highest_active_priority(void) | |
436 | { | |
437 | u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2)); | |
438 | u32 hap = 0; | |
439 | int i; | |
440 | ||
441 | for (i = 0; i < nr_apr_regs; i++) { | |
442 | u32 val; | |
443 | ||
444 | /* | |
445 | * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers | |
446 | * contain the active priority levels for this VCPU | |
447 | * for the maximum number of supported priority | |
448 | * levels, and we return the full priority level only | |
449 | * if the BPR is programmed to its minimum, otherwise | |
450 | * we return a combination of the priority level and | |
451 | * subpriority, as determined by the setting of the | |
452 | * BPR, but without the full subpriority. | |
453 | */ | |
454 | val = __vgic_v3_read_ap0rn(i); | |
455 | val |= __vgic_v3_read_ap1rn(i); | |
456 | if (!val) { | |
457 | hap += 32; | |
458 | continue; | |
459 | } | |
460 | ||
461 | return (hap + __ffs(val)) << __vgic_v3_bpr_min(); | |
462 | } | |
463 | ||
464 | return GICv3_IDLE_PRIORITY; | |
465 | } | |
466 | ||
d70c7b31 MZ |
467 | static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr) |
468 | { | |
469 | return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT; | |
470 | } | |
471 | ||
472 | static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr) | |
473 | { | |
474 | unsigned int bpr; | |
475 | ||
476 | if (vmcr & ICH_VMCR_CBPR_MASK) { | |
477 | bpr = __vgic_v3_get_bpr0(vmcr); | |
478 | if (bpr < 7) | |
479 | bpr++; | |
480 | } else { | |
481 | bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT; | |
482 | } | |
483 | ||
484 | return bpr; | |
485 | } | |
486 | ||
132a324a MZ |
487 | /* |
488 | * Convert a priority to a preemption level, taking the relevant BPR | |
489 | * into account by zeroing the sub-priority bits. | |
490 | */ | |
491 | static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp) | |
492 | { | |
493 | unsigned int bpr; | |
494 | ||
495 | if (!grp) | |
496 | bpr = __vgic_v3_get_bpr0(vmcr) + 1; | |
497 | else | |
498 | bpr = __vgic_v3_get_bpr1(vmcr); | |
499 | ||
500 | return pri & (GENMASK(7, 0) << bpr); | |
501 | } | |
502 | ||
503 | /* | |
504 | * The priority value is independent of any of the BPR values, so we | |
505 | * normalize it using the minumal BPR value. This guarantees that no | |
506 | * matter what the guest does with its BPR, we can always set/get the | |
507 | * same value of a priority. | |
508 | */ | |
509 | static void __hyp_text __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp) | |
510 | { | |
511 | u8 pre, ap; | |
512 | u32 val; | |
513 | int apr; | |
514 | ||
515 | pre = __vgic_v3_pri_to_pre(pri, vmcr, grp); | |
516 | ap = pre >> __vgic_v3_bpr_min(); | |
517 | apr = ap / 32; | |
518 | ||
519 | if (!grp) { | |
520 | val = __vgic_v3_read_ap0rn(apr); | |
521 | __vgic_v3_write_ap0rn(val | BIT(ap % 32), apr); | |
522 | } else { | |
523 | val = __vgic_v3_read_ap1rn(apr); | |
524 | __vgic_v3_write_ap1rn(val | BIT(ap % 32), apr); | |
525 | } | |
526 | } | |
527 | ||
/*
 * Emulate a guest read of ICC_IAR1_EL1: acknowledge the highest
 * priority pending interrupt visible in the LRs, transition it to
 * active, record it in the active priority registers, and return its
 * INTID in Rt (or ICC_IAR1_EL1_SPURIOUS if nothing can be taken).
 */
static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	u8 lr_prio, pmr;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr < 0)
		goto spurious;

	/* The LR's group must match the group of the trapped accessor */
	if (grp != !!(lr_val & ICH_LR_GROUP))
		goto spurious;

	/* Priority mask check: only interrupts above PMR are delivered */
	pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
	if (pmr <= lr_prio)
		goto spurious;

	/* Preemption check against the current highest active priority */
	if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	/* No active state for LPIs */
	if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
		lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
	return;

spurious:
	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}
563 | ||
f8b630bc MZ |
564 | static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt) |
565 | { | |
566 | vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK)); | |
567 | } | |
568 | ||
569 | static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt) | |
570 | { | |
571 | u64 val = vcpu_get_reg(vcpu, rt); | |
572 | ||
573 | if (val & 1) | |
574 | vmcr |= ICH_VMCR_ENG1_MASK; | |
575 | else | |
576 | vmcr &= ~ICH_VMCR_ENG1_MASK; | |
577 | ||
578 | __vgic_v3_write_vmcr(vmcr); | |
579 | } | |
580 | ||
d70c7b31 MZ |
581 | static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt) |
582 | { | |
583 | vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr)); | |
584 | } | |
585 | ||
586 | static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt) | |
587 | { | |
588 | u64 val = vcpu_get_reg(vcpu, rt); | |
589 | u8 bpr_min = __vgic_v3_bpr_min(); | |
590 | ||
591 | if (vmcr & ICH_VMCR_CBPR_MASK) | |
592 | return; | |
593 | ||
594 | /* Enforce BPR limiting */ | |
595 | if (val < bpr_min) | |
596 | val = bpr_min; | |
597 | ||
598 | val <<= ICH_VMCR_BPR1_SHIFT; | |
599 | val &= ICH_VMCR_BPR1_MASK; | |
600 | vmcr &= ~ICH_VMCR_BPR1_MASK; | |
601 | vmcr |= val; | |
602 | ||
603 | __vgic_v3_write_vmcr(vmcr); | |
604 | } | |
605 | ||
59da1cbf MZ |
606 | int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu) |
607 | { | |
608 | int rt; | |
609 | u32 esr; | |
610 | u32 vmcr; | |
611 | void (*fn)(struct kvm_vcpu *, u32, int); | |
612 | bool is_read; | |
613 | u32 sysreg; | |
614 | ||
615 | esr = kvm_vcpu_get_hsr(vcpu); | |
616 | if (vcpu_mode_is_32bit(vcpu)) { | |
617 | if (!kvm_condition_valid(vcpu)) | |
618 | return 1; | |
619 | ||
620 | sysreg = esr_cp15_to_sysreg(esr); | |
621 | } else { | |
622 | sysreg = esr_sys64_to_sysreg(esr); | |
623 | } | |
624 | ||
625 | is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ; | |
626 | ||
627 | switch (sysreg) { | |
132a324a MZ |
628 | case SYS_ICC_IAR1_EL1: |
629 | fn = __vgic_v3_read_iar; | |
630 | break; | |
f8b630bc MZ |
631 | case SYS_ICC_GRPEN1_EL1: |
632 | if (is_read) | |
633 | fn = __vgic_v3_read_igrpen1; | |
634 | else | |
635 | fn = __vgic_v3_write_igrpen1; | |
636 | break; | |
d70c7b31 MZ |
637 | case SYS_ICC_BPR1_EL1: |
638 | if (is_read) | |
639 | fn = __vgic_v3_read_bpr1; | |
640 | else | |
641 | fn = __vgic_v3_write_bpr1; | |
642 | break; | |
59da1cbf MZ |
643 | default: |
644 | return 0; | |
645 | } | |
646 | ||
647 | vmcr = __vgic_v3_read_vmcr(); | |
648 | rt = kvm_vcpu_sys_get_rt(vcpu); | |
649 | fn(vcpu, vmcr, rt); | |
650 | ||
651 | return 1; | |
652 | } | |
653 | ||
654 | #endif |