/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
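
/*
 * ICH_VTR_EL2.ListRegs holds the index of the last implemented List
 * Register, and PREbits the number of implemented preemption bits,
 * minus one.
 */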
#define vtr_to_max_lr_idx(v)	((v) & 0xf)
#define vtr_to_nr_pre_bits(v)	((((u32)(v) >> 26) & 7) + 1)
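
/*
 * The ICH_LR<n>_EL2 registers are individual system registers and
 * cannot be indexed at run time, so each access is dispatched through
 * a switch on the LR number.
 */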
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}

static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}
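
/*
 * Same scheme for the ICH_AP0R<n>_EL2/ICH_AP1R<n>_EL2 registers, which
 * track the Group0/Group1 priorities that are currently active on the
 * virtual CPU interface.
 */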
static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}

static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}

static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP0R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP0R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP0R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP0R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP1R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP1R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP1R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP1R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}
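
/*
 * Save the CPU interface state (LRs, active priorities, VMCR) to the
 * shadow copy in the vcpu structure on guest exit, clearing the
 * hardware List Registers behind us.
 */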
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u64 val;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface.
	 */
	if (!cpu_if->vgic_sre) {
		dsb(st);
		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
	}

	if (used_lrs) {
		int i;
		u32 nr_pre_bits;

		cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);

		write_gicreg(0, ICH_HCR_EL2);
		val = read_gicreg(ICH_VTR_EL2);
		nr_pre_bits = vtr_to_nr_pre_bits(val);

		for (i = 0; i < used_lrs; i++) {
			if (cpu_if->vgic_elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}

		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
			cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
			/* Fall through */
		case 6:
			cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
			/* Fall through */
		default:
			cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
		}

		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
			cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
			/* Fall through */
		case 6:
			cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
			/* Fall through */
		default:
			cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
		}
	} else {
		cpu_if->vgic_elrsr = 0xffff;
		cpu_if->vgic_ap0r[0] = 0;
		cpu_if->vgic_ap0r[1] = 0;
		cpu_if->vgic_ap0r[2] = 0;
		cpu_if->vgic_ap0r[3] = 0;
		cpu_if->vgic_ap1r[0] = 0;
		cpu_if->vgic_ap1r[1] = 0;
		cpu_if->vgic_ap1r[2] = 0;
		cpu_if->vgic_ap1r[3] = 0;
	}

	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}
}
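
/*
 * Mirror image of __vgic_v3_save_state(): reprogram the hardware CPU
 * interface from the shadow copy on guest entry.
 */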
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u64 val;
	u32 nr_pre_bits;
	int i;

	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC, and VMCR_EL2 in
	 * particular.
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
	}

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	if (used_lrs) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
			/* Fall through */
		case 6:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
			/* Fall through */
		default:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
		}

		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
			/* Fall through */
		case 6:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
			/* Fall through */
		default:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
		}

		for (i = 0; i < used_lrs; i++)
			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
	}

	/*
	 * Ensure that the above will have reached the
	 * (re)distributors. This ensures the guest will read the
	 * correct values from the memory-mapped interface.
	 */
	if (!cpu_if->vgic_sre) {
		isb();
		dsb(sy);
	}

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);
}
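
/* Reset all implemented List Registers to the empty state. */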
void __hyp_text __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}

u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}

u64 __hyp_text __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}

void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}

#ifdef CONFIG_ARM64
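
/*
 * Emulation of the guest-visible ICC_* system register accesses; only
 * wired up for arm64 so far.
 */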
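
/*
 * With nr_pre_bits preemption bits implemented, (8 - nr_pre_bits) is
 * the lowest BPR value that still affects preemption grouping, and is
 * therefore the floor enforced on guest writes below.
 */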
static int __hyp_text __vgic_v3_bpr_min(void)
{
	/* See Pseudocode for VPriorityGroup */
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}

static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr)
{
	return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}
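
/*
 * When ICH_VMCR_EL2.VCBPR is set, BPR1 is an alias of BPR0: reads
 * return BPR0 plus one, saturated at 7, and writes are ignored.
 */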
static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr)
{
	unsigned int bpr;

	if (vmcr & ICH_VMCR_CBPR_MASK) {
		bpr = __vgic_v3_get_bpr0(vmcr);
		if (bpr < 7)
			bpr++;
	} else {
		bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	}

	return bpr;
}

static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min();

	/* Writes are ignored while BPR1 aliases BPR0 */
	if (vmcr & ICH_VMCR_CBPR_MASK)
		return;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR1_SHIFT;
	val &= ICH_VMCR_BPR1_MASK;
	vmcr &= ~ICH_VMCR_BPR1_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}
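
/*
 * Decode a trapped GIC CPU interface access and emulate it. Returns 1
 * when the access has been handled here, and 0 when it must be
 * handled elsewhere in KVM.
 */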
int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u32 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	esr = kvm_vcpu_get_hsr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		if (!kvm_condition_valid(vcpu))
			return 1;

		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	switch (sysreg) {
	case SYS_ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);
	fn(vcpu, vmcr, rt);

	return 1;
}

#endif