virt/kvm/arm/vgic/vgic-mmio-v2.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGICv2 MMIO handling functions
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/nospec.h>

#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

/*
 * The Revision field in the IIDR has the following meanings:
 *
 * Revision 1: Report GICv2 interrupts as group 0 instead of group 1
 * Revision 2: Interrupt groups are guest-configurable and signaled using
 *             their configured groups.
 */

static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
{
        struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
        u32 value;

        switch (addr & 0x0c) {
        case GIC_DIST_CTRL:
                value = vgic->enabled ? GICD_ENABLE : 0;
                break;
        case GIC_DIST_CTR:
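                /*
                 * GICD_TYPER: bits [4:0] hold ITLinesNumber, i.e.
                 * (nr_irqs / 32) - 1; bits [7:5] report the number of
                 * implemented CPU interfaces minus one.
                 */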
                value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
                value = (value >> 5) - 1;
                value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
                break;
        case GIC_DIST_IIDR:
                value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
                        (vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
                        (IMPLEMENTER_ARM << GICD_IIDR_IMPLEMENTER_SHIFT);
                break;
        default:
                return 0;
        }

        return value;
}

static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len,
                                    unsigned long val)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        bool was_enabled = dist->enabled;

        switch (addr & 0x0c) {
        case GIC_DIST_CTRL:
                dist->enabled = val & GICD_ENABLE;
                if (!was_enabled && dist->enabled)
                        vgic_kick_vcpus(vcpu->kvm);
                break;
        case GIC_DIST_CTR:
        case GIC_DIST_IIDR:
                /* Nothing to do */
                return;
        }
}

static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
                                           gpa_t addr, unsigned int len,
                                           unsigned long val)
{
        switch (addr & 0x0c) {
        case GIC_DIST_IIDR:
                if (val != vgic_mmio_read_v2_misc(vcpu, addr, len))
                        return -EINVAL;

                /*
                 * If we observe a write to GICD_IIDR we know that userspace
                 * has been updated and has had a chance to cope with older
                 * kernels (VGICv2 IIDR.Revision == 0) incorrectly reporting
                 * interrupts as group 1, and therefore we now allow groups to
                 * be user writable. Doing this by default would break
                 * migration from old kernels to new kernels with legacy
                 * userspace.
                 */
                vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
                return 0;
        }

        vgic_mmio_write_v2_misc(vcpu, addr, len, val);
        return 0;
}

static int vgic_mmio_uaccess_write_v2_group(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len,
                                            unsigned long val)
{
        if (vcpu->kvm->arch.vgic.v2_groups_user_writable)
                vgic_mmio_write_group(vcpu, addr, len, val);

        return 0;
}

static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
                                 gpa_t addr, unsigned int len,
                                 unsigned long val)
{
        int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
        int intid = val & 0xf;
        int targets = (val >> 16) & 0xff;
        int mode = (val >> 24) & 0x03;
        int c;
        struct kvm_vcpu *vcpu;
        unsigned long flags;

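        /*
         * GICD_SGIR layout: bits [3:0] select the SGI, bits [23:16] are
         * the CPU target list, and bits [25:24] are the target list
         * filter that decides how that list is interpreted.
         */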
        switch (mode) {
        case 0x0: /* as specified by targets */
                break;
        case 0x1:
                targets = (1U << nr_vcpus) - 1;           /* all, ... */
                targets &= ~(1U << source_vcpu->vcpu_id); /* but self */
                break;
        case 0x2: /* this very vCPU only */
                targets = (1U << source_vcpu->vcpu_id);
                break;
        case 0x3: /* reserved */
                return;
        }

        kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
                struct vgic_irq *irq;

                if (!(targets & (1U << c)))
                        continue;

                irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                irq->pending_latch = true;
                irq->source |= 1U << source_vcpu->vcpu_id;

                vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);
                vgic_put_irq(source_vcpu->kvm, irq);
        }
}

static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
                                           gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->targets << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}

static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
                                   gpa_t addr, unsigned int len,
                                   unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
        int i;
        unsigned long flags;

        /* GICD_ITARGETSR[0-7] are read-only */
        if (intid < VGIC_NR_PRIVATE_IRQS)
                return;

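        /*
         * Each byte of the access programs the CPU mask of one SPI; the
         * interrupt is then routed to the lowest-numbered vCPU left in
         * its target mask.
         */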
        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
                int target;

                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                irq->targets = (val >> (i * 8)) & cpu_mask;
                target = irq->targets ? __ffs(irq->targets) : 0;
                irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
{
        u32 intid = addr & 0x0f;
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->source << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }
        return val;
}

static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        u32 intid = addr & 0x0f;
        int i;
        unsigned long flags;

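        /*
         * GICD_CPENDSGIR: each byte holds the per-source-CPU pending
         * bits of one SGI. Writing a 1 clears that source; the SGI
         * stops being pending once no sources remain.
         */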
        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                irq->source &= ~((val >> (i * 8)) & 0xff);
                if (!irq->source)
                        irq->pending_latch = false;

                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        u32 intid = addr & 0x0f;
        int i;
        unsigned long flags;

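        /*
         * GICD_SPENDSGIR: writing a 1 adds the corresponding source CPU
         * to an SGI and marks it pending, queueing it for delivery.
         */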
        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                irq->source |= (val >> (i * 8)) & 0xff;

                if (irq->source) {
                        irq->pending_latch = true;
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                } else {
                        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                }
                vgic_put_irq(vcpu->kvm, irq);
        }
}

#define GICC_ARCH_VERSION_V2    0x2

/* These are for userland accesses only; there is no guest-facing emulation. */
static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
                                           gpa_t addr, unsigned int len)
{
        struct vgic_vmcr vmcr;
        u32 val;

        vgic_get_vmcr(vcpu, &vmcr);

        switch (addr & 0xff) {
        case GIC_CPU_CTRL:
                val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
                val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
                val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
                val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
                val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
                val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;

                break;
        case GIC_CPU_PRIMASK:
                /*
                 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
                 * PMR field as GICH_VMCR.VMPriMask rather than
                 * GICC_PMR.Priority, so we expose the upper five bits of
                 * priority mask to userspace using the lower bits in the
                 * unsigned long.
                 */
                val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
                        GICV_PMR_PRIORITY_SHIFT;
                break;
        case GIC_CPU_BINPOINT:
                val = vmcr.bpr;
                break;
        case GIC_CPU_ALIAS_BINPOINT:
                val = vmcr.abpr;
                break;
        case GIC_CPU_IDENT:
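                /*
                 * GICC_IIDR: ProductID in bits [31:20], architecture
                 * version in bits [19:16], implementer in bits [11:0].
                 */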
                val = ((PRODUCT_ID_KVM << 20) |
                       (GICC_ARCH_VERSION_V2 << 16) |
                       IMPLEMENTER_ARM);
                break;
        default:
                return 0;
        }

        return val;
}

static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
                                   gpa_t addr, unsigned int len,
                                   unsigned long val)
{
        struct vgic_vmcr vmcr;

        vgic_get_vmcr(vcpu, &vmcr);

        switch (addr & 0xff) {
        case GIC_CPU_CTRL:
                vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
                vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
                vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
                vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
                vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
                vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);

                break;
        case GIC_CPU_PRIMASK:
                /*
                 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
                 * PMR field as GICH_VMCR.VMPriMask rather than
                 * GICC_PMR.Priority, so we expose the upper five bits of
                 * priority mask to userspace using the lower bits in the
                 * unsigned long.
                 */
                vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
                        GICV_PMR_PRIORITY_MASK;
                break;
        case GIC_CPU_BINPOINT:
                vmcr.bpr = val;
                break;
        case GIC_CPU_ALIAS_BINPOINT:
                vmcr.abpr = val;
                break;
        }

        vgic_set_vmcr(vcpu, &vmcr);
}

static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
                                        gpa_t addr, unsigned int len)
{
        int n; /* which APRn is this */

        n = (addr >> 2) & 0x3;

        if (kvm_vgic_global_state.type == VGIC_V2) {
                /* GICv2 hardware systems support max. 32 groups */
                if (n != 0)
                        return 0;
                return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
        } else {
                struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

                if (n > vgic_v3_max_apr_idx(vcpu))
                        return 0;

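                /* Clamp the index under speculation (Spectre v1 hardening). */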
                n = array_index_nospec(n, 4);

                /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
                return vgicv3->vgic_ap1r[n];
        }
}

static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
                                gpa_t addr, unsigned int len,
                                unsigned long val)
{
        int n; /* which APRn is this */

        n = (addr >> 2) & 0x3;

        if (kvm_vgic_global_state.type == VGIC_V2) {
                /* GICv2 hardware systems support max. 32 groups */
                if (n != 0)
                        return;
                vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
        } else {
                struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

                if (n > vgic_v3_max_apr_idx(vcpu))
                        return;

                n = array_index_nospec(n, 4);

                /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
                vgicv3->vgic_ap1r[n] = val;
        }
}

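/*
 * Register tables: each REGISTER_DESC_WITH_BITS_PER_IRQ entry covers a
 * register range whose size scales with the number of implemented
 * interrupts at the given bits-per-interrupt, while
 * REGISTER_DESC_WITH_LENGTH entries cover a fixed number of bytes.
 */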
static const struct vgic_register_region vgic_v2_dist_registers[] = {
        REGISTER_DESC_WITH_LENGTH_UACCESS(GIC_DIST_CTRL,
                vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc,
                NULL, vgic_mmio_uaccess_write_v2_misc,
                12, VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
                vgic_mmio_read_group, vgic_mmio_write_group,
                NULL, vgic_mmio_uaccess_write_v2_group, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
                vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
                vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
                vgic_mmio_read_pending, vgic_mmio_write_spending, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
                vgic_mmio_read_pending, vgic_mmio_write_cpending, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
                vgic_mmio_read_active, vgic_mmio_write_sactive,
                NULL, vgic_mmio_uaccess_write_sactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
                vgic_mmio_read_active, vgic_mmio_write_cactive,
                NULL, vgic_mmio_uaccess_write_cactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
                vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
                8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
                vgic_mmio_read_target, vgic_mmio_write_target, NULL, NULL, 8,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
                vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
                vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
                vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
                vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};

static const struct vgic_register_region vgic_v2_cpu_registers[] = {
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
                vgic_mmio_read_apr, vgic_mmio_write_apr, 16,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
};

unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
        dev->regions = vgic_v2_dist_registers;
        dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

        kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

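        /* The GICv2 distributor occupies a fixed 4K window of MMIO space. */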
        return SZ_4K;
}

int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        const struct vgic_register_region *region;
        struct vgic_io_device iodev;
        struct vgic_reg_attr reg_attr;
        struct kvm_vcpu *vcpu;
        gpa_t addr;
        int ret;

        ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                iodev.regions = vgic_v2_dist_registers;
                iodev.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
                iodev.base_addr = 0;
                break;
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                iodev.regions = vgic_v2_cpu_registers;
                iodev.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
                iodev.base_addr = 0;
                break;
        default:
                return -ENXIO;
        }

        /* We only support aligned 32-bit accesses. */
        if (addr & 3)
                return -ENXIO;

        region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
        if (!region)
                return -ENXIO;

        return 0;
}

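/*
 * Userspace access helpers: wrap the register tables in a transient
 * vgic_io_device so that the KVM device attribute accessors can reuse
 * the MMIO dispatch code.
 */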
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                          int offset, u32 *val)
{
        struct vgic_io_device dev = {
                .regions = vgic_v2_cpu_registers,
                .nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
                .iodev_type = IODEV_CPUIF,
        };

        return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                         int offset, u32 *val)
{
        struct vgic_io_device dev = {
                .regions = vgic_v2_dist_registers,
                .nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
                .iodev_type = IODEV_DIST,
        };

        return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}