// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGICv3 MMIO handling functions
 */

#include <linux/bitfield.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"
/* extract @num bytes at @offset bytes offset in data */
unsigned long extract_bytes(u64 data, unsigned int offset,
                            unsigned int num)
{
        return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
}
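/*
 * For example, extract_bytes(data, 4, 4) returns the upper 32 bits of
 * @data, and extract_bytes(data, 0, 8) returns @data unchanged.
 */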
/* allows updates of any half of a 64-bit register (or the whole thing) */
u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
                     unsigned long val)
{
        int lower = (offset & 4) * 8;
        int upper = lower + 8 * len - 1;

        reg &= ~GENMASK_ULL(upper, lower);
        val &= GENMASK_ULL(len * 8 - 1, 0);

        return reg | ((u64)val << lower);
}
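/*
 * For example, update_64bit_reg(reg, 4, 4, val) replaces bits [63:32] of
 * @reg with the low 32 bits of @val while preserving bits [31:0].
 */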
bool vgic_has_its(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;

        if (dist->vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
                return false;

        return dist->has_its;
}
bool vgic_supports_direct_msis(struct kvm *kvm)
{
        return (kvm_vgic_global_state.has_gicv4_1 ||
                (kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm)));
}
/*
 * The Revision field in the IIDR has the following meanings:
 *
 * Revision 2: Interrupt groups are guest-configurable and signaled using
 *             their configured groups.
 */
static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
{
        struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
        u32 value = 0;

        switch (addr & 0x0c) {
        case GICD_CTLR:
                if (vgic->enabled)
                        value |= GICD_CTLR_ENABLE_SS_G1;
                value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
                if (vgic->nassgireq)
                        value |= GICD_CTLR_nASSGIreq;
                break;
        case GICD_TYPER:
                value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
                value = (value >> 5) - 1;
                if (vgic_has_its(vcpu->kvm)) {
                        value |= (INTERRUPT_ID_BITS_ITS - 1) << 19;
                        value |= GICD_TYPER_LPIS;
                } else {
                        value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
                }
                break;
        case GICD_TYPER2:
                if (kvm_vgic_global_state.has_gicv4_1)
                        value = GICD_TYPER2_nASSGIcap;
                break;
        case GICD_IIDR:
                value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
                        (vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
                        (IMPLEMENTER_ARM << GICD_IIDR_IMPLEMENTER_SHIFT);
                break;
        default:
                return 0;
        }

        return value;
}
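/*
 * Worked example for the GICD_TYPER encoding above: with nr_spis = 224,
 * value starts as 224 + 32 = 256, and (256 >> 5) - 1 = 7, i.e. an
 * ITLinesNumber of 7, advertising 32 * (7 + 1) = 256 interrupt IDs.
 */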
static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len,
                                    unsigned long val)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        switch (addr & 0x0c) {
        case GICD_CTLR: {
                bool was_enabled, is_hwsgi;

                mutex_lock(&vcpu->kvm->lock);

                was_enabled = dist->enabled;
                is_hwsgi = dist->nassgireq;

                dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;

                /* Not a GICv4.1? No HW SGIs */
                if (!kvm_vgic_global_state.has_gicv4_1)
                        val &= ~GICD_CTLR_nASSGIreq;

                /* Dist stays enabled? nASSGIreq is RO */
                if (was_enabled && dist->enabled) {
                        val &= ~GICD_CTLR_nASSGIreq;
                        val |= FIELD_PREP(GICD_CTLR_nASSGIreq, is_hwsgi);
                }

                /* Switching HW SGIs? */
                dist->nassgireq = val & GICD_CTLR_nASSGIreq;
                if (is_hwsgi != dist->nassgireq)
                        vgic_v4_configure_vsgis(vcpu->kvm);

                if (kvm_vgic_global_state.has_gicv4_1 &&
                    was_enabled != dist->enabled)
                        kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_RELOAD_GICv4);
                else if (!was_enabled && dist->enabled)
                        vgic_kick_vcpus(vcpu->kvm);

                mutex_unlock(&vcpu->kvm->lock);
                break;
        }
        case GICD_TYPER:
        case GICD_TYPER2:
        case GICD_IIDR:
                /* This is at best for documentation purposes... */
                return;
        }
}
static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
                                           gpa_t addr, unsigned int len,
                                           unsigned long val)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        switch (addr & 0x0c) {
        case GICD_TYPER2:
        case GICD_IIDR:
                if (val != vgic_mmio_read_v3_misc(vcpu, addr, len))
                        return -EINVAL;
                return 0;
        case GICD_CTLR:
                /* Not a GICv4.1? No HW SGIs */
                if (!kvm_vgic_global_state.has_gicv4_1)
                        val &= ~GICD_CTLR_nASSGIreq;

                dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
                dist->nassgireq = val & GICD_CTLR_nASSGIreq;
                return 0;
        }

        vgic_mmio_write_v3_misc(vcpu, addr, len, val);
        return 0;
}
static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
{
        int intid = VGIC_ADDR_TO_INTID(addr, 64);
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
        unsigned long ret = 0;

        if (!irq)
                return 0;

        /* The upper word is RAZ for us. */
        if (!(addr & 4))
                ret = extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);

        vgic_put_irq(vcpu->kvm, irq);
        return ret;
}
static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len,
                                    unsigned long val)
{
        int intid = VGIC_ADDR_TO_INTID(addr, 64);
        struct vgic_irq *irq;
        unsigned long flags;

        /* The upper word is WI for us since we don't implement Aff3. */
        if (addr & 4)
                return;

        irq = vgic_get_irq(vcpu->kvm, NULL, intid);

        if (!irq)
                return;

        raw_spin_lock_irqsave(&irq->irq_lock, flags);

        /* We only care about and preserve Aff0, Aff1 and Aff2. */
        irq->mpidr = val & GENMASK(23, 0);
        irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);

        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);
}
static unsigned long vgic_mmio_read_v3r_ctlr(struct kvm_vcpu *vcpu,
                                             gpa_t addr, unsigned int len)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

        return vgic_cpu->lpis_enabled ? GICR_CTLR_ENABLE_LPIS : 0;
}
static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        bool was_enabled = vgic_cpu->lpis_enabled;

        if (!vgic_has_its(vcpu->kvm))
                return;

        vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;

        if (was_enabled && !vgic_cpu->lpis_enabled) {
                vgic_flush_pending_lpis(vcpu);
                vgic_its_invalidate_cache(vcpu->kvm);
        }

        if (!was_enabled && vgic_cpu->lpis_enabled)
                vgic_enable_lpis(vcpu);
}
static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
                                              gpa_t addr, unsigned int len)
{
        unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_redist_region *rdreg = vgic_cpu->rdreg;
        int target_vcpu_id = vcpu->vcpu_id;
        gpa_t last_rdist_typer = rdreg->base + GICR_TYPER +
                        (rdreg->free_index - 1) * KVM_VGIC_V3_REDIST_SIZE;
        u64 value;

        value = (u64)(mpidr & GENMASK(23, 0)) << 32;
        value |= ((target_vcpu_id & 0xffff) << 8);

        if (addr == last_rdist_typer)
                value |= GICR_TYPER_LAST;
        if (vgic_has_its(vcpu->kvm))
                value |= GICR_TYPER_PLPIS;

        return extract_bytes(value, addr & 7, len);
}
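/*
 * GICR_TYPER layout as composed above: bits [63:32] hold Aff0-Aff2 of the
 * vcpu's MPIDR, bits [23:8] the CPU number, bit 4 (GICR_TYPER_LAST) flags
 * the last redistributor in the region, and bit 0 (GICR_TYPER_PLPIS)
 * advertises physical LPI support.
 */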
static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
                                             gpa_t addr, unsigned int len)
{
        return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
}
static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
                                              gpa_t addr, unsigned int len)
{
        switch (addr & 0xffff) {
        case GICD_PIDR2:
                /* report a GICv3 compliant implementation */
                return 0x3b;
        }

        return 0;
}
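/*
 * 0x3b encodes an ArchRev of 0x3 in PIDR2 bits [7:4], which is how the
 * peripheral ID registers identify a GICv3 implementation.
 */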
static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
                                                  gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /*
         * The pending state of an interrupt is latched in the pending_latch
         * variable. Userspace will save and restore pending state and
         * line_level separately.
         * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.txt
         * for handling of ISPENDR and ICPENDR.
         */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                bool state = irq->pending_latch;

                if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
                        int err;

                        err = irq_get_irqchip_state(irq->host_irq,
                                                    IRQCHIP_STATE_PENDING,
                                                    &state);
                        WARN_ON(err);
                }

                if (state)
                        value |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}
static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
                                         gpa_t addr, unsigned int len,
                                         unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                if (test_bit(i, &val)) {
                        /*
                         * pending_latch is set irrespective of irq type
                         * (level or edge) to avoid a dependency on the VM
                         * restoring the irq config before the pending info.
                         */
                        irq->pending_latch = true;
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                } else {
                        irq->pending_latch = false;
                        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                }

                vgic_put_irq(vcpu->kvm, irq);
        }

        return 0;
}
/* We want to avoid outer shareable. */
u64 vgic_sanitise_shareability(u64 field)
{
        switch (field) {
        case GIC_BASER_OuterShareable:
                return GIC_BASER_InnerShareable;
        default:
                return field;
        }
}
/* Avoid any inner non-cacheable mapping. */
u64 vgic_sanitise_inner_cacheability(u64 field)
{
        switch (field) {
        case GIC_BASER_CACHE_nCnB:
        case GIC_BASER_CACHE_nC:
                return GIC_BASER_CACHE_RaWb;
        default:
                return field;
        }
}
/* Non-cacheable or same-as-inner are OK. */
u64 vgic_sanitise_outer_cacheability(u64 field)
{
        switch (field) {
        case GIC_BASER_CACHE_SameAsInner:
        case GIC_BASER_CACHE_nC:
                return field;
        default:
                return GIC_BASER_CACHE_nC;
        }
}
u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
                        u64 (*sanitise_fn)(u64))
{
        u64 field = (reg & field_mask) >> field_shift;

        field = sanitise_fn(field) << field_shift;
        return (reg & ~field_mask) | field;
}
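/*
 * For example, sanitising the shareability field of a guest-written
 * GICR_PROPBASER value looks like:
 *
 *   reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
 *                             GICR_PROPBASER_SHAREABILITY_SHIFT,
 *                             vgic_sanitise_shareability);
 *
 * which extracts the field, maps OuterShareable to InnerShareable and
 * writes the result back into @reg.
 */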
#define PROPBASER_RES0_MASK                                             \
        (GENMASK_ULL(63, 59) | GENMASK_ULL(55, 52) | GENMASK_ULL(6, 5))
#define PENDBASER_RES0_MASK                                             \
        (BIT_ULL(63) | GENMASK_ULL(61, 59) | GENMASK_ULL(55, 52) |     \
         GENMASK_ULL(15, 12) | GENMASK_ULL(6, 0))
static u64 vgic_sanitise_pendbaser(u64 reg)
{
        reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
                                  GICR_PENDBASER_SHAREABILITY_SHIFT,
                                  vgic_sanitise_shareability);
        reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
                                  GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_inner_cacheability);
        reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
                                  GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_outer_cacheability);

        reg &= ~PENDBASER_RES0_MASK;

        return reg;
}
static u64 vgic_sanitise_propbaser(u64 reg)
{
        reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
                                  GICR_PROPBASER_SHAREABILITY_SHIFT,
                                  vgic_sanitise_shareability);
        reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
                                  GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_inner_cacheability);
        reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
                                  GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
                                  vgic_sanitise_outer_cacheability);

        reg &= ~PROPBASER_RES0_MASK;

        return reg;
}
static unsigned long vgic_mmio_read_propbase(struct kvm_vcpu *vcpu,
                                             gpa_t addr, unsigned int len)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        return extract_bytes(dist->propbaser, addr & 7, len);
}
static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        u64 old_propbaser, propbaser;

        /* Storing a value with LPIs already enabled is undefined */
        if (vgic_cpu->lpis_enabled)
                return;

        do {
                old_propbaser = READ_ONCE(dist->propbaser);
                propbaser = old_propbaser;
                propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
                propbaser = vgic_sanitise_propbaser(propbaser);
        } while (cmpxchg64(&dist->propbaser, old_propbaser,
                           propbaser) != old_propbaser);
}
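/*
 * The cmpxchg64() loop above implements a lockless read-modify-write: if
 * another writer updated propbaser between the READ_ONCE() and the
 * cmpxchg64(), the compare fails and the update is retried against the
 * new value, so no write is ever lost.
 */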
static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
                                             gpa_t addr, unsigned int len)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        u64 value = vgic_cpu->pendbaser;

        value &= ~GICR_PENDBASER_PTZ;

        return extract_bytes(value, addr & 7, len);
}
static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        u64 old_pendbaser, pendbaser;

        /* Storing a value with LPIs already enabled is undefined */
        if (vgic_cpu->lpis_enabled)
                return;

        do {
                old_pendbaser = READ_ONCE(vgic_cpu->pendbaser);
                pendbaser = old_pendbaser;
                pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
                pendbaser = vgic_sanitise_pendbaser(pendbaser);
        } while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
                           pendbaser) != old_pendbaser);
}
/*
 * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the
 * redistributors, while SPIs are covered by registers in the distributor
 * block. Trying to set private IRQs in this block gets ignored.
 * We take some special care here to fix the calculation of the register
 * offset.
 */
#define REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(off, rd, wr, ur, uw, bpi, acc) \
        {                                                               \
                .reg_offset = off,                                      \
                .bits_per_irq = bpi,                                    \
                .len = (bpi * VGIC_NR_PRIVATE_IRQS) / 8,                \
                .access_flags = acc,                                    \
                .read = vgic_mmio_read_raz,                             \
                .write = vgic_mmio_write_wi,                            \
        }, {                                                            \
                .reg_offset = off + (bpi * VGIC_NR_PRIVATE_IRQS) / 8,   \
                .bits_per_irq = bpi,                                    \
                .len = (bpi * (1024 - VGIC_NR_PRIVATE_IRQS)) / 8,       \
                .access_flags = acc,                                    \
                .read = rd,                                             \
                .write = wr,                                            \
                .uaccess_read = ur,                                     \
                .uaccess_write = uw,                                    \
        }
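/*
 * For a 1-bit-per-IRQ register such as GICD_IGROUPR this expands to two
 * regions: the first 4 bytes (covering the 32 private IRQs) are RAZ/WI,
 * and the remaining 124 bytes cover the shared SPIs with the real
 * handlers.
 */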
static const struct vgic_register_region vgic_v3_dist_registers[] = {
        REGISTER_DESC_WITH_LENGTH_UACCESS(GICD_CTLR,
                vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc,
                NULL, vgic_mmio_uaccess_write_v3_misc,
                16, VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICD_STATUSR,
                vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR,
                vgic_mmio_read_group, vgic_mmio_write_group, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
                vgic_mmio_read_enable, vgic_mmio_write_senable,
                NULL, vgic_uaccess_write_senable, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
                vgic_mmio_read_enable, vgic_mmio_write_cenable,
                NULL, vgic_uaccess_write_cenable, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
                vgic_mmio_read_pending, vgic_mmio_write_spending,
                vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR,
                vgic_mmio_read_pending, vgic_mmio_write_cpending,
                vgic_mmio_read_raz, vgic_mmio_uaccess_write_wi, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
                vgic_mmio_read_active, vgic_mmio_write_sactive,
                vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
                vgic_mmio_read_active, vgic_mmio_write_cactive,
                vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive,
                1, VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
                vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
                8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ITARGETSR,
                vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 8,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICFGR,
                vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR,
                vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER,
                vgic_mmio_read_irouter, vgic_mmio_write_irouter, NULL, NULL, 64,
                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICD_IDREGS,
                vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
                VGIC_ACCESS_32bit),
};
static const struct vgic_register_region vgic_v3_rd_registers[] = {
        /* RD_base registers */
        REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
                vgic_mmio_read_v3r_ctlr, vgic_mmio_write_v3r_ctlr, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICR_STATUSR,
                vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
                vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
                vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
                vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER,
                vgic_mmio_read_propbase, vgic_mmio_write_propbase, 8,
                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
                vgic_mmio_read_pendbase, vgic_mmio_write_pendbase, 8,
                VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
                vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
                VGIC_ACCESS_32bit),
        /* SGI_base registers */
        REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGROUPR0,
                vgic_mmio_read_group, vgic_mmio_write_group, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISENABLER0,
                vgic_mmio_read_enable, vgic_mmio_write_senable,
                NULL, vgic_uaccess_write_senable, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICENABLER0,
                vgic_mmio_read_enable, vgic_mmio_write_cenable,
                NULL, vgic_uaccess_write_cenable, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISPENDR0,
                vgic_mmio_read_pending, vgic_mmio_write_spending,
                vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICPENDR0,
                vgic_mmio_read_pending, vgic_mmio_write_cpending,
                vgic_mmio_read_raz, vgic_mmio_uaccess_write_wi, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISACTIVER0,
                vgic_mmio_read_active, vgic_mmio_write_sactive,
                vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICACTIVER0,
                vgic_mmio_read_active, vgic_mmio_write_cactive,
                vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IPRIORITYR0,
                vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ICFGR0,
                vgic_mmio_read_config, vgic_mmio_write_config, 8,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGRPMODR0,
                vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_NSACR,
                vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
                VGIC_ACCESS_32bit),
};
unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
{
        dev->regions = vgic_v3_dist_registers;
        dev->nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);

        kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

        return SZ_64K;
}
/**
 * vgic_register_redist_iodev - register a single redist iodev
 * @vcpu: The VCPU to which the redistributor belongs
 *
 * Register a KVM iodev for this VCPU's redistributor using the address
 * provided.
 *
 * Return 0 on success, -ERRNO otherwise.
 */
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        struct vgic_dist *vgic = &kvm->arch.vgic;
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
        struct vgic_redist_region *rdreg;
        gpa_t rd_base;
        int ret;

        if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
                return 0;

        /*
         * We may be creating VCPUs before having set the base address for the
         * redistributor region, in which case we will come back to this
         * function for all VCPUs when the base address is set.  Just return
         * without doing any work for now.
         */
        rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
        if (!rdreg)
                return 0;

        if (!vgic_v3_check_base(kvm))
                return -EINVAL;

        vgic_cpu->rdreg = rdreg;

        rd_base = rdreg->base + rdreg->free_index * KVM_VGIC_V3_REDIST_SIZE;

        kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
        rd_dev->base_addr = rd_base;
        rd_dev->iodev_type = IODEV_REDIST;
        rd_dev->regions = vgic_v3_rd_registers;
        rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
        rd_dev->redist_vcpu = vcpu;

        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
                                      2 * SZ_64K, &rd_dev->dev);
        mutex_unlock(&kvm->slots_lock);

        if (ret)
                return ret;

        rdreg->free_index++;
        return 0;
}
static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
{
        struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;

        kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &rd_dev->dev);
}
static int vgic_register_all_redist_iodevs(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        int c, ret = 0;

        kvm_for_each_vcpu(c, vcpu, kvm) {
                ret = vgic_register_redist_iodev(vcpu);
                if (ret)
                        break;
        }

        if (ret) {
                /* The current c failed, so we start with the previous one. */
                mutex_lock(&kvm->slots_lock);
                for (c--; c >= 0; c--) {
                        vcpu = kvm_get_vcpu(kvm, c);
                        vgic_unregister_redist_iodev(vcpu);
                }
                mutex_unlock(&kvm->slots_lock);
        }

        return ret;
}
/**
 * vgic_v3_insert_redist_region - Insert a new redistributor region
 *
 * Performs various checks before inserting the rdist region in the list.
 * Those tests depend on whether the size of the rdist region is known
 * (ie. count != 0). The list is sorted by rdist region index.
 *
 * @kvm: kvm handle
 * @index: redist region index
 * @base: base of the new rdist region
 * @count: number of redistributors the region is made of (0 in the old style
 * single region, whose size is induced from the number of vcpus)
 *
 * Return 0 on success, < 0 otherwise
 */
static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index,
                                        gpa_t base, uint32_t count)
{
        struct vgic_dist *d = &kvm->arch.vgic;
        struct vgic_redist_region *rdreg;
        struct list_head *rd_regions = &d->rd_regions;
        size_t size = count * KVM_VGIC_V3_REDIST_SIZE;
        int ret;

        /* single rdist region already set? */
        if (!count && !list_empty(rd_regions))
                return -EINVAL;

        /* cross the end of memory? */
        if (base + size < base)
                return -EINVAL;

        if (list_empty(rd_regions)) {
                if (index != 0)
                        return -EINVAL;
        } else {
                rdreg = list_last_entry(rd_regions,
                                        struct vgic_redist_region, list);
                if (index != rdreg->index + 1)
                        return -EINVAL;

                /* Cannot add an explicitly sized region after a legacy region */
                if (!rdreg->count)
                        return -EINVAL;
        }

        /*
         * For legacy single-region redistributor regions (!count),
         * check that the redistributor region does not overlap with the
         * distributor's address space.
         */
        if (!count && !IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
                vgic_dist_overlap(kvm, base, size))
                return -EINVAL;

        /* collision with any other rdist region? */
        if (vgic_v3_rdist_overlap(kvm, base, size))
                return -EINVAL;

        rdreg = kzalloc(sizeof(*rdreg), GFP_KERNEL);
        if (!rdreg)
                return -ENOMEM;

        rdreg->base = VGIC_ADDR_UNDEF;

        ret = vgic_check_ioaddr(kvm, &rdreg->base, base, SZ_64K);
        if (ret)
                goto free;

        rdreg->base = base;
        rdreg->count = count;
        rdreg->free_index = 0;
        rdreg->index = index;

        list_add_tail(&rdreg->list, rd_regions);

        return 0;
free:
        kfree(rdreg);
        return ret;
}
int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
{
        int ret;

        ret = vgic_v3_insert_redist_region(kvm, index, addr, count);
        if (ret)
                return ret;

        /*
         * Register iodevs for each existing VCPU.  Adding more VCPUs
         * afterwards will register the iodevs when needed.
         */
        ret = vgic_register_all_redist_iodevs(kvm);
        if (ret)
                return ret;

        return 0;
}
int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        const struct vgic_register_region *region;
        struct vgic_io_device iodev;
        struct vgic_reg_attr reg_attr;
        struct kvm_vcpu *vcpu;
        gpa_t addr;
        int ret;

        ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                iodev.regions = vgic_v3_dist_registers;
                iodev.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
                iodev.base_addr = 0;
                break;
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
                iodev.regions = vgic_v3_rd_registers;
                iodev.nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
                iodev.base_addr = 0;
                break;
        }
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
                u64 reg, id;

                id = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
                return vgic_v3_has_cpu_sysregs_attr(vcpu, 0, id, &reg);
        }
        default:
                return -ENXIO;
        }

        /* We only support aligned 32-bit accesses. */
        if (addr & 3)
                return -ENXIO;

        region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
        if (!region)
                return -ENXIO;

        return 0;
}
/*
 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
 * generation register ICC_SGI1R_EL1) with a given VCPU.
 * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
 * return -1.
 */
static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
{
        unsigned long affinity;
        int level0;

        /*
         * Split the current VCPU's MPIDR into affinity level 0 and the
         * rest as this is what we have to compare against.
         */
        affinity = kvm_vcpu_get_mpidr_aff(vcpu);
        level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
        affinity &= ~MPIDR_LEVEL_MASK;

        /* bail out if the upper three levels don't match */
        if (sgi_aff != affinity)
                return -1;

        /* Is this VCPU's bit set in the mask? */
        if (!(sgi_cpu_mask & BIT(level0)))
                return -1;

        return level0;
}
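/*
 * For example, a VCPU with MPIDR Aff3.Aff2.Aff1.Aff0 = 0.0.1.3 matches a
 * request whose upper affinity levels are 0.0.1 and whose Aff0 target
 * mask has bit 3 set, and match_mpidr() then returns 3.
 */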
/*
 * The ICC_SGI* registers encode the affinity differently from the MPIDR,
 * so provide a wrapper to use the existing defines to isolate a certain
 * affinity level.
 */
#define SGI_AFFINITY_LEVEL(reg, level) \
        ((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
        >> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
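/*
 * For example, SGI_AFFINITY_LEVEL(reg, 2) extracts the Aff2 field of an
 * ICC_SGI1R_EL1 value (bits [39:32]) and shifts it to the Aff2 position
 * within an MPIDR (bits [23:16]).
 */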
/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting a SGI
 * @reg: The value written into ICC_{ASGI1,SGI0,SGI1}R by that VCPU
 * @allow_group1: Does the sysreg access allow generation of G1 SGIs
 *
 * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * The ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
 * check for matching ones. If this bit is set, we signal all, but not the
 * calling VCPU.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *c_vcpu;
        u16 target_cpus;
        u64 mpidr;
        int sgi, c;
        int vcpu_id = vcpu->vcpu_id;
        bool broadcast;
        unsigned long flags;

        sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
        broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
        target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
        mpidr = SGI_AFFINITY_LEVEL(reg, 3);
        mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
        mpidr |= SGI_AFFINITY_LEVEL(reg, 1);

        /*
         * We iterate over all VCPUs to find the MPIDRs matching the request.
         * If we have handled one CPU, we clear its bit to detect early
         * if we are already finished. This avoids iterating through all
         * VCPUs when most of the time we just signal a single VCPU.
         */
        kvm_for_each_vcpu(c, c_vcpu, kvm) {
                struct vgic_irq *irq;

                /* Exit early if we have dealt with all requested CPUs */
                if (!broadcast && target_cpus == 0)
                        break;

                /* Don't signal the calling VCPU */
                if (broadcast && c == vcpu_id)
                        continue;

                if (!broadcast) {
                        int level0;

                        level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
                        if (level0 == -1)
                                continue;

                        /* remove this matching VCPU from the mask */
                        target_cpus &= ~BIT(level0);
                }

                irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                /*
                 * An access targeting Group0 SGIs can only generate
                 * those, while an access targeting Group1 SGIs can
                 * generate interrupts of either group.
                 */
                if (!irq->group || allow_group1) {
                        if (!irq->hw) {
                                irq->pending_latch = true;
                                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                        } else {
                                /* HW SGI? Ask the GIC to inject it */
                                int err;

                                err = irq_set_irqchip_state(irq->host_irq,
                                                            IRQCHIP_STATE_PENDING,
                                                            true);
                                WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
                                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                        }
                } else {
                        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                }

                vgic_put_irq(vcpu->kvm, irq);
        }
}
int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                         int offset, u32 *val)
{
        struct vgic_io_device dev = {
                .regions = vgic_v3_dist_registers,
                .nr_regions = ARRAY_SIZE(vgic_v3_dist_registers),
        };

        return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}
int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                           int offset, u32 *val)
{
        struct vgic_io_device rd_dev = {
                .regions = vgic_v3_rd_registers,
                .nr_regions = ARRAY_SIZE(vgic_v3_rd_registers),
        };

        return vgic_uaccess(vcpu, &rd_dev, is_write, offset, val);
}
int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                                    u32 intid, u64 *val)
{
        if (intid % 32)
                return -EINVAL;

        if (is_write)
                vgic_write_irq_line_level_info(vcpu, intid, *val);
        else
                *val = vgic_read_irq_line_level_info(vcpu, intid);

        return 0;
}