// SPDX-License-Identifier: GPL-2.0-only
/*
 * GICv3 ITS emulation
 *
 * Copyright (C) 2015,2016 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/list_sort.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"
static int vgic_its_save_tables_v0(struct vgic_its *its);
static int vgic_its_restore_tables_v0(struct vgic_its *its);
static int vgic_its_commit_v0(struct vgic_its *its);
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu, bool needs_inv);
/*
 * Creates a new (reference to a) struct vgic_irq for a given LPI.
 * If this LPI is already mapped on another ITS, we increase its refcount
 * and return a pointer to the existing structure.
 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 * This function returns a pointer to the _unlocked_ structure.
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
				     struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
	unsigned long flags;
	int ret;

	/* In this case there is no put, since we keep the reference. */
	if (irq)
		return irq;

	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
	if (!irq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&irq->lpi_list);
	INIT_LIST_HEAD(&irq->ap_list);
	raw_spin_lock_init(&irq->irq_lock);

	irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);
	irq->intid = intid;
	irq->target_vcpu = vcpu;
	irq->group = 1;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	/*
	 * There could be a race with another vgic_add_lpi(), so we need to
	 * check that we don't add a second list entry with the same LPI.
	 */
	list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
		if (oldirq->intid != intid)
			continue;

		/* Someone was faster with adding this LPI, let's use that. */
		kfree(irq);
		irq = oldirq;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() on the returned pointer once it's
		 * finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);

		goto out_unlock;
	}

	list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
	dist->lpi_list_count++;

out_unlock:
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	/*
	 * We "cache" the configuration table entries in our struct vgic_irq's.
	 * However we only have those structs for mapped IRQs, so we read in
	 * the respective config data from memory here upon mapping the LPI.
	 *
	 * Should any of these fail, behave as if we couldn't create the LPI
	 * by dropping the refcount and returning the error.
	 */
	ret = update_lpi_config(kvm, irq, NULL, false);
	if (ret) {
		vgic_put_irq(kvm, irq);
		return ERR_PTR(ret);
	}

	ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
	if (ret) {
		vgic_put_irq(kvm, irq);
		return ERR_PTR(ret);
	}

	return irq;
}
struct its_device {
	struct list_head dev_list;

	/* the head for the list of ITTEs */
	struct list_head itt_head;
	u32 num_eventid_bits;
	gpa_t itt_addr;
	u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

struct its_collection {
	struct list_head coll_list;

	u32 collection_id;
	u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
	((coll)->target_addr != COLLECTION_NOT_MAPPED))

struct its_ite {
	struct list_head ite_list;

	struct vgic_irq *irq;
	struct its_collection *collection;
	u32 event_id;
};

struct vgic_translation_cache_entry {
	struct list_head	entry;
	phys_addr_t		db;
	u32			devid;
	u32			eventid;
	struct vgic_irq		*irq;
};
/**
 * struct vgic_its_abi - ITS abi ops and settings
 * @cte_esz: collection table entry size
 * @dte_esz: device table entry size
 * @ite_esz: interrupt translation table entry size
 * @save_tables: save the ITS tables into guest RAM
 * @restore_tables: restore the ITS internal structs from tables
 *  stored in guest RAM
 * @commit: initialize the registers which expose the ABI settings,
 *  especially the entry sizes
 */
struct vgic_its_abi {
	int cte_esz;
	int dte_esz;
	int ite_esz;
	int (*save_tables)(struct vgic_its *its);
	int (*restore_tables)(struct vgic_its *its);
	int (*commit)(struct vgic_its *its);
};

#define ABI_0_ESZ	8
#define ESZ_MAX		ABI_0_ESZ
static const struct vgic_its_abi its_table_abi_versions[] = {
	[0] = {
	 .cte_esz = ABI_0_ESZ,
	 .dte_esz = ABI_0_ESZ,
	 .ite_esz = ABI_0_ESZ,
	 .save_tables = vgic_its_save_tables_v0,
	 .restore_tables = vgic_its_restore_tables_v0,
	 .commit = vgic_its_commit_v0,
	},
};

#define NR_ITS_ABIS	ARRAY_SIZE(its_table_abi_versions)

inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
{
	return &its_table_abi_versions[its->abi_rev];
}

static int vgic_its_set_abi(struct vgic_its *its, u32 rev)
{
	const struct vgic_its_abi *abi;

	its->abi_rev = rev;
	abi = vgic_its_get_abi(its);
	return abi->commit(its);
}
/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
	struct its_device *device;

	list_for_each_entry(device, &its->device_list, dev_list)
		if (device_id == device->device_id)
			return device;

	return NULL;
}
/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
				u32 event_id)
{
	struct its_device *device;
	struct its_ite *ite;

	device = find_its_device(its, device_id);
	if (device == NULL)
		return NULL;

	list_for_each_entry(ite, &device->itt_head, ite_list)
		if (ite->event_id == event_id)
			return ite;

	return NULL;
}
/* To be used as an iterator this macro misses the enclosing parentheses */
#define for_each_lpi_its(dev, ite, its) \
	list_for_each_entry(dev, &(its)->device_list, dev_list) \
		list_for_each_entry(ite, &(dev)->itt_head, ite_list)

#define GIC_LPI_OFFSET 8192

#define VITS_TYPER_IDBITS 16
#define VITS_TYPER_DEVBITS 16
#define VITS_DTE_MAX_DEVID_OFFSET	(BIT(14) - 1)
#define VITS_ITE_MAX_EVENTID_OFFSET	(BIT(16) - 1)
/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
	struct its_collection *collection;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		if (coll_id == collection->collection_id)
			return collection;
	}

	return NULL;
}
#define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)
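/*
 * For illustration: a property byte of 0xa1 read from the guest's
 * configuration table decodes to priority 0xa0 (bits [7:2]) with the
 * enable bit (bit 0) set.
 */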
/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu, bool needs_inv)
{
	u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
	u8 prop;
	int ret;
	unsigned long flags;

	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
				  &prop, 1);

	if (ret)
		return ret;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);
		irq->enabled = LPI_PROP_ENABLE_BIT(prop);

		if (!irq->hw) {
			vgic_queue_irq_unlock(kvm, irq, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	if (irq->hw)
		return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);

	return 0;
}
/*
 * Create a snapshot of the current LPIs targeting @vcpu, so that we can
 * enumerate those LPIs without holding any lock.
 * Returns their number and puts the kmalloc'ed array into intid_ptr.
 */
int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	unsigned long flags;
	u32 *intids;
	int irq_count, i = 0;

	/*
	 * There is an obvious race between allocating the array and LPIs
	 * being mapped/unmapped. If we ended up here as a result of a
	 * command, we're safe (locks are held, preventing another
	 * command). If coming from another path (such as enabling LPIs),
	 * we must be careful not to overrun the array.
	 */
	irq_count = READ_ONCE(dist->lpi_list_count);
	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
	if (!intids)
		return -ENOMEM;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (i == irq_count)
			break;
		/* We don't need to "get" the IRQ, as we hold the list lock. */
		if (vcpu && irq->target_vcpu != vcpu)
			continue;
		intids[i++] = irq->intid;
	}
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	*intid_ptr = intids;
	return i;
}
static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
{
	int ret = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->target_vcpu = vcpu;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	if (irq->hw) {
		struct its_vlpi_map map;

		ret = its_get_vlpi(irq->host_irq, &map);
		if (ret)
			return ret;

		if (map.vpe)
			atomic_dec(&map.vpe->vlpi_count);
		map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
		atomic_inc(&map.vpe->vlpi_count);

		ret = its_map_vlpi(irq->host_irq, &map);
	}

	return ret;
}
/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
 * is targeting) to the VGIC's view, which deals with target VCPUs.
 * Needs to be called whenever either the collection for an LPI has
 * changed or the collection itself got retargeted.
 */
static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
{
	struct kvm_vcpu *vcpu;

	if (!its_is_collection_mapped(ite->collection))
		return;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
	update_affinity(ite->irq, vcpu);
}
/*
 * Updates the target VCPU for every LPI targeting this collection.
 * Must be called with the its_lock mutex held.
 */
static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
				       struct its_collection *coll)
{
	struct its_device *device;
	struct its_ite *ite;

	for_each_lpi_its(device, ite, its) {
		if (!ite->collection || coll != ite->collection)
			continue;

		update_affinity_ite(kvm, ite);
	}
}
static u32 max_lpis_propbaser(u64 propbaser)
{
	int nr_idbits = (propbaser & 0x1f) + 1;

	return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
}
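/*
 * Worked example: a PROPBASER with IDbits = 15 yields nr_idbits = 16 and
 * thus 65536 interrupt IDs, i.e. LPIs 8192-65535 once the first
 * GIC_LPI_OFFSET IDs are accounted for. Larger IDbits values are capped
 * at INTERRUPT_ID_BITS_ITS.
 */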
/*
 * Sync the pending table pending bit of LPIs targeting @vcpu
 * with our own data structures. This relies on the LPI being
 * mapped before.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
	gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
	struct vgic_irq *irq;
	int last_byte_offset = -1;
	int ret = 0;
	u32 *intids;
	int nr_irqs, i;
	unsigned long flags;
	u8 pendmask;

	nr_irqs = vgic_copy_lpi_list(vcpu->kvm, vcpu, &intids);
	if (nr_irqs < 0)
		return nr_irqs;

	for (i = 0; i < nr_irqs; i++) {
		int byte_offset, bit_nr;

		byte_offset = intids[i] / BITS_PER_BYTE;
		bit_nr = intids[i] % BITS_PER_BYTE;

		/*
		 * For contiguously allocated LPIs chances are we just read
		 * this very same byte in the last iteration. Reuse that.
		 */
		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest_lock(vcpu->kvm,
						  pendbase + byte_offset,
						  &pendmask, 1);
			if (ret) {
				kfree(intids);
				return ret;
			}
			last_byte_offset = byte_offset;
		}

		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = pendmask & (1U << bit_nr);
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}

	kfree(intids);

	return ret;
}
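/*
 * Worked example for the bit addressing above: INTID 8193 lives at
 * byte_offset 1024 (8193 / 8), bit_nr 1 (8193 % 8) of the pending table.
 */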
static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 reg = GITS_TYPER_PLPIS;

	/*
	 * We use linear CPU numbers for redistributor addressing,
	 * so GITS_TYPER.PTA is 0.
	 * Also we force all PROPBASER registers to be the same, so
	 * CommonLPIAff is 0 as well.
	 * To avoid memory waste in the guest, we keep the number of IDBits and
	 * DevBits low - at least for the time being.
	 */
	reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;

	return extract_bytes(reg, addr & 7, len);
}
static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 val;

	val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
	val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
	return val;
}
static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
					    struct vgic_its *its,
					    gpa_t addr, unsigned int len,
					    unsigned long val)
{
	u32 rev = GITS_IIDR_REV(val);

	if (rev >= NR_ITS_ABIS)
		return -EINVAL;
	return vgic_its_set_abi(its, rev);
}
static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GITS_PIDR0:
		return 0x92;	/* part number, bits[7:0] */
	case GITS_PIDR1:
		return 0xb4;	/* part number, bits[11:8] */
	case GITS_PIDR2:
		return GIC_PIDR2_ARCH_GICv3 | 0x0b;
	case GITS_PIDR4:
		return 0x40;	/* This is a 64K software visible page */
	/* The following are the ID registers for (any) GIC. */
	case GITS_CIDR0:
		return 0x0d;
	case GITS_CIDR1:
		return 0xf0;
	case GITS_CIDR2:
		return 0x05;
	case GITS_CIDR3:
		return 0xb1;
	}

	return 0;
}
static struct vgic_irq *__vgic_its_check_cache(struct vgic_dist *dist,
					       phys_addr_t db,
					       u32 devid, u32 eventid)
{
	struct vgic_translation_cache_entry *cte;

	list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
		/*
		 * If we hit a NULL entry, there is nothing after this
		 * point.
		 */
		if (!cte->irq)
			break;

		if (cte->db != db || cte->devid != devid ||
		    cte->eventid != eventid)
			continue;

		/*
		 * Move this entry to the head, as it is the most
		 * recently used.
		 */
		if (!list_is_first(&cte->entry, &dist->lpi_translation_cache))
			list_move(&cte->entry, &dist->lpi_translation_cache);

		return cte->irq;
	}

	return NULL;
}
static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
					     u32 devid, u32 eventid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
	irq = __vgic_its_check_cache(dist, db, devid, eventid);
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	return irq;
}
static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
				       u32 devid, u32 eventid,
				       struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_translation_cache_entry *cte;
	unsigned long flags;
	phys_addr_t db;

	/* Do not cache a directly injected interrupt */
	if (irq->hw)
		return;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	if (unlikely(list_empty(&dist->lpi_translation_cache)))
		goto out;

	/*
	 * We could have raced with another CPU caching the same
	 * translation behind our back, so let's check it is not already
	 * in there.
	 */
	db = its->vgic_its_base + GITS_TRANSLATER;
	if (__vgic_its_check_cache(dist, db, devid, eventid))
		goto out;

	/* Always reuse the last entry (LRU policy) */
	cte = list_last_entry(&dist->lpi_translation_cache,
			      typeof(*cte), entry);

	/*
	 * Caching the translation implies having an extra reference
	 * to the interrupt, so drop the potential reference on what
	 * was in the cache, and increment it on the new interrupt.
	 */
	if (cte->irq)
		__vgic_put_lpi_locked(kvm, cte->irq);

	vgic_get_irq_kref(irq);

	cte->db		= db;
	cte->devid	= devid;
	cte->eventid	= eventid;
	cte->irq	= irq;

	/* Move the new translation to the head of the list */
	list_move(&cte->entry, &dist->lpi_translation_cache);

out:
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}
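/*
 * The cache list above is kept in most-recently-used order: lookups and
 * insertions move an entry to the head, so list_last_entry() always
 * picks the least recently used slot for eviction.
 */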
void vgic_its_invalidate_cache(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_translation_cache_entry *cte;
	unsigned long flags;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
		/*
		 * If we hit a NULL entry, there is nothing after this
		 * point.
		 */
		if (!cte->irq)
			break;

		__vgic_put_lpi_locked(kvm, cte->irq);
		cte->irq = NULL;
	}

	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
			 u32 devid, u32 eventid, struct vgic_irq **irq)
{
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;

	if (!its->enabled)
		return -EBUSY;

	ite = find_ite(its, devid, eventid);
	if (!ite || !its_is_collection_mapped(ite->collection))
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
	if (!vcpu)
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	if (!vcpu->arch.vgic_cpu.lpis_enabled)
		return -EBUSY;

	vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);

	*irq = ite->irq;
	return 0;
}
struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
{
	u64 address;
	struct kvm_io_device *kvm_io_dev;
	struct vgic_io_device *iodev;

	if (!vgic_has_its(kvm))
		return ERR_PTR(-ENODEV);

	if (!(msi->flags & KVM_MSI_VALID_DEVID))
		return ERR_PTR(-EINVAL);

	address = (u64)msi->address_hi << 32 | msi->address_lo;

	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
	if (!kvm_io_dev)
		return ERR_PTR(-EINVAL);

	if (kvm_io_dev->ops != &kvm_io_gic_ops)
		return ERR_PTR(-EINVAL);

	iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
	if (iodev->iodev_type != IODEV_ITS)
		return ERR_PTR(-EINVAL);

	return iodev->its;
}
/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping
 * related errors and negative error values for generic errors.
 */
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
				u32 devid, u32 eventid)
{
	struct vgic_irq *irq = NULL;
	unsigned long flags;
	int err;

	err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
	if (err)
		return err;

	if (irq->hw)
		return irq_set_irqchip_state(irq->host_irq,
					     IRQCHIP_STATE_PENDING, true);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, irq, flags);

	return 0;
}
int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
{
	struct vgic_irq *irq;
	unsigned long flags;
	phys_addr_t db;

	db = (u64)msi->address_hi << 32 | msi->address_lo;
	irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data);
	if (!irq)
		return -EWOULDBLOCK;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, irq, flags);

	return 0;
}
/*
 * Queries the KVM IO bus framework to get the ITS pointer from the given
 * doorbell address.
 * We then call vgic_its_trigger_msi() with the decoded data.
 * According to the KVM_SIGNAL_MSI API description returns 1 on success.
 */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	struct vgic_its *its;
	int ret;

	if (!vgic_its_inject_cached_translation(kvm, msi))
		return 1;

	its = vgic_msi_to_its(kvm, msi);
	if (IS_ERR(its))
		return PTR_ERR(its);

	mutex_lock(&its->its_lock);
	ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
	mutex_unlock(&its->its_lock);

	if (ret < 0)
		return ret;

	/*
	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
	 * if the guest has blocked the MSI. So we map any LPI mapping
	 * related error to that.
	 */
	if (ret)
		return 0;
	else
		return 1;
}
/* Requires the its_lock to be held. */
static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
{
	list_del(&ite->ite_list);

	/* This put matches the get in vgic_add_lpi. */
	if (ite->irq) {
		if (ite->irq->hw)
			WARN_ON(its_unmap_vlpi(ite->irq->host_irq));

		vgic_put_irq(kvm, ite->irq);
	}

	kfree(ite);
}
static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
	return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}

#define its_cmd_get_command(cmd)	its_cmd_mask_field(cmd, 0,  0,  8)
#define its_cmd_get_deviceid(cmd)	its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_size(cmd)		(its_cmd_mask_field(cmd, 1,  0,  5) + 1)
#define its_cmd_get_id(cmd)		its_cmd_mask_field(cmd, 1,  0, 32)
#define its_cmd_get_physical_id(cmd)	its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)	its_cmd_mask_field(cmd, 2,  0, 16)
#define its_cmd_get_ittaddr(cmd)	(its_cmd_mask_field(cmd, 2,  8, 44) << 8)
#define its_cmd_get_target_addr(cmd)	its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)	its_cmd_mask_field(cmd, 2, 63,  1)
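/*
 * For illustration, a MAPTI command occupies four little-endian
 * doublewords and decodes with the helpers above as:
 *   DW0[7:0]    command number (GITS_CMD_MAPTI)
 *   DW0[63:32]  DeviceID
 *   DW1[31:0]   EventID
 *   DW1[63:32]  pINTID (the physical LPI number)
 *   DW2[15:0]   ICID (collection ID)
 */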
/*
 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
				       u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (ite && its_is_collection_mapped(ite->collection)) {
		/*
		 * Though the spec talks about removing the pending state, we
		 * don't bother here since we clear the ITTE anyway and the
		 * pending state is a property of the ITTE struct.
		 */
		vgic_its_invalidate_cache(kvm);

		its_free_ite(kvm, ite);
		return 0;
	}

	return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
}
/*
 * The MOVI command moves an ITTE to a different collection.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;
	struct its_collection *collection;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_MOVI_UNMAPPED_INTERRUPT;

	if (!its_is_collection_mapped(ite->collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	ite->collection = collection;
	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	vgic_its_invalidate_cache(kvm);

	return update_affinity(ite->irq, vcpu);
}
/*
 * Check whether an ID can be stored into the corresponding guest table.
 * For a direct table this is pretty easy, but gets a bit nasty for
 * indirect tables. We check whether the resulting guest physical address
 * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
			      gpa_t *eaddr)
{
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
	phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
	int esz = GITS_BASER_ENTRY_SIZE(baser);
	int index, idx;
	gfn_t gfn;
	bool ret;

	switch (type) {
	case GITS_BASER_TYPE_DEVICE:
		if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
			return false;
		break;
	case GITS_BASER_TYPE_COLLECTION:
		/* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
		if (id >= BIT_ULL(16))
			return false;
		break;
	default:
		return false;
	}

	if (!(baser & GITS_BASER_INDIRECT)) {
		phys_addr_t addr;

		if (id >= (l1_tbl_size / esz))
			return false;

		addr = base + id * esz;
		gfn = addr >> PAGE_SHIFT;

		if (eaddr)
			*eaddr = addr;

		goto out;
	}

	/* calculate and check the index into the 1st level */
	index = id / (SZ_64K / esz);
	if (index >= (l1_tbl_size / sizeof(u64)))
		return false;

	/* Each 1st level entry is represented by a 64-bit value. */
	if (kvm_read_guest_lock(its->dev->kvm,
				base + index * sizeof(indirect_ptr),
				&indirect_ptr, sizeof(indirect_ptr)))
		return false;

	indirect_ptr = le64_to_cpu(indirect_ptr);

	/* check the valid bit of the first level entry */
	if (!(indirect_ptr & BIT_ULL(63)))
		return false;

	/* Mask the guest physical address and calculate the frame number. */
	indirect_ptr &= GENMASK_ULL(51, 16);

	/* Find the address of the actual entry */
	index = id % (SZ_64K / esz);
	indirect_ptr += index * esz;
	gfn = indirect_ptr >> PAGE_SHIFT;

	if (eaddr)
		*eaddr = indirect_ptr;

out:
	idx = srcu_read_lock(&its->dev->kvm->srcu);
	ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
	srcu_read_unlock(&its->dev->kvm->srcu, idx);
	return ret;
}
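/*
 * Worked example for the indirect case, assuming 8-byte entries: each
 * 64K second-level page holds SZ_64K / 8 = 8192 entries, so ID 20000
 * maps to L1 index 2 (20000 / 8192) and entry 3616 (20000 % 8192)
 * within that L2 page.
 */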
static int vgic_its_alloc_collection(struct vgic_its *its,
				     struct its_collection **colp,
				     u32 coll_id)
{
	struct its_collection *collection;

	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
		return E_ITS_MAPC_COLLECTION_OOR;

	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
	if (!collection)
		return -ENOMEM;

	collection->collection_id = coll_id;
	collection->target_addr = COLLECTION_NOT_MAPPED;

	list_add_tail(&collection->coll_list, &its->collection_list);
	*colp = collection;

	return 0;
}
static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
{
	struct its_collection *collection;
	struct its_device *device;
	struct its_ite *ite;

	/*
	 * Clearing the mapping for that collection ID removes the
	 * entry from the list. If there wasn't any before, we can
	 * go home early.
	 */
	collection = find_collection(its, coll_id);
	if (!collection)
		return;

	for_each_lpi_its(device, ite, its)
		if (ite->collection &&
		    ite->collection->collection_id == coll_id)
			ite->collection = NULL;

	list_del(&collection->coll_list);
	kfree(collection);
}
/* Must be called with its_lock mutex held */
static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
					  struct its_collection *collection,
					  u32 event_id)
{
	struct its_ite *ite;

	ite = kzalloc(sizeof(*ite), GFP_KERNEL);
	if (!ite)
		return ERR_PTR(-ENOMEM);

	ite->event_id	= event_id;
	ite->collection = collection;

	list_add_tail(&ite->ite_list, &device->itt_head);
	return ite;
}
/*
 * The MAPTI and MAPI commands map LPIs to ITTEs.
 * Must be called with its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_ite *ite;
	struct kvm_vcpu *vcpu = NULL;
	struct its_device *device;
	struct its_collection *collection, *new_coll = NULL;
	struct vgic_irq *irq;
	int lpi_nr;

	device = find_its_device(its, device_id);
	if (!device)
		return E_ITS_MAPTI_UNMAPPED_DEVICE;

	if (event_id >= BIT_ULL(device->num_eventid_bits))
		return E_ITS_MAPTI_ID_OOR;

	if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
		lpi_nr = its_cmd_get_physical_id(its_cmd);
	else
		lpi_nr = event_id;
	if (lpi_nr < GIC_LPI_OFFSET ||
	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
		return E_ITS_MAPTI_PHYSICALID_OOR;

	/* If there is an existing mapping, behavior is UNPREDICTABLE. */
	if (find_ite(its, device_id, event_id))
		return 0;

	collection = find_collection(its, coll_id);
	if (!collection) {
		int ret = vgic_its_alloc_collection(its, &collection, coll_id);
		if (ret)
			return ret;
		new_coll = collection;
	}

	ite = vgic_its_alloc_ite(device, collection, event_id);
	if (IS_ERR(ite)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		return PTR_ERR(ite);
	}

	if (its_is_collection_mapped(collection))
		vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
	if (IS_ERR(irq)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		its_free_ite(kvm, ite);
		return PTR_ERR(irq);
	}
	ite->irq = irq;

	return 0;
}
/* Requires the its_lock to be held. */
static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
{
	struct its_ite *ite, *temp;

	/*
	 * The spec says that unmapping a device with still valid
	 * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
	 * since we cannot leave the memory unreferenced.
	 */
	list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
		its_free_ite(kvm, ite);

	vgic_its_invalidate_cache(kvm);

	list_del(&device->dev_list);
	kfree(device);
}
/* its lock must be held */
static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
{
	struct its_device *cur, *temp;

	list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
		vgic_its_free_device(kvm, cur);
}
/* its lock must be held */
static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
{
	struct its_collection *cur, *temp;

	list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
		vgic_its_free_collection(its, cur->collection_id);
}
/* Must be called with its_lock mutex held */
static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
						u32 device_id, gpa_t itt_addr,
						u8 num_eventid_bits)
{
	struct its_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	device->device_id = device_id;
	device->itt_addr = itt_addr;
	device->num_eventid_bits = num_eventid_bits;
	INIT_LIST_HEAD(&device->itt_head);

	list_add_tail(&device->dev_list, &its->device_list);
	return device;
}
/*
 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	bool valid = its_cmd_get_validbit(its_cmd);
	u8 num_eventid_bits = its_cmd_get_size(its_cmd);
	gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
	struct its_device *device;

	if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
		return E_ITS_MAPD_DEVICE_OOR;

	if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
		return E_ITS_MAPD_ITTSIZE_OOR;

	device = find_its_device(its, device_id);

	/*
	 * The spec says that calling MAPD on an already mapped device
	 * invalidates all cached data for this device. We implement this
	 * by removing the mapping and re-establishing it.
	 */
	if (device)
		vgic_its_free_device(kvm, device);

	/*
	 * The spec does not say whether unmapping a not-mapped device
	 * is an error, so we are done in any case.
	 */
	if (!valid)
		return 0;

	device = vgic_its_alloc_device(its, device_id, itt_addr,
				       num_eventid_bits);

	return PTR_ERR_OR_ZERO(device);
}
/*
 * The MAPC command maps collection IDs to redistributors.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u16 coll_id;
	u32 target_addr;
	struct its_collection *collection;
	bool valid;

	valid = its_cmd_get_validbit(its_cmd);
	coll_id = its_cmd_get_collection(its_cmd);
	target_addr = its_cmd_get_target_addr(its_cmd);

	if (target_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MAPC_PROCNUM_OOR;

	if (!valid) {
		vgic_its_free_collection(its, coll_id);
		vgic_its_invalidate_cache(kvm);
	} else {
		collection = find_collection(its, coll_id);

		if (!collection) {
			int ret;

			ret = vgic_its_alloc_collection(its, &collection,
							coll_id);
			if (ret)
				return ret;
			collection->target_addr = target_addr;
		} else {
			collection->target_addr = target_addr;
			update_affinity_collection(kvm, its, collection);
		}
	}

	return 0;
}
/*
 * The CLEAR command removes the pending state for a particular LPI.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
				     u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_CLEAR_UNMAPPED_INTERRUPT;

	ite->irq->pending_latch = false;

	if (ite->irq->hw)
		return irq_set_irqchip_state(ite->irq->host_irq,
					     IRQCHIP_STATE_PENDING, false);

	return 0;
}
/*
 * The INV command syncs the configuration bits from the memory table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_INV_UNMAPPED_INTERRUPT;

	return update_lpi_config(kvm, ite->irq, NULL, true);
}
/*
 * The INVALL command requests flushing of all IRQ data in this collection.
 * Find the VCPU mapped to that collection, then iterate over the VM's list
 * of mapped LPIs and update the configuration for each IRQ which targets
 * the specified vcpu. The configuration will be read from the in-memory
 * configuration table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_collection *collection;
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count, i;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_INVALL_UNMAPPED_COLLECTION;

	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
	if (irq_count < 0)
		return irq_count;

	for (i = 0; i < irq_count; i++) {
		irq = vgic_get_irq(kvm, NULL, intids[i]);
		if (!irq)
			continue;
		update_lpi_config(kvm, irq, vcpu, false);
		vgic_put_irq(kvm, irq);
	}

	kfree(intids);

	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
		its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);

	return 0;
}
/*
 * The MOVALL command moves the pending state of all IRQs targeting one
 * redistributor to another. We don't hold the pending state in the VCPUs,
 * but in the IRQs instead, so there is really not much to do for us here.
 * However the spec says that no IRQ must target the old redistributor
 * afterwards, so we make sure that no LPI is using the associated target_vcpu.
 * This command affects all LPIs in the system that target that redistributor.
 */
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	u32 target1_addr = its_cmd_get_target_addr(its_cmd);
	u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
	struct kvm_vcpu *vcpu1, *vcpu2;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count, i;

	if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
	    target2_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MOVALL_PROCNUM_OOR;

	if (target1_addr == target2_addr)
		return 0;

	vcpu1 = kvm_get_vcpu(kvm, target1_addr);
	vcpu2 = kvm_get_vcpu(kvm, target2_addr);

	irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids);
	if (irq_count < 0)
		return irq_count;

	for (i = 0; i < irq_count; i++) {
		irq = vgic_get_irq(kvm, NULL, intids[i]);

		update_affinity(irq, vcpu2);

		vgic_put_irq(kvm, irq);
	}

	vgic_its_invalidate_cache(kvm);

	kfree(intids);
	return 0;
}
/*
 * The INT command injects the LPI associated with that DevID/EvID pair.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 msi_data = its_cmd_get_id(its_cmd);
	u64 msi_devid = its_cmd_get_deviceid(its_cmd);

	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
}
/*
 * This function is called with the its_cmd lock held, but the ITS data
 * structure lock dropped.
 */
static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	int ret = -ENODEV;

	mutex_lock(&its->its_lock);
	switch (its_cmd_get_command(its_cmd)) {
	case GITS_CMD_MAPD:
		ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPC:
		ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPTI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVI:
		ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
		break;
	case GITS_CMD_DISCARD:
		ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
		break;
	case GITS_CMD_CLEAR:
		ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVALL:
		ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
		break;
	case GITS_CMD_INT:
		ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
		break;
	case GITS_CMD_INV:
		ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
		break;
	case GITS_CMD_INVALL:
		ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
		break;
	case GITS_CMD_SYNC:
		/* we ignore this command: we are in sync all of the time */
		ret = 0;
		break;
	}
	mutex_unlock(&its->its_lock);

	return ret;
}
static u64 vgic_sanitise_its_baser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
				  GITS_BASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
				  GITS_BASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
				  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* We support only one (ITS) page size: 64K */
	reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;

	return reg;
}
static u64 vgic_sanitise_its_cbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
				  GITS_CBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
				  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
				  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* Sanitise the physical address to be 64k aligned. */
	reg &= ~GENMASK_ULL(15, 12);

	return reg;
}
static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cbaser, addr & 7, len);
}
static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
				       gpa_t addr, unsigned int len,
				       unsigned long val)
{
	/* When GITS_CTLR.Enable is 1, this register is RO. */
	if (its->enabled)
		return;

	mutex_lock(&its->cmd_lock);
	its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
	its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
	its->creadr = 0;
	/*
	 * CWRITER is architecturally UNKNOWN on reset, but we need to reset
	 * it to CREADR to make sure we start with an empty command buffer.
	 */
	its->cwriter = its->creadr;
	mutex_unlock(&its->cmd_lock);
}
#define ITS_CMD_BUFFER_SIZE(baser)	((((baser) & 0xff) + 1) << 12)
#define ITS_CMD_SIZE			32
#define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))
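/*
 * Worked example: a CBASER Size field of 0 gives one 4K page, i.e. room
 * for 4096 / 32 = 128 commands; the maximum field value of 0xff gives
 * 256 pages (1MB), i.e. 32768 commands. CREADR/CWRITER advance in
 * ITS_CMD_SIZE steps and wrap at ITS_CMD_BUFFER_SIZE().
 */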
/* Must be called with the cmd_lock held. */
static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
{
	gpa_t cbaser;
	u64 cmd_buf[4];

	/* Commands are only processed when the ITS is enabled. */
	if (!its->enabled)
		return;

	cbaser = GITS_CBASER_ADDRESS(its->cbaser);

	while (its->cwriter != its->creadr) {
		int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
					      cmd_buf, ITS_CMD_SIZE);
		/*
		 * If kvm_read_guest() fails, this could be due to the guest
		 * programming a bogus value in CBASER or something else going
		 * wrong from which we cannot easily recover.
		 * According to section 6.3.2 in the GICv3 spec we can just
		 * ignore that command then.
		 */
		if (!ret)
			vgic_its_handle_command(kvm, its, cmd_buf);

		its->creadr += ITS_CMD_SIZE;
		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
			its->creadr = 0;
	}
}
/*
 * By writing to CWRITER the guest announces new commands to be processed.
 * To avoid any races in the first place, we take the its_cmd lock, which
 * protects our ring buffer variables, so that there is only one user
 * per ITS handling commands at a given time.
 */
static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
					gpa_t addr, unsigned int len,
					unsigned long val)
{
	u64 reg;

	if (!its)
		return;

	mutex_lock(&its->cmd_lock);

	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
	reg = ITS_CMD_OFFSET(reg);
	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		mutex_unlock(&its->cmd_lock);
		return;
	}
	its->cwriter = reg;

	vgic_its_process_commands(kvm, its);

	mutex_unlock(&its->cmd_lock);
}
static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
						struct vgic_its *its,
						gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cwriter, addr & 0x7, len);
}

static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->creadr, addr & 0x7, len);
}
static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len,
					      unsigned long val)
{
	u32 cmd_offset;
	int ret = 0;

	mutex_lock(&its->cmd_lock);

	if (its->enabled) {
		ret = -EBUSY;
		goto out;
	}

	cmd_offset = ITS_CMD_OFFSET(val);
	if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		ret = -EINVAL;
		goto out;
	}

	its->creadr = cmd_offset;
out:
	mutex_unlock(&its->cmd_lock);
	return ret;
}
#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	u64 reg;

	switch (BASER_INDEX(addr)) {
	case 0:
		reg = its->baser_device_table;
		break;
	case 1:
		reg = its->baser_coll_table;
		break;
	default:
		reg = 0;
		break;
	}

	return extract_bytes(reg, addr & 7, len);
}
#define GITS_BASER_RO_MASK	(GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
static void vgic_mmio_write_its_baser(struct kvm *kvm,
				      struct vgic_its *its,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 entry_size, table_type;
	u64 reg, *regptr, clearbits = 0;

	/* When GITS_CTLR.Enable is 1, we ignore write accesses. */
	if (its->enabled)
		return;

	switch (BASER_INDEX(addr)) {
	case 0:
		regptr = &its->baser_device_table;
		entry_size = abi->dte_esz;
		table_type = GITS_BASER_TYPE_DEVICE;
		break;
	case 1:
		regptr = &its->baser_coll_table;
		entry_size = abi->cte_esz;
		table_type = GITS_BASER_TYPE_COLLECTION;
		clearbits = GITS_BASER_INDIRECT;
		break;
	default:
		return;
	}

	reg = update_64bit_reg(*regptr, addr & 7, len, val);
	reg &= ~GITS_BASER_RO_MASK;
	reg &= ~clearbits;

	reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
	reg |= table_type << GITS_BASER_TYPE_SHIFT;
	reg = vgic_sanitise_its_baser(reg);

	*regptr = reg;

	if (!(reg & GITS_BASER_VALID)) {
		/* Take the its_lock to prevent a race with a save/restore */
		mutex_lock(&its->its_lock);
		switch (table_type) {
		case GITS_BASER_TYPE_DEVICE:
			vgic_its_free_device_list(kvm, its);
			break;
		case GITS_BASER_TYPE_COLLECTION:
			vgic_its_free_collection_list(kvm, its);
			break;
		}
		mutex_unlock(&its->its_lock);
	}
}
static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 reg = 0;

	mutex_lock(&its->cmd_lock);
	if (its->creadr == its->cwriter)
		reg |= GITS_CTLR_QUIESCENT;
	if (its->enabled)
		reg |= GITS_CTLR_ENABLE;
	mutex_unlock(&its->cmd_lock);

	return reg;
}
static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	mutex_lock(&its->cmd_lock);

	/*
	 * It is UNPREDICTABLE to enable the ITS if any of the CBASER or
	 * device/collection BASER are invalid
	 */
	if (!its->enabled && (val & GITS_CTLR_ENABLE) &&
		(!(its->baser_device_table & GITS_BASER_VALID) ||
		 !(its->baser_coll_table & GITS_BASER_VALID) ||
		 !(its->cbaser & GITS_CBASER_VALID)))
		goto out;

	its->enabled = !!(val & GITS_CTLR_ENABLE);
	if (!its->enabled)
		vgic_its_invalidate_cache(kvm);

	/*
	 * Try to process any pending commands. This function bails out early
	 * if the ITS is disabled or no commands have been queued.
	 */
	vgic_its_process_commands(kvm, its);

out:
	mutex_unlock(&its->cmd_lock);
}
#define REGISTER_ITS_DESC(off, rd, wr, length, acc)		\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
}

#define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
	.uaccess_its_write = uwr,				\
}

static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
			      gpa_t addr, unsigned int len, unsigned long val)
{
	/* Ignore */
}
[] = {
1770 REGISTER_ITS_DESC(GITS_CTLR
,
1771 vgic_mmio_read_its_ctlr
, vgic_mmio_write_its_ctlr
, 4,
1773 REGISTER_ITS_DESC_UACCESS(GITS_IIDR
,
1774 vgic_mmio_read_its_iidr
, its_mmio_write_wi
,
1775 vgic_mmio_uaccess_write_its_iidr
, 4,
1777 REGISTER_ITS_DESC(GITS_TYPER
,
1778 vgic_mmio_read_its_typer
, its_mmio_write_wi
, 8,
1779 VGIC_ACCESS_64bit
| VGIC_ACCESS_32bit
),
1780 REGISTER_ITS_DESC(GITS_CBASER
,
1781 vgic_mmio_read_its_cbaser
, vgic_mmio_write_its_cbaser
, 8,
1782 VGIC_ACCESS_64bit
| VGIC_ACCESS_32bit
),
1783 REGISTER_ITS_DESC(GITS_CWRITER
,
1784 vgic_mmio_read_its_cwriter
, vgic_mmio_write_its_cwriter
, 8,
1785 VGIC_ACCESS_64bit
| VGIC_ACCESS_32bit
),
1786 REGISTER_ITS_DESC_UACCESS(GITS_CREADR
,
1787 vgic_mmio_read_its_creadr
, its_mmio_write_wi
,
1788 vgic_mmio_uaccess_write_its_creadr
, 8,
1789 VGIC_ACCESS_64bit
| VGIC_ACCESS_32bit
),
1790 REGISTER_ITS_DESC(GITS_BASER
,
1791 vgic_mmio_read_its_baser
, vgic_mmio_write_its_baser
, 0x40,
1792 VGIC_ACCESS_64bit
| VGIC_ACCESS_32bit
),
1793 REGISTER_ITS_DESC(GITS_IDREGS_BASE
,
1794 vgic_mmio_read_its_idregs
, its_mmio_write_wi
, 0x30,
/* This is called on setting the LPI enable bit in the redistributor. */
void vgic_enable_lpis(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
		its_sync_lpi_pending_table(vcpu);
}
static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
				   u64 addr)
{
	struct vgic_io_device *iodev = &its->iodev;
	int ret;

	mutex_lock(&kvm->slots_lock);
	if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
		ret = -EBUSY;
		goto out;
	}

	its->vgic_its_base = addr;
	iodev->regions = its_registers;
	iodev->nr_regions = ARRAY_SIZE(its_registers);
	kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);

	iodev->base_addr = its->vgic_its_base;
	iodev->iodev_type = IODEV_ITS;
	iodev->its = its;
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
out:
	mutex_unlock(&kvm->slots_lock);

	return ret;
}
/* Default is 16 cached LPIs per vcpu */
#define LPI_DEFAULT_PCPU_CACHE_SIZE	16

void vgic_lpi_translation_cache_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	unsigned int sz;
	int i;

	if (!list_empty(&dist->lpi_translation_cache))
		return;

	sz = atomic_read(&kvm->online_vcpus) * LPI_DEFAULT_PCPU_CACHE_SIZE;

	for (i = 0; i < sz; i++) {
		struct vgic_translation_cache_entry *cte;

		/* An allocation failure is not fatal */
		cte = kzalloc(sizeof(*cte), GFP_KERNEL);
		if (WARN_ON(!cte))
			break;

		INIT_LIST_HEAD(&cte->entry);
		list_add(&cte->entry, &dist->lpi_translation_cache);
	}
}
void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_translation_cache_entry *cte, *tmp;

	vgic_its_invalidate_cache(kvm);

	list_for_each_entry_safe(cte, tmp,
				 &dist->lpi_translation_cache, entry) {
		list_del(&cte->entry);
		kfree(cte);
	}
}
#define INITIAL_BASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)		| \
	 GITS_BASER_PAGE_SIZE_64K)

#define INITIAL_PROPBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))
static int vgic_its_create(struct kvm_device *dev, u32 type)
{
	struct vgic_its *its;

	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
		return -ENODEV;

	its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
	if (!its)
		return -ENOMEM;

	if (vgic_initialized(dev->kvm)) {
		int ret = vgic_v4_init(dev->kvm);
		if (ret < 0) {
			kfree(its);
			return ret;
		}

		vgic_lpi_translation_cache_init(dev->kvm);
	}

	mutex_init(&its->its_lock);
	mutex_init(&its->cmd_lock);

	its->vgic_its_base = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&its->device_list);
	INIT_LIST_HEAD(&its->collection_list);

	dev->kvm->arch.vgic.msis_require_devid = true;
	dev->kvm->arch.vgic.has_its = true;
	its->enabled = false;
	its->dev = dev;

	its->baser_device_table = INITIAL_BASER_VALUE			|
		((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
	its->baser_coll_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
	dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;

	dev->private = its;

	return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
}
static void vgic_its_destroy(struct kvm_device *kvm_dev)
{
	struct kvm *kvm = kvm_dev->kvm;
	struct vgic_its *its = kvm_dev->private;

	mutex_lock(&its->its_lock);

	vgic_its_free_device_list(kvm, its);
	vgic_its_free_collection_list(kvm, its);

	mutex_unlock(&its->its_lock);
	kfree(its);
	kfree(kvm_dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
}
static int vgic_its_has_attr_regs(struct kvm_device *dev,
				  struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	gpa_t offset = attr->attr;
	int align;

	align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;

	if (offset & align)
		return -EINVAL;

	region = vgic_find_mmio_region(its_registers,
				       ARRAY_SIZE(its_registers),
				       offset);
	if (!region)
		return -ENXIO;

	return 0;
}
*dev
,
1967 struct kvm_device_attr
*attr
,
1968 u64
*reg
, bool is_write
)
1970 const struct vgic_register_region
*region
;
1971 struct vgic_its
*its
;
1977 offset
= attr
->attr
;
1980 * Although the spec supports upper/lower 32-bit accesses to
1981 * 64-bit ITS registers, the userspace ABI requires 64-bit
1982 * accesses to all 64-bit wide registers. We therefore only
1983 * support 32-bit accesses to GITS_CTLR, GITS_IIDR and GITS ID
1986 if ((offset
< GITS_TYPER
) || (offset
>= GITS_PIDR4
))
1994 mutex_lock(&dev
->kvm
->lock
);
1996 if (IS_VGIC_ADDR_UNDEF(its
->vgic_its_base
)) {
2001 region
= vgic_find_mmio_region(its_registers
,
2002 ARRAY_SIZE(its_registers
),
2009 if (!lock_all_vcpus(dev
->kvm
)) {
2014 addr
= its
->vgic_its_base
+ offset
;
2016 len
= region
->access_flags
& VGIC_ACCESS_64bit
? 8 : 4;
2019 if (region
->uaccess_its_write
)
2020 ret
= region
->uaccess_its_write(dev
->kvm
, its
, addr
,
2023 region
->its_write(dev
->kvm
, its
, addr
, len
, *reg
);
2025 *reg
= region
->its_read(dev
->kvm
, its
, addr
, len
);
2027 unlock_all_vcpus(dev
->kvm
);
2029 mutex_unlock(&dev
->kvm
->lock
);
static u32 compute_next_devid_offset(struct list_head *h,
				     struct its_device *dev)
{
	struct its_device *next;
	u32 next_offset;

	if (list_is_last(&dev->dev_list, h))
		return 0;
	next = list_next_entry(dev, dev_list);
	next_offset = next->device_id - dev->device_id;

	return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET);
}
static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
{
	struct its_ite *next;
	u32 next_offset;

	if (list_is_last(&ite->ite_list, h))
		return 0;
	next = list_next_entry(ite, ite_list);
	next_offset = next->event_id - ite->event_id;

	return min_t(u32, next_offset, VITS_ITE_MAX_EVENTID_OFFSET);
}
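/*
 * Example of the "next" encoding produced by the two helpers above: for
 * devices with IDs 5 and 9 adjacent in the (sorted) list, the saved
 * entry of device 5 stores a next-offset of 4; an offset of 0 marks the
 * last entry.
 */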
/**
 * entry_fn_t - Callback called on a table entry restore path
 * @its: its handle
 * @id: id of the entry
 * @entry: pointer to the entry
 * @opaque: pointer to an opaque data
 *
 * Return: < 0 on error, 0 if last element was identified, id offset to next
 * element otherwise
 */
typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
			  void *opaque);
/**
 * scan_its_table - Scan a contiguous table in guest RAM and applies a function
 * to each entry
 *
 * @its: its handle
 * @base: base gpa of the table
 * @size: size of the table in bytes
 * @esz: entry size in bytes
 * @start_id: the ID of the first entry in the table
 * (non zero for 2d level tables)
 * @fn: function to apply on each entry
 *
 * Return: < 0 on error, 0 if last element was identified, 1 otherwise
 * (the last element may not be found on second level tables)
 */
static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
			  int start_id, entry_fn_t fn, void *opaque)
{
	struct kvm *kvm = its->dev->kvm;
	unsigned long len = size;
	int id = start_id;
	gpa_t gpa = base;
	char entry[ESZ_MAX];
	int ret;

	memset(entry, 0, esz);

	while (len > 0) {
		int next_offset;
		size_t byte_offset;

		ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
		if (ret)
			return ret;

		next_offset = fn(its, id, entry, opaque);
		if (next_offset <= 0)
			return next_offset;

		byte_offset = next_offset * esz;
		id += next_offset;
		gpa += byte_offset;
		len -= byte_offset;
	}
	return 1;
}
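/*
 * A minimal, hypothetical callback to illustrate the contract that
 * scan_its_table() expects from an entry_fn_t:
 *
 *	static int dump_entry(struct vgic_its *its, u32 id, void *entry,
 *			      void *opaque)
 *	{
 *		u64 val = le64_to_cpu(*(u64 *)entry);
 *
 *		if (!(val & BIT_ULL(63)))
 *			return 1;	// invalid: step to the next entry
 *		return 0;		// treat a valid entry as the last one
 *	}
 *
 * The real callbacks below (vgic_its_restore_ite/dte) instead return the
 * ID offset to the next populated entry, letting the scan skip holes.
 */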
/**
 * vgic_its_save_ite - Save an interrupt translation entry at @gpa
 */
static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
			     struct its_ite *ite, gpa_t gpa, int ite_esz)
{
	struct kvm *kvm = its->dev->kvm;
	u32 next_offset;
	u64 val;

	next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
	val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
	       ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
		ite->collection->collection_id;
	val = cpu_to_le64(val);
	return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
}
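/*
 * Layout sketch of the 8-byte ITE saved above (ABI rev 0): the ICID in
 * the low bits, the physical INTID at KVM_ITS_ITE_PINTID_SHIFT, and the
 * offset to the next populated EventID in the topmost bits, mirroring
 * what vgic_its_restore_ite() decodes.
 */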
/**
 * vgic_its_restore_ite - restore an interrupt translation entry
 * @event_id: id used for indexing
 * @ptr: pointer to the ITE entry
 * @opaque: pointer to the its_device
 */
static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
				void *ptr, void *opaque)
{
	struct its_device *dev = (struct its_device *)opaque;
	struct its_collection *collection;
	struct kvm *kvm = its->dev->kvm;
	struct kvm_vcpu *vcpu = NULL;
	u64 val;
	u64 *p = (u64 *)ptr;
	struct vgic_irq *irq;
	u32 coll_id, lpi_id;
	struct its_ite *ite;
	u32 offset;

	val = *p;

	val = le64_to_cpu(val);

	coll_id = val & KVM_ITS_ITE_ICID_MASK;
	lpi_id = (val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT;

	if (!lpi_id)
		return 1; /* invalid entry, no choice but to scan next entry */

	if (lpi_id < VGIC_MIN_LPI)
		return -EINVAL;

	offset = val >> KVM_ITS_ITE_NEXT_SHIFT;
	if (event_id + offset >= BIT_ULL(dev->num_eventid_bits))
		return -EINVAL;

	collection = find_collection(its, coll_id);
	if (!collection)
		return -EINVAL;

	ite = vgic_its_alloc_ite(dev, collection, event_id);
	if (IS_ERR(ite))
		return PTR_ERR(ite);

	if (its_is_collection_mapped(collection))
		vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq = vgic_add_lpi(kvm, lpi_id, vcpu);
	if (IS_ERR(irq))
		return PTR_ERR(irq);
	ite->irq = irq;

	return offset;
}
static int vgic_its_ite_cmp(void *priv, struct list_head *a,
			    struct list_head *b)
{
	struct its_ite *itea = container_of(a, struct its_ite, ite_list);
	struct its_ite *iteb = container_of(b, struct its_ite, ite_list);

	if (itea->event_id < iteb->event_id)
		return -1;
	else
		return 1;
}
static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	gpa_t base = device->itt_addr;
	struct its_ite *ite;
	int ret;
	int ite_esz = abi->ite_esz;

	list_sort(NULL, &device->itt_head, vgic_its_ite_cmp);

	list_for_each_entry(ite, &device->itt_head, ite_list) {
		gpa_t gpa = base + ite->event_id * ite_esz;

		/*
		 * If an LPI carries the HW bit, this means that this
		 * interrupt is controlled by GICv4, and we do not
		 * have direct access to that state. Let's simply fail
		 * the save operation...
		 */
		if (ite->irq->hw)
			return -EACCES;

		ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
		if (ret)
			return ret;
	}
	return 0;
}
/**
 * vgic_its_restore_itt - restore the ITT of a device
 *
 * @its: its handle
 * @dev: device handle
 *
 * Return 0 on success, < 0 on error
 */
static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	gpa_t base = dev->itt_addr;
	int ret;
	int ite_esz = abi->ite_esz;
	size_t max_size = BIT_ULL(dev->num_eventid_bits) * ite_esz;

	ret = scan_its_table(its, base, max_size, ite_esz, 0,
			     vgic_its_restore_ite, dev);

	/* scan_its_table returns +1 if all ITEs are invalid */
	if (ret > 0)
		ret = 0;

	return ret;
}
/**
 * vgic_its_save_dte - Save a device table entry at a given GPA
 *
 * @its: ITS handle
 * @dev: ITS device
 * @ptr: GPA
 */
static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
			     gpa_t ptr, int dte_esz)
{
	struct kvm *kvm = its->dev->kvm;
	u64 val, itt_addr_field;
	u32 next_offset;

	itt_addr_field = dev->itt_addr >> 8;
	next_offset = compute_next_devid_offset(&its->device_list, dev);
	val = (1ULL << KVM_ITS_DTE_VALID_SHIFT |
	       ((u64)next_offset << KVM_ITS_DTE_NEXT_SHIFT) |
	       (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
		(dev->num_eventid_bits - 1));
	val = cpu_to_le64(val);
	return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
}
/**
 * vgic_its_restore_dte - restore a device table entry
 *
 * @its: its handle
 * @id: device id the DTE corresponds to
 * @ptr: kernel VA where the 8 byte DTE is located
 * @opaque: unused
 *
 * Return: < 0 on error, 0 if the dte is the last one, id offset to the
 * next dte otherwise
 */
static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
				void *ptr, void *opaque)
{
	struct its_device *dev;
	gpa_t itt_addr;
	u8 num_eventid_bits;
	u64 entry = *(u64 *)ptr;
	bool valid;
	u32 offset;
	int ret;

	entry = le64_to_cpu(entry);

	valid = entry >> KVM_ITS_DTE_VALID_SHIFT;
	num_eventid_bits = (entry & KVM_ITS_DTE_SIZE_MASK) + 1;
	itt_addr = ((entry & KVM_ITS_DTE_ITTADDR_MASK)
			>> KVM_ITS_DTE_ITTADDR_SHIFT) << 8;

	if (!valid)
		return 1;

	/* dte entry is valid */
	offset = (entry & KVM_ITS_DTE_NEXT_MASK) >> KVM_ITS_DTE_NEXT_SHIFT;

	dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	ret = vgic_its_restore_itt(its, dev);
	if (ret) {
		vgic_its_free_device(its->dev->kvm, dev);
		return ret;
	}

	return offset;
}
, struct list_head
*a
,
2334 struct list_head
*b
)
2336 struct its_device
*deva
= container_of(a
, struct its_device
, dev_list
);
2337 struct its_device
*devb
= container_of(b
, struct its_device
, dev_list
);
2339 if (deva
->device_id
< devb
->device_id
)
/**
 * vgic_its_save_device_tables - Save the device table and all ITT
 * into guest RAM
 *
 * L1/L2 handling is hidden by vgic_its_check_id() helper which directly
 * returns the GPA of the device entry
 */
static int vgic_its_save_device_tables(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_device_table;
	struct its_device *dev;
	int dte_esz = abi->dte_esz;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	list_sort(NULL, &its->device_list, vgic_its_device_cmp);

	list_for_each_entry(dev, &its->device_list, dev_list) {
		int ret;
		gpa_t eaddr;

		if (!vgic_its_check_id(its, baser,
				       dev->device_id, &eaddr))
			return -EINVAL;

		ret = vgic_its_save_itt(its, dev);
		if (ret)
			return ret;

		ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
		if (ret)
			return ret;
	}
	return 0;
}
/**
 * handle_l1_dte - callback used for L1 device table entries (2 stage case)
 *
 * @its: its handle
 * @id: index of the entry in the L1 table
 * @addr: kernel VA
 * @opaque: unused
 *
 * L1 table entries are scanned by steps of 1 entry
 * Return < 0 if error, 0 if last dte was found when scanning the L2
 * table, +1 otherwise (meaning next L1 entry must be scanned)
 */
static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
			 void *opaque)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int l2_start_id = id * (SZ_64K / abi->dte_esz);
	u64 entry = *(u64 *)addr;
	int dte_esz = abi->dte_esz;
	gpa_t gpa;
	int ret;

	entry = le64_to_cpu(entry);

	if (!(entry & KVM_ITS_L1E_VALID_MASK))
		return 1;

	gpa = entry & KVM_ITS_L1E_ADDR_MASK;

	ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
			     l2_start_id, vgic_its_restore_dte, NULL);

	return ret;
}
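/*
 * Sizing note (illustrative, assuming the common 8-byte DTE): each L2
 * table is one 64K page, so a single L1 entry covers SZ_64K / 8 = 8192
 * device ids, which is the step by which l2_start_id advances per L1
 * index above.
 */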
/**
 * vgic_its_restore_device_tables - Restore the device table and all ITT
 * from guest RAM to internal data structs
 */
static int vgic_its_restore_device_tables(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_device_table;
	int l1_esz, ret;
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	gpa_t l1_gpa;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	l1_gpa = GITS_BASER_ADDR_48_to_52(baser);

	if (baser & GITS_BASER_INDIRECT) {
		l1_esz = GITS_LVL1_ENTRY_SIZE;
		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
				     handle_l1_dte, NULL);
	} else {
		l1_esz = abi->dte_esz;
		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
				     vgic_its_restore_dte, NULL);
	}

	/* scan_its_table returns +1 if all entries are invalid */
	if (ret > 0)
		ret = 0;

	return ret;
}
static int vgic_its_save_cte(struct vgic_its *its,
			     struct its_collection *collection,
			     gpa_t gpa, int esz)
{
	u64 val;

	val = (1ULL << KVM_ITS_CTE_VALID_SHIFT |
	       ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
	       collection->collection_id);
	val = cpu_to_le64(val);
	return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
}
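/*
 * The CTE layout encoded above, sketched from the shifts used here
 * (the KVM_ITS_CTE_* macros are authoritative): bit 63 is the valid
 * bit, the target vcpu id (RDBase) sits at bit 16 upwards, and the
 * collection id (ICID) occupies the low 16 bits.
 */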
static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
{
	struct its_collection *collection;
	struct kvm *kvm = its->dev->kvm;
	u32 target_addr, coll_id;
	u64 val;
	int ret;

	BUG_ON(esz > sizeof(val));
	ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
	if (ret)
		return ret;
	val = le64_to_cpu(val);
	if (!(val & KVM_ITS_CTE_VALID_MASK))
		return 0;

	target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
	coll_id = val & KVM_ITS_CTE_ICID_MASK;

	if (target_addr != COLLECTION_NOT_MAPPED &&
	    target_addr >= atomic_read(&kvm->online_vcpus))
		return -EINVAL;

	collection = find_collection(its, coll_id);
	if (collection)
		return -EEXIST;
	ret = vgic_its_alloc_collection(its, &collection, coll_id);
	if (ret)
		return ret;
	collection->target_addr = target_addr;
	return 1;
}
/**
 * vgic_its_save_collection_table - Save the collection table into
 * guest RAM
 */
static int vgic_its_save_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_coll_table;
	gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser);
	struct its_collection *collection;
	u64 val;
	size_t max_size, filled = 0;
	int ret, cte_esz = abi->cte_esz;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
		if (ret)
			return ret;
		gpa += cte_esz;
		filled += cte_esz;
	}

	if (filled == max_size)
		return 0;

	/*
	 * table is not fully filled, add a last dummy element
	 * with valid bit unset
	 */
	val = 0;
	BUG_ON(cte_esz > sizeof(val));
	ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
	return ret;
}
/**
 * vgic_its_restore_collection_table - reads the collection table
 * in guest memory and restores the ITS internal state. Requires the
 * BASER registers to be restored before.
 */
static int vgic_its_restore_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_coll_table;
	int cte_esz = abi->cte_esz;
	size_t max_size, read = 0;
	gpa_t gpa;
	int ret;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	gpa = GITS_BASER_ADDR_48_to_52(baser);

	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

	while (read < max_size) {
		ret = vgic_its_restore_cte(its, gpa, cte_esz);
		if (ret <= 0)
			break;
		gpa += cte_esz;
		read += cte_esz;
	}

	if (ret > 0)
		ret = 0;

	return ret;
}
/**
 * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
 * according to v0 ABI
 */
static int vgic_its_save_tables_v0(struct vgic_its *its)
{
	int ret;

	ret = vgic_its_save_device_tables(its);
	if (ret)
		return ret;

	return vgic_its_save_collection_table(its);
}
/**
 * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
 * to internal data structs according to V0 ABI
 */
static int vgic_its_restore_tables_v0(struct vgic_its *its)
{
	int ret;

	ret = vgic_its_restore_collection_table(its);
	if (ret)
		return ret;

	return vgic_its_restore_device_tables(its);
}
static int vgic_its_commit_v0(struct vgic_its *its)
{
	const struct vgic_its_abi *abi;

	abi = vgic_its_get_abi(its);
	its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
	its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;

	its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);

	its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);
	return 0;
}
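/*
 * Encoding example (assuming GIC_ENCODE_SZ(n, w) stores n - 1 in a
 * w-bit field, matching GITS_BASER.Entry_Size holding "size minus
 * one"): an 8-byte entry yields GIC_ENCODE_SZ(8, 5) == 7, placed at
 * GITS_BASER_ENTRY_SIZE_SHIFT in the shadow BASER value.
 */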
static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
{
	/* We need to keep the ABI specific field values */
	its->baser_coll_table &= ~GITS_BASER_VALID;
	its->baser_device_table &= ~GITS_BASER_VALID;
	its->cbaser = 0;
	its->creadr = 0;
	its->cwriter = 0;
	its->enabled = 0;
	vgic_its_free_device_list(kvm, its);
	vgic_its_free_collection_list(kvm, its);
}
static int vgic_its_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_ITS_ADDR_TYPE:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		case KVM_DEV_ARM_ITS_CTRL_RESET:
			return 0;
		case KVM_DEV_ARM_ITS_SAVE_TABLES:
			return 0;
		case KVM_DEV_ARM_ITS_RESTORE_TABLES:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
		return vgic_its_has_attr_regs(dev, attr);
	}
	return -ENXIO;
}
static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int ret = 0;

	if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
		return 0;

	mutex_lock(&kvm->lock);
	mutex_lock(&its->its_lock);

	if (!lock_all_vcpus(kvm)) {
		mutex_unlock(&its->its_lock);
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	switch (attr) {
	case KVM_DEV_ARM_ITS_CTRL_RESET:
		vgic_its_reset(kvm, its);
		break;
	case KVM_DEV_ARM_ITS_SAVE_TABLES:
		ret = abi->save_tables(its);
		break;
	case KVM_DEV_ARM_ITS_RESTORE_TABLES:
		ret = abi->restore_tables(its);
		break;
	}

	unlock_all_vcpus(kvm);
	mutex_unlock(&its->its_lock);
	mutex_unlock(&kvm->lock);
	return ret;
}
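/*
 * From userspace, these control operations are reached with
 * KVM_SET_DEVICE_ATTR on the ITS device fd. A minimal sketch (its_fd
 * is hypothetical and error handling is omitted):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
 *		.attr  = KVM_DEV_ARM_ITS_SAVE_TABLES,
 *	};
 *
 *	ioctl(its_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * KVM_DEV_ARM_ITS_RESTORE_TABLES is used symmetrically on the
 * destination, after the BASER registers have been restored.
 */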
static int vgic_its_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct vgic_its *its = dev->private;
	int ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;
		u64 addr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
					addr, SZ_64K);
		if (ret)
			return ret;

		return vgic_register_its_iodev(dev->kvm, its, addr);
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		return vgic_its_ctrl(dev->kvm, its, attr->attr);
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_its_attr_regs_access(dev, attr, &reg, true);
	}
	}
	return -ENXIO;
}
static int vgic_its_get_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		struct vgic_its *its = dev->private;
		u64 addr = its->vgic_its_base;
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;
		int ret;

		ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	default:
		return -ENXIO;
	}

	return 0;
}
static struct kvm_device_ops kvm_arm_vgic_its_ops = {
	.name = "kvm-arm-vgic-its",
	.create = vgic_its_create,
	.destroy = vgic_its_destroy,
	.set_attr = vgic_its_set_attr,
	.get_attr = vgic_its_get_attr,
	.has_attr = vgic_its_has_attr,
};

int kvm_vgic_register_its_device(void)
{
	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
				       KVM_DEV_TYPE_ARM_VGIC_ITS);
}
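/*
 * Userspace instantiates one ITS per VM with KVM_CREATE_DEVICE and
 * then places it in guest physical space through the
 * KVM_DEV_ARM_VGIC_GRP_ADDR group. A minimal sketch (vm_fd and the
 * base address are hypothetical; error handling omitted):
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_ARM_VGIC_ITS,
 *	};
 *	__u64 base = 0x08080000;	// example 64K-aligned GPA
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr  = KVM_VGIC_ITS_ADDR_TYPE,
 *		.addr  = (__u64)&base,
 *	};
 *
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 */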