// SPDX-License-Identifier: GPL-2.0-only
/*
 * GICv3 ITS emulation
 *
 * Copyright (C) 2015,2016 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/list_sort.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"
static int vgic_its_save_tables_v0(struct vgic_its *its);
static int vgic_its_restore_tables_v0(struct vgic_its *its);
static int vgic_its_commit_v0(struct vgic_its *its);
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu, bool needs_inv);
/*
 * Creates a new (reference to a) struct vgic_irq for a given LPI.
 * If this LPI is already mapped on another ITS, we increase its refcount
 * and return a pointer to the existing structure.
 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 * This function returns a pointer to the _unlocked_ structure.
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
				     struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
	unsigned long flags;
	int ret;

	/* In this case there is no put, since we keep the reference. */
	if (irq)
		return irq;

	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT);
	if (!irq)
		return ERR_PTR(-ENOMEM);

	ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
	if (ret) {
		kfree(irq);
		return ERR_PTR(ret);
	}

	INIT_LIST_HEAD(&irq->ap_list);
	raw_spin_lock_init(&irq->irq_lock);

	irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);
	irq->intid = intid;
	irq->target_vcpu = vcpu;
	irq->group = 1;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	/*
	 * There could be a race with another vgic_add_lpi(), so we need to
	 * check that we don't add a second list entry with the same LPI.
	 */
	oldirq = xa_load(&dist->lpi_xa, intid);
	if (vgic_try_get_irq_kref(oldirq)) {
		/* Someone was faster with adding this LPI, let's use that. */
		kfree(irq);
		irq = oldirq;

		goto out_unlock;
	}

	ret = xa_err(xa_store(&dist->lpi_xa, intid, irq, 0));
	if (ret) {
		xa_release(&dist->lpi_xa, intid);
		kfree(irq);

		goto out_unlock;
	}

	atomic_inc(&dist->lpi_count);

out_unlock:
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	if (ret)
		return ERR_PTR(ret);

	/*
	 * We "cache" the configuration table entries in our struct vgic_irq's.
	 * However we only have those structs for mapped IRQs, so we read in
	 * the respective config data from memory here upon mapping the LPI.
	 *
	 * Should any of these fail, behave as if we couldn't create the LPI
	 * by dropping the refcount and returning the error.
	 */
	ret = update_lpi_config(kvm, irq, NULL, false);
	if (ret) {
		vgic_put_irq(kvm, irq);
		return ERR_PTR(ret);
	}

	ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
	if (ret) {
		vgic_put_irq(kvm, irq);
		return ERR_PTR(ret);
	}

	return irq;
}
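/*
 * A note on the allocation dance above (reasoning inferred from the code,
 * not spelled out in the original): the xarray slot is reserved before
 * lpi_list_lock is taken, so the potentially sleeping GFP_KERNEL_ACCOUNT
 * allocation happens outside the raw spinlock and the later xa_store()
 * into the reserved slot cannot fail for lack of memory. Re-checking with
 * xa_load() under the lock closes the window in which a concurrent
 * vgic_add_lpi() maps the same INTID first.
 */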
struct its_device {
	struct list_head dev_list;

	/* the head for the list of ITTEs */
	struct list_head itt_head;
	u32 num_eventid_bits;
	gpa_t itt_addr;
	u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

struct its_collection {
	struct list_head coll_list;

	u32 collection_id;
	u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
	((coll)->target_addr != COLLECTION_NOT_MAPPED))

struct its_ite {
	struct list_head ite_list;

	struct vgic_irq *irq;
	struct its_collection *collection;
	u32 event_id;
};

struct vgic_translation_cache_entry {
	struct list_head	entry;
	phys_addr_t		db;
	u32			devid;
	u32			eventid;
	struct vgic_irq		*irq;
};
/**
 * struct vgic_its_abi - ITS abi ops and settings
 * @cte_esz: collection table entry size
 * @dte_esz: device table entry size
 * @ite_esz: interrupt translation table entry size
 * @save_tables: save the ITS tables into guest RAM
 * @restore_tables: restore the ITS internal structs from tables
 *  stored in guest RAM
 * @commit: initialize the registers which expose the ABI settings,
 *  especially the entry sizes
 */
struct vgic_its_abi {
	int cte_esz;
	int dte_esz;
	int ite_esz;
	int (*save_tables)(struct vgic_its *its);
	int (*restore_tables)(struct vgic_its *its);
	int (*commit)(struct vgic_its *its);
};

#define ABI_0_ESZ	8
#define ESZ_MAX		ABI_0_ESZ
static const struct vgic_its_abi its_table_abi_versions[] = {
	[0] = {
	 .cte_esz = ABI_0_ESZ,
	 .dte_esz = ABI_0_ESZ,
	 .ite_esz = ABI_0_ESZ,
	 .save_tables = vgic_its_save_tables_v0,
	 .restore_tables = vgic_its_restore_tables_v0,
	 .commit = vgic_its_commit_v0,
	},
};

#define NR_ITS_ABIS	ARRAY_SIZE(its_table_abi_versions)
inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
{
	return &its_table_abi_versions[its->abi_rev];
}

static int vgic_its_set_abi(struct vgic_its *its, u32 rev)
{
	const struct vgic_its_abi *abi;

	its->abi_rev = rev;
	abi = vgic_its_get_abi(its);
	return abi->commit(its);
}
/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
	struct its_device *device;

	list_for_each_entry(device, &its->device_list, dev_list)
		if (device_id == device->device_id)
			return device;

	return NULL;
}
/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
				u32 event_id)
{
	struct its_device *device;
	struct its_ite *ite;

	device = find_its_device(its, device_id);
	if (device == NULL)
		return NULL;

	list_for_each_entry(ite, &device->itt_head, ite_list)
		if (ite->event_id == event_id)
			return ite;

	return NULL;
}
/* To be used as an iterator this macro misses the enclosing parentheses */
#define for_each_lpi_its(dev, ite, its) \
	list_for_each_entry(dev, &(its)->device_list, dev_list) \
		list_for_each_entry(ite, &(dev)->itt_head, ite_list)

#define GIC_LPI_OFFSET 8192

#define VITS_TYPER_IDBITS		16
#define VITS_TYPER_DEVBITS		16
#define VITS_DTE_MAX_DEVID_OFFSET	(BIT(14) - 1)
#define VITS_ITE_MAX_EVENTID_OFFSET	(BIT(16) - 1)
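/*
 * GIC_LPI_OFFSET reflects the architected INTID space: the GICv3
 * specification places LPIs at INTIDs 8192 and up, so every event
 * translation performed below resolves to an INTID >= 8192.
 */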
/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
	struct its_collection *collection;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		if (coll_id == collection->collection_id)
			return collection;
	}

	return NULL;
}
#define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)

/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu, bool needs_inv)
{
	u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
	u8 prop;
	int ret;
	unsigned long flags;

	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
				  &prop, 1);
	if (ret)
		return ret;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);
		irq->enabled = LPI_PROP_ENABLE_BIT(prop);

		if (!irq->hw) {
			vgic_queue_irq_unlock(kvm, irq, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	if (irq->hw)
		return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);

	return 0;
}
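/*
 * The property byte read above follows the architected LPI configuration
 * table layout: bit 0 enables the LPI and bits [7:2] hold its priority,
 * which is exactly what the LPI_PROP_* accessors extract.
 */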
#define GIC_LPI_MAX_INTID	((1 << INTERRUPT_ID_BITS_ITS) - 1)

/*
 * Create a snapshot of the current LPIs targeting @vcpu, so that we can
 * enumerate those LPIs without holding any lock.
 * Returns their number and puts the kmalloc'ed array into intid_ptr.
 */
int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	XA_STATE(xas, &dist->lpi_xa, GIC_LPI_OFFSET);
	struct vgic_irq *irq;
	unsigned long flags;
	u32 *intids;
	int irq_count, i = 0;

	/*
	 * There is an obvious race between allocating the array and LPIs
	 * being mapped/unmapped. If we ended up here as a result of a
	 * command, we're safe (locks are held, preventing another
	 * command). If coming from another path (such as enabling LPIs),
	 * we must be careful not to overrun the array.
	 */
	irq_count = atomic_read(&dist->lpi_count);
	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL_ACCOUNT);
	if (!intids)
		return -ENOMEM;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
	rcu_read_lock();

	xas_for_each(&xas, irq, GIC_LPI_MAX_INTID) {
		if (i == irq_count)
			break;
		/* We don't need to "get" the IRQ, as we hold the list lock. */
		if (vcpu && irq->target_vcpu != vcpu)
			continue;
		intids[i++] = irq->intid;
	}

	rcu_read_unlock();
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	*intid_ptr = intids;
	return i;
}
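/*
 * Callers treat the returned array as a consistent snapshot: they can
 * drop all locks, walk the INTIDs at leisure (re-acquiring a reference
 * per IRQ via vgic_get_irq()), and must kfree() the array when done,
 * as its_sync_lpi_pending_table() and vgic_its_invall() below do.
 */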
static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
{
	int ret = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->target_vcpu = vcpu;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	if (irq->hw) {
		struct its_vlpi_map map;

		ret = its_get_vlpi(irq->host_irq, &map);
		if (ret)
			return ret;

		if (map.vpe)
			atomic_dec(&map.vpe->vlpi_count);
		map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
		atomic_inc(&map.vpe->vlpi_count);

		ret = its_map_vlpi(irq->host_irq, &map);
	}

	return ret;
}
static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm,
					   struct its_collection *col)
{
	return kvm_get_vcpu_by_id(kvm, col->target_addr);
}
/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
 * is targeting) to the VGIC's view, which deals with target VCPUs.
 * Needs to be called whenever either the collection for an LPI has
 * changed or the collection itself got retargeted.
 */
static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
{
	struct kvm_vcpu *vcpu;

	if (!its_is_collection_mapped(ite->collection))
		return;

	vcpu = collection_to_vcpu(kvm, ite->collection);
	update_affinity(ite->irq, vcpu);
}
/*
 * Updates the target VCPU for every LPI targeting this collection.
 * Must be called with the its_lock mutex held.
 */
static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
				       struct its_collection *coll)
{
	struct its_device *device;
	struct its_ite *ite;

	for_each_lpi_its(device, ite, its) {
		if (ite->collection != coll)
			continue;

		update_affinity_ite(kvm, ite);
	}
}
static u32 max_lpis_propbaser(u64 propbaser)
{
	int nr_idbits = (propbaser & 0x1f) + 1;

	return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
}
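/*
 * Worked example: a guest programming PROPBASER.IDbits = 15 requests
 * 2^16 = 65536 interrupt IDs; since INTERRUPT_ID_BITS_ITS is 16, the
 * min() above yields 1U << 16 either way, making LPI INTIDs
 * 8192..65535 addressable.
 */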
/*
 * Sync the pending table pending bit of LPIs targeting @vcpu
 * with our own data structures. This relies on the LPI being
 * mapped before.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
	gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
	struct vgic_irq *irq;
	int last_byte_offset = -1;
	int ret = 0;
	u32 *intids;
	int nr_irqs, i;
	unsigned long flags;
	u8 pendmask;

	nr_irqs = vgic_copy_lpi_list(vcpu->kvm, vcpu, &intids);
	if (nr_irqs < 0)
		return nr_irqs;

	for (i = 0; i < nr_irqs; i++) {
		int byte_offset, bit_nr;

		byte_offset = intids[i] / BITS_PER_BYTE;
		bit_nr = intids[i] % BITS_PER_BYTE;

		/*
		 * For contiguously allocated LPIs chances are we just read
		 * this very same byte in the last iteration. Reuse that.
		 */
		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest_lock(vcpu->kvm,
						  pendbase + byte_offset,
						  &pendmask, 1);
			if (ret) {
				kfree(intids);
				return ret;
			}
			last_byte_offset = byte_offset;
		}

		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
		if (!irq)
			continue;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = pendmask & (1U << bit_nr);
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}

	kfree(intids);

	return ret;
}
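/*
 * The pending table is a plain bitmap in guest memory with one bit per
 * INTID: the byte_offset/bit_nr arithmetic above maps, say, INTID 8195
 * to byte 1024, bit 3 of the table at GICR_PENDBASER.
 */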
static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 reg = GITS_TYPER_PLPIS;

	/*
	 * We use linear CPU numbers for redistributor addressing,
	 * so GITS_TYPER.PTA is 0.
	 * Also we force all PROPBASER registers to be the same, so
	 * CommonLPIAff is 0 as well.
	 * To avoid memory waste in the guest, we keep the number of IDBits and
	 * DevBits low - at least for the time being.
	 */
	reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;

	return extract_bytes(reg, addr & 7, len);
}
static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 val;

	val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
	val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
	return val;
}

static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
					    struct vgic_its *its,
					    gpa_t addr, unsigned int len,
					    unsigned long val)
{
	u32 rev = GITS_IIDR_REV(val);

	if (rev >= NR_ITS_ABIS)
		return -EINVAL;

	return vgic_its_set_abi(its, rev);
}
static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GITS_PIDR0:
		return 0x92;	/* part number, bits[7:0] */
	case GITS_PIDR1:
		return 0xb4;	/* part number, bits[11:8] */
	case GITS_PIDR2:
		return GIC_PIDR2_ARCH_GICv3 | 0x0b;
	case GITS_PIDR4:
		return 0x40;	/* This is a 64K software visible page */
	/* The following are the ID registers for (any) GIC. */
	case GITS_CIDR0:
		return 0x0d;
	case GITS_CIDR1:
		return 0xf0;
	case GITS_CIDR2:
		return 0x05;
	case GITS_CIDR3:
		return 0xb1;
	}

	return 0;
}
static struct vgic_irq *__vgic_its_check_cache(struct vgic_dist *dist,
					       phys_addr_t db,
					       u32 devid, u32 eventid)
{
	struct vgic_translation_cache_entry *cte;

	list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
		/*
		 * If we hit a NULL entry, there is nothing after this
		 * point.
		 */
		if (!cte->irq)
			break;

		if (cte->db != db || cte->devid != devid ||
		    cte->eventid != eventid)
			continue;

		/*
		 * Move this entry to the head, as it is the most
		 * recently used.
		 */
		if (!list_is_first(&cte->entry, &dist->lpi_translation_cache))
			list_move(&cte->entry, &dist->lpi_translation_cache);

		return cte->irq;
	}

	return NULL;
}
static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
					     u32 devid, u32 eventid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	irq = __vgic_its_check_cache(dist, db, devid, eventid);
	if (!vgic_try_get_irq_kref(irq))
		irq = NULL;

	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	return irq;
}
static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
				       u32 devid, u32 eventid,
				       struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_translation_cache_entry *cte;
	unsigned long flags;
	phys_addr_t db;

	/* Do not cache a directly injected interrupt */
	if (irq->hw)
		return;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	if (unlikely(list_empty(&dist->lpi_translation_cache)))
		goto out;

	/*
	 * We could have raced with another CPU caching the same
	 * translation behind our back, so let's check it is not
	 * already in there.
	 */
	db = its->vgic_its_base + GITS_TRANSLATER;
	if (__vgic_its_check_cache(dist, db, devid, eventid))
		goto out;

	/* Always reuse the last entry (LRU policy) */
	cte = list_last_entry(&dist->lpi_translation_cache,
			      typeof(*cte), entry);

	/*
	 * Caching the translation implies having an extra reference
	 * to the interrupt, so drop the potential reference on what
	 * was in the cache, and increment it on the new interrupt.
	 */
	if (cte->irq)
		vgic_put_irq(kvm, cte->irq);

	/*
	 * The irq refcount is guaranteed to be nonzero while holding the
	 * its_lock, as the ITE (and the reference it holds) cannot be freed.
	 */
	lockdep_assert_held(&its->its_lock);
	vgic_get_irq_kref(irq);

	cte->db		= db;
	cte->devid	= devid;
	cte->eventid	= eventid;
	cte->irq	= irq;

	/* Move the new translation to the head of the list */
	list_move(&cte->entry, &dist->lpi_translation_cache);

out:
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}
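/*
 * Together, the cache helpers above implement a small LRU: a lookup hit
 * is promoted to the head of lpi_translation_cache, and a new translation
 * recycles the tail entry, so the least recently used mapping is always
 * the one evicted.
 */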
void vgic_its_invalidate_cache(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_translation_cache_entry *cte;
	unsigned long flags;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
		/*
		 * If we hit a NULL entry, there is nothing after this
		 * point.
		 */
		if (!cte->irq)
			break;

		vgic_put_irq(kvm, cte->irq);
		cte->irq = NULL;
	}

	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
			 u32 devid, u32 eventid, struct vgic_irq **irq)
{
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;

	if (!its->enabled)
		return -EBUSY;

	ite = find_ite(its, devid, eventid);
	if (!ite || !its_is_collection_mapped(ite->collection))
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	vcpu = collection_to_vcpu(kvm, ite->collection);
	if (!vcpu)
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	if (!vgic_lpis_enabled(vcpu))
		return -EBUSY;

	vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);

	*irq = ite->irq;
	return 0;
}
struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
{
	u64 address;
	struct kvm_io_device *kvm_io_dev;
	struct vgic_io_device *iodev;

	if (!vgic_has_its(kvm))
		return ERR_PTR(-ENODEV);

	if (!(msi->flags & KVM_MSI_VALID_DEVID))
		return ERR_PTR(-EINVAL);

	address = (u64)msi->address_hi << 32 | msi->address_lo;

	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
	if (!kvm_io_dev)
		return ERR_PTR(-EINVAL);

	if (kvm_io_dev->ops != &kvm_io_gic_ops)
		return ERR_PTR(-EINVAL);

	iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
	if (iodev->iodev_type != IODEV_ITS)
		return ERR_PTR(-EINVAL);

	return iodev->its;
}
/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping
 * related errors and negative error values for generic errors.
 */
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
				u32 devid, u32 eventid)
{
	struct vgic_irq *irq = NULL;
	unsigned long flags;
	int err;

	err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
	if (err)
		return err;

	if (irq->hw)
		return irq_set_irqchip_state(irq->host_irq,
					     IRQCHIP_STATE_PENDING, true);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, irq, flags);

	return 0;
}
int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
{
	struct vgic_irq *irq;
	unsigned long flags;
	phys_addr_t db;

	db = (u64)msi->address_hi << 32 | msi->address_lo;
	irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data);
	if (!irq)
		return -EWOULDBLOCK;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, irq, flags);
	vgic_put_irq(kvm, irq);

	return 0;
}
/*
 * Queries the KVM IO bus framework to get the ITS pointer from the given
 * doorbell address.
 * We then call vgic_its_trigger_msi() with the decoded data.
 * According to the KVM_SIGNAL_MSI API description returns 1 on success.
 */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	struct vgic_its *its;
	int ret;

	if (!vgic_its_inject_cached_translation(kvm, msi))
		return 1;

	its = vgic_msi_to_its(kvm, msi);
	if (IS_ERR(its))
		return PTR_ERR(its);

	mutex_lock(&its->its_lock);
	ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
	mutex_unlock(&its->its_lock);

	if (ret < 0)
		return ret;

	/*
	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
	 * if the guest has blocked the MSI. So we map any LPI mapping
	 * related error to that.
	 */
	if (ret)
		return 0;
	else
		return 1;
}
/* Requires the its_lock to be held. */
static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
{
	list_del(&ite->ite_list);

	/* This put matches the get in vgic_add_lpi. */
	if (ite->irq) {
		if (ite->irq->hw)
			WARN_ON(its_unmap_vlpi(ite->irq->host_irq));

		vgic_put_irq(kvm, ite->irq);
	}

	kfree(ite);
}

static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
	return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}

#define its_cmd_get_command(cmd)	its_cmd_mask_field(cmd, 0,  0,  8)
#define its_cmd_get_deviceid(cmd)	its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_size(cmd)		(its_cmd_mask_field(cmd, 1,  0,  5) + 1)
#define its_cmd_get_id(cmd)		its_cmd_mask_field(cmd, 1,  0, 32)
#define its_cmd_get_physical_id(cmd)	its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)	its_cmd_mask_field(cmd, 2,  0, 16)
#define its_cmd_get_ittaddr(cmd)	(its_cmd_mask_field(cmd, 2,  8, 44) << 8)
#define its_cmd_get_target_addr(cmd)	its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)	its_cmd_mask_field(cmd, 2, 63,  1)
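/*
 * A sketch of how these accessors decode a 32-byte command, using MAPTI
 * as an example (field placement follows from the shifts and widths
 * encoded above):
 *
 *	u64 cmd[4];				   // one command queue entry
 *	u32 devid  = its_cmd_get_deviceid(cmd);	   // DW0, bits [63:32]
 *	u32 evid   = its_cmd_get_id(cmd);	   // DW1, bits [31:0]
 *	u32 pintid = its_cmd_get_physical_id(cmd); // DW1, bits [63:32]
 *	u16 icid   = its_cmd_get_collection(cmd);  // DW2, bits [15:0]
 */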
/*
 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
				       u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (ite && its_is_collection_mapped(ite->collection)) {
		/*
		 * Though the spec talks about removing the pending state, we
		 * don't bother here since we clear the ITTE anyway and the
		 * pending state is a property of the ITTE struct.
		 */
		vgic_its_invalidate_cache(kvm);

		its_free_ite(kvm, ite);
		return 0;
	}

	return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
}
/*
 * The MOVI command moves an ITTE to a different collection.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;
	struct its_collection *collection;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_MOVI_UNMAPPED_INTERRUPT;

	if (!its_is_collection_mapped(ite->collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	ite->collection = collection;
	vcpu = collection_to_vcpu(kvm, collection);

	vgic_its_invalidate_cache(kvm);

	return update_affinity(ite->irq, vcpu);
}
static bool __is_visible_gfn_locked(struct vgic_its *its, gpa_t gpa)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int idx;
	bool ret;

	idx = srcu_read_lock(&its->dev->kvm->srcu);
	ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
	srcu_read_unlock(&its->dev->kvm->srcu, idx);
	return ret;
}
/*
 * Check whether an ID can be stored into the corresponding guest table.
 * For a direct table this is pretty easy, but gets a bit nasty for
 * indirect tables. We check whether the resulting guest physical address
 * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
			      gpa_t *eaddr)
{
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
	phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
	int esz = GITS_BASER_ENTRY_SIZE(baser);
	int index;

	switch (type) {
	case GITS_BASER_TYPE_DEVICE:
		if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
			return false;
		break;
	case GITS_BASER_TYPE_COLLECTION:
		/* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
		if (id >= BIT_ULL(16))
			return false;
		break;
	default:
		return false;
	}

	if (!(baser & GITS_BASER_INDIRECT)) {
		phys_addr_t addr;

		if (id >= (l1_tbl_size / esz))
			return false;

		addr = base + id * esz;

		if (eaddr)
			*eaddr = addr;

		return __is_visible_gfn_locked(its, addr);
	}

	/* calculate and check the index into the 1st level */
	index = id / (SZ_64K / esz);
	if (index >= (l1_tbl_size / sizeof(u64)))
		return false;

	/* Each 1st level entry is represented by a 64-bit value. */
	if (kvm_read_guest_lock(its->dev->kvm,
				base + index * sizeof(indirect_ptr),
				&indirect_ptr, sizeof(indirect_ptr)))
		return false;

	indirect_ptr = le64_to_cpu(indirect_ptr);

	/* check the valid bit of the first level entry */
	if (!(indirect_ptr & BIT_ULL(63)))
		return false;

	/* Mask the guest physical address and calculate the frame number. */
	indirect_ptr &= GENMASK_ULL(51, 16);

	/* Find the address of the actual entry */
	index = id % (SZ_64K / esz);
	indirect_ptr += index * esz;

	if (eaddr)
		*eaddr = indirect_ptr;

	return __is_visible_gfn_locked(its, indirect_ptr);
}
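/*
 * Indirect table arithmetic, by example: with 8-byte entries an L2 page
 * covers SZ_64K / 8 = 8192 IDs, so ID 10000 selects L1 slot
 * 10000 / 8192 = 1 and, once that L1 entry's valid bit checks out, lands
 * at offset (10000 % 8192) * 8 within the L2 page it points to.
 */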
/*
 * Check whether an event ID can be stored in the corresponding Interrupt
 * Translation Table, which starts at device->itt_addr.
 */
static bool vgic_its_check_event_id(struct vgic_its *its, struct its_device *device,
				    u32 event_id)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int ite_esz = abi->ite_esz;
	gpa_t gpa;

	/* max table size is: BIT_ULL(device->num_eventid_bits) * ite_esz */
	if (event_id >= BIT_ULL(device->num_eventid_bits))
		return false;

	gpa = device->itt_addr + event_id * ite_esz;
	return __is_visible_gfn_locked(its, gpa);
}
/*
 * Add a new collection into the ITS collection table.
 * Returns 0 on success, and a negative error value for generic errors.
 */
static int vgic_its_alloc_collection(struct vgic_its *its,
				     struct its_collection **colp,
				     u32 coll_id)
{
	struct its_collection *collection;

	collection = kzalloc(sizeof(*collection), GFP_KERNEL_ACCOUNT);
	if (!collection)
		return -ENOMEM;

	collection->collection_id = coll_id;
	collection->target_addr = COLLECTION_NOT_MAPPED;

	list_add_tail(&collection->coll_list, &its->collection_list);
	*colp = collection;

	return 0;
}
static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
{
	struct its_collection *collection;
	struct its_device *device;
	struct its_ite *ite;

	/*
	 * Clearing the mapping for that collection ID removes the
	 * entry from the list. If there wasn't any before, we can
	 * go home early.
	 */
	collection = find_collection(its, coll_id);
	if (!collection)
		return;

	for_each_lpi_its(device, ite, its)
		if (ite->collection &&
		    ite->collection->collection_id == coll_id)
			ite->collection = NULL;

	list_del(&collection->coll_list);
	kfree(collection);
}
/* Must be called with its_lock mutex held */
static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
					  struct its_collection *collection,
					  u32 event_id)
{
	struct its_ite *ite;

	ite = kzalloc(sizeof(*ite), GFP_KERNEL_ACCOUNT);
	if (!ite)
		return ERR_PTR(-ENOMEM);

	ite->event_id	= event_id;
	ite->collection = collection;

	list_add_tail(&ite->ite_list, &device->itt_head);
	return ite;
}
/*
 * The MAPTI and MAPI commands map LPIs to ITTEs.
 * Must be called with its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_ite *ite;
	struct kvm_vcpu *vcpu = NULL;
	struct its_device *device;
	struct its_collection *collection, *new_coll = NULL;
	struct vgic_irq *irq;
	int lpi_nr;

	device = find_its_device(its, device_id);
	if (!device)
		return E_ITS_MAPTI_UNMAPPED_DEVICE;

	if (!vgic_its_check_event_id(its, device, event_id))
		return E_ITS_MAPTI_ID_OOR;

	if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
		lpi_nr = its_cmd_get_physical_id(its_cmd);
	else
		lpi_nr = event_id;
	if (lpi_nr < GIC_LPI_OFFSET ||
	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
		return E_ITS_MAPTI_PHYSICALID_OOR;

	/* If there is an existing mapping, behavior is UNPREDICTABLE. */
	if (find_ite(its, device_id, event_id))
		return 0;

	collection = find_collection(its, coll_id);
	if (!collection) {
		int ret;

		if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
			return E_ITS_MAPC_COLLECTION_OOR;

		ret = vgic_its_alloc_collection(its, &collection, coll_id);
		if (ret)
			return ret;
		new_coll = collection;
	}

	ite = vgic_its_alloc_ite(device, collection, event_id);
	if (IS_ERR(ite)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		return PTR_ERR(ite);
	}

	if (its_is_collection_mapped(collection))
		vcpu = collection_to_vcpu(kvm, collection);

	irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
	if (IS_ERR(irq)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		its_free_ite(kvm, ite);
		return PTR_ERR(irq);
	}
	ite->irq = irq;

	return 0;
}
/* Requires the its_lock to be held. */
static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
{
	struct its_ite *ite, *temp;

	/*
	 * The spec says that unmapping a device with still valid
	 * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
	 * since we cannot leave the memory unreferenced.
	 */
	list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
		its_free_ite(kvm, ite);

	vgic_its_invalidate_cache(kvm);

	list_del(&device->dev_list);
	kfree(device);
}
/* its lock must be held */
static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
{
	struct its_device *cur, *temp;

	list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
		vgic_its_free_device(kvm, cur);
}

/* its lock must be held */
static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
{
	struct its_collection *cur, *temp;

	list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
		vgic_its_free_collection(its, cur->collection_id);
}
/* Must be called with its_lock mutex held */
static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
						u32 device_id, gpa_t itt_addr,
						u8 num_eventid_bits)
{
	struct its_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL_ACCOUNT);
	if (!device)
		return ERR_PTR(-ENOMEM);

	device->device_id = device_id;
	device->itt_addr = itt_addr;
	device->num_eventid_bits = num_eventid_bits;
	INIT_LIST_HEAD(&device->itt_head);

	list_add_tail(&device->dev_list, &its->device_list);
	return device;
}
/*
 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	bool valid = its_cmd_get_validbit(its_cmd);
	u8 num_eventid_bits = its_cmd_get_size(its_cmd);
	gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
	struct its_device *device;

	if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
		return E_ITS_MAPD_DEVICE_OOR;

	if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
		return E_ITS_MAPD_ITTSIZE_OOR;

	device = find_its_device(its, device_id);

	/*
	 * The spec says that calling MAPD on an already mapped device
	 * invalidates all cached data for this device. We implement this
	 * by removing the mapping and re-establishing it.
	 */
	if (device)
		vgic_its_free_device(kvm, device);

	/*
	 * The spec does not say whether unmapping a not-mapped device
	 * is an error, so we are done in any case.
	 */
	if (!valid)
		return 0;

	device = vgic_its_alloc_device(its, device_id, itt_addr,
				       num_eventid_bits);

	return PTR_ERR_OR_ZERO(device);
}
/*
 * The MAPC command maps collection IDs to redistributors.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u16 coll_id;
	struct its_collection *collection;
	bool valid;

	valid = its_cmd_get_validbit(its_cmd);
	coll_id = its_cmd_get_collection(its_cmd);

	if (!valid) {
		vgic_its_free_collection(its, coll_id);
		vgic_its_invalidate_cache(kvm);
	} else {
		struct kvm_vcpu *vcpu;

		vcpu = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
		if (!vcpu)
			return E_ITS_MAPC_PROCNUM_OOR;

		collection = find_collection(its, coll_id);

		if (!collection) {
			int ret;

			if (!vgic_its_check_id(its, its->baser_coll_table,
						coll_id, NULL))
				return E_ITS_MAPC_COLLECTION_OOR;

			ret = vgic_its_alloc_collection(its, &collection,
							coll_id);
			if (ret)
				return ret;
			collection->target_addr = vcpu->vcpu_id;
		} else {
			collection->target_addr = vcpu->vcpu_id;
			update_affinity_collection(kvm, its, collection);
		}
	}

	return 0;
}
/*
 * The CLEAR command removes the pending state for a particular LPI.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
				     u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_CLEAR_UNMAPPED_INTERRUPT;

	ite->irq->pending_latch = false;

	if (ite->irq->hw)
		return irq_set_irqchip_state(ite->irq->host_irq,
					     IRQCHIP_STATE_PENDING, false);

	return 0;
}
int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq)
{
	return update_lpi_config(kvm, irq, NULL, true);
}

/*
 * The INV command syncs the configuration bits from the memory table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_INV_UNMAPPED_INTERRUPT;

	return vgic_its_inv_lpi(kvm, ite->irq);
}
/**
 * vgic_its_invall - invalidate all LPIs targeting a given vcpu
 * @vcpu: the vcpu for which the RD is targeted by an invalidation
 *
 * Contrary to the INVALL command, this targets a RD instead of a
 * collection, and we don't need to hold the its_lock, since no ITS is
 * involved here.
 */
int vgic_its_invall(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int irq_count, i = 0;
	u32 *intids;

	irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
	if (irq_count < 0)
		return irq_count;

	for (i = 0; i < irq_count; i++) {
		struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intids[i]);
		if (!irq)
			continue;
		update_lpi_config(kvm, irq, vcpu, false);
		vgic_put_irq(kvm, irq);
	}

	kfree(intids);

	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
		its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);

	return 0;
}
/*
 * The INVALL command requests flushing of all IRQ data in this collection.
 * Find the VCPU mapped to that collection, then iterate over the VM's list
 * of mapped LPIs and update the configuration for each IRQ which targets
 * the specified vcpu. The configuration will be read from the in-memory
 * configuration table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_collection *collection;
	struct kvm_vcpu *vcpu;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_INVALL_UNMAPPED_COLLECTION;

	vcpu = collection_to_vcpu(kvm, collection);
	vgic_its_invall(vcpu);

	return 0;
}
/*
 * The MOVALL command moves the pending state of all IRQs targeting one
 * redistributor to another. We don't hold the pending state in the VCPUs,
 * but in the IRQs instead, so there is really not much to do for us here.
 * However the spec says that no IRQ must target the old redistributor
 * afterwards, so we make sure that no LPI is using the associated target_vcpu.
 * This command affects all LPIs in the system that target that redistributor.
 */
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	struct kvm_vcpu *vcpu1, *vcpu2;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count, i;

	/* We advertise GITS_TYPER.PTA==0, making the address the vcpu ID */
	vcpu1 = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd));
	vcpu2 = kvm_get_vcpu_by_id(kvm, its_cmd_mask_field(its_cmd, 3, 16, 32));

	if (!vcpu1 || !vcpu2)
		return E_ITS_MOVALL_PROCNUM_OOR;

	if (vcpu1 == vcpu2)
		return 0;

	irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids);
	if (irq_count < 0)
		return irq_count;

	for (i = 0; i < irq_count; i++) {
		irq = vgic_get_irq(kvm, NULL, intids[i]);
		if (!irq)
			continue;

		update_affinity(irq, vcpu2);

		vgic_put_irq(kvm, irq);
	}

	vgic_its_invalidate_cache(kvm);

	kfree(intids);
	return 0;
}
/*
 * The INT command injects the LPI associated with that DevID/EvID pair.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 msi_data = its_cmd_get_id(its_cmd);
	u64 msi_devid = its_cmd_get_deviceid(its_cmd);

	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
}
/*
 * This function is called with the its_cmd lock held, but the ITS data
 * structure lock dropped.
 */
static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	int ret = -ENODEV;

	mutex_lock(&its->its_lock);
	switch (its_cmd_get_command(its_cmd)) {
	case GITS_CMD_MAPD:
		ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPC:
		ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPTI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVI:
		ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
		break;
	case GITS_CMD_DISCARD:
		ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
		break;
	case GITS_CMD_CLEAR:
		ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVALL:
		ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
		break;
	case GITS_CMD_INT:
		ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
		break;
	case GITS_CMD_INV:
		ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
		break;
	case GITS_CMD_INVALL:
		ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
		break;
	case GITS_CMD_SYNC:
		/* we ignore this command: we are in sync all of the time */
		ret = 0;
		break;
	}
	mutex_unlock(&its->its_lock);

	return ret;
}
static u64 vgic_sanitise_its_baser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
				  GITS_BASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
				  GITS_BASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
				  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* We support only one (ITS) page size: 64K */
	reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;

	return reg;
}

static u64 vgic_sanitise_its_cbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
				  GITS_CBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
				  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
				  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* Sanitise the physical address to be 64k aligned. */
	reg &= ~GENMASK_ULL(15, 12);

	return reg;
}
static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cbaser, addr & 7, len);
}

static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
				       gpa_t addr, unsigned int len,
				       unsigned long val)
{
	/* When GITS_CTLR.Enable is 1, this register is RO. */
	if (its->enabled)
		return;

	mutex_lock(&its->cmd_lock);
	its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
	its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
	its->creadr = 0;
	/*
	 * CWRITER is architecturally UNKNOWN on reset, but we need to reset
	 * it to CREADR to make sure we start with an empty command buffer.
	 */
	its->cwriter = its->creadr;
	mutex_unlock(&its->cmd_lock);
}
#define ITS_CMD_BUFFER_SIZE(baser)	((((baser) & 0xff) + 1) << 12)
#define ITS_CMD_SIZE			32
#define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))

/* Must be called with the cmd_lock held. */
static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
{
	gpa_t cbaser;
	u64 cmd_buf[4];

	/* Commands are only processed when the ITS is enabled. */
	if (!its->enabled)
		return;

	cbaser = GITS_CBASER_ADDRESS(its->cbaser);

	while (its->cwriter != its->creadr) {
		int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
					      cmd_buf, ITS_CMD_SIZE);
		/*
		 * If kvm_read_guest() fails, this could be due to the guest
		 * programming a bogus value in CBASER or something else going
		 * wrong from which we cannot easily recover.
		 * According to section 6.3.2 in the GICv3 spec we can just
		 * ignore that command then.
		 */
		if (!ret)
			vgic_its_handle_command(kvm, its, cmd_buf);

		its->creadr += ITS_CMD_SIZE;
		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
			its->creadr = 0;
	}
}
/*
 * By writing to CWRITER the guest announces new commands to be processed.
 * To avoid any races in the first place, we take the its_cmd lock, which
 * protects our ring buffer variables, so that there is only one user
 * per ITS handling commands at a given time.
 */
static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
					gpa_t addr, unsigned int len,
					unsigned long val)
{
	u64 reg;

	if (!its)
		return;

	mutex_lock(&its->cmd_lock);

	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
	reg = ITS_CMD_OFFSET(reg);
	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		mutex_unlock(&its->cmd_lock);
		return;
	}
	its->cwriter = reg;

	vgic_its_process_commands(kvm, its);

	mutex_unlock(&its->cmd_lock);
}

static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
						struct vgic_its *its,
						gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cwriter, addr & 0x7, len);
}

static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->creadr, addr & 0x7, len);
}
static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len,
					      unsigned long val)
{
	u32 cmd_offset;
	int ret = 0;

	mutex_lock(&its->cmd_lock);

	if (its->enabled) {
		ret = -EBUSY;
		goto out;
	}

	cmd_offset = ITS_CMD_OFFSET(val);
	if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		ret = -EINVAL;
		goto out;
	}

	its->creadr = cmd_offset;
out:
	mutex_unlock(&its->cmd_lock);
	return ret;
}
#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	u64 reg;

	switch (BASER_INDEX(addr)) {
	case 0:
		reg = its->baser_device_table;
		break;
	case 1:
		reg = its->baser_coll_table;
		break;
	default:
		reg = 0;
		break;
	}

	return extract_bytes(reg, addr & 7, len);
}
#define GITS_BASER_RO_MASK	(GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
static void vgic_mmio_write_its_baser(struct kvm *kvm,
				      struct vgic_its *its,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 entry_size, table_type;
	u64 reg, *regptr, clearbits = 0;

	/* When GITS_CTLR.Enable is 1, we ignore write accesses. */
	if (its->enabled)
		return;

	switch (BASER_INDEX(addr)) {
	case 0:
		regptr = &its->baser_device_table;
		entry_size = abi->dte_esz;
		table_type = GITS_BASER_TYPE_DEVICE;
		break;
	case 1:
		regptr = &its->baser_coll_table;
		entry_size = abi->cte_esz;
		table_type = GITS_BASER_TYPE_COLLECTION;
		clearbits = GITS_BASER_INDIRECT;
		break;
	default:
		return;
	}

	reg = update_64bit_reg(*regptr, addr & 7, len, val);
	reg &= ~GITS_BASER_RO_MASK;
	reg &= ~clearbits;

	reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
	reg |= table_type << GITS_BASER_TYPE_SHIFT;
	reg = vgic_sanitise_its_baser(reg);

	*regptr = reg;

	if (!(reg & GITS_BASER_VALID)) {
		/* Take the its_lock to prevent a race with a save/restore */
		mutex_lock(&its->its_lock);
		switch (table_type) {
		case GITS_BASER_TYPE_DEVICE:
			vgic_its_free_device_list(kvm, its);
			break;
		case GITS_BASER_TYPE_COLLECTION:
			vgic_its_free_collection_list(kvm, its);
			break;
		}
		mutex_unlock(&its->its_lock);
	}
}
static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 reg = 0;

	mutex_lock(&its->cmd_lock);
	if (its->creadr == its->cwriter)
		reg |= GITS_CTLR_QUIESCENT;
	if (its->enabled)
		reg |= GITS_CTLR_ENABLE;
	mutex_unlock(&its->cmd_lock);

	return reg;
}

static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	mutex_lock(&its->cmd_lock);

	/*
	 * It is UNPREDICTABLE to enable the ITS if any of the CBASER or
	 * device/collection BASER are invalid
	 */
	if (!its->enabled && (val & GITS_CTLR_ENABLE) &&
		(!(its->baser_device_table & GITS_BASER_VALID) ||
		 !(its->baser_coll_table & GITS_BASER_VALID) ||
		 !(its->cbaser & GITS_CBASER_VALID)))
		goto out;

	its->enabled = !!(val & GITS_CTLR_ENABLE);
	if (!its->enabled)
		vgic_its_invalidate_cache(kvm);

	/*
	 * Try to process any pending commands. This function bails out early
	 * if the ITS is disabled or no commands have been queued.
	 */
	vgic_its_process_commands(kvm, its);

out:
	mutex_unlock(&its->cmd_lock);
}
#define REGISTER_ITS_DESC(off, rd, wr, length, acc)		\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
}

#define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
	.uaccess_its_write = uwr,				\
}
static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
			      gpa_t addr, unsigned int len, unsigned long val)
{
	/* Ignore */
}

static struct vgic_register_region its_registers[] = {
	REGISTER_ITS_DESC(GITS_CTLR,
		vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
		vgic_mmio_read_its_iidr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_iidr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_TYPER,
		vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CBASER,
		vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CWRITER,
		vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
		vgic_mmio_read_its_creadr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_creadr, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_BASER,
		vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_IDREGS_BASE,
		vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
		VGIC_ACCESS_32bit),
};
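/*
 * The table above describes the 64K GITS register frame: control
 * registers sit at the frame base, the ID registers in the
 * GITS_IDREGS_BASE block near the top, and each entry advertises the
 * access widths the emulation accepts for that register.
 */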
/* This is called on setting the LPI enable bit in the redistributor. */
void vgic_enable_lpis(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
		its_sync_lpi_pending_table(vcpu);
}
static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
				   u64 addr)
{
	struct vgic_io_device *iodev = &its->iodev;
	int ret;

	mutex_lock(&kvm->slots_lock);
	if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
		ret = -EBUSY;
		goto out;
	}

	its->vgic_its_base = addr;
	iodev->regions = its_registers;
	iodev->nr_regions = ARRAY_SIZE(its_registers);
	kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);

	iodev->base_addr = its->vgic_its_base;
	iodev->iodev_type = IODEV_ITS;
	iodev->its = its;
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
out:
	mutex_unlock(&kvm->slots_lock);

	return ret;
}
/* Default is 16 cached LPIs per vcpu */
#define LPI_DEFAULT_PCPU_CACHE_SIZE	16

void vgic_lpi_translation_cache_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	unsigned int sz;
	int i;

	if (!list_empty(&dist->lpi_translation_cache))
		return;

	sz = atomic_read(&kvm->online_vcpus) * LPI_DEFAULT_PCPU_CACHE_SIZE;

	for (i = 0; i < sz; i++) {
		struct vgic_translation_cache_entry *cte;

		/* An allocation failure is not fatal */
		cte = kzalloc(sizeof(*cte), GFP_KERNEL_ACCOUNT);
		if (WARN_ON(!cte))
			break;

		INIT_LIST_HEAD(&cte->entry);
		list_add(&cte->entry, &dist->lpi_translation_cache);
	}
}

void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_translation_cache_entry *cte, *tmp;

	vgic_its_invalidate_cache(kvm);

	list_for_each_entry_safe(cte, tmp,
				 &dist->lpi_translation_cache, entry) {
		list_del(&cte->entry);
		kfree(cte);
	}
}
#define INITIAL_BASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)		| \
	 GITS_BASER_PAGE_SIZE_64K)

#define INITIAL_PROPBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))
static int vgic_its_create(struct kvm_device *dev, u32 type)
{
	int ret;
	struct vgic_its *its;

	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
		return -ENODEV;

	its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL_ACCOUNT);
	if (!its)
		return -ENOMEM;

	mutex_lock(&dev->kvm->arch.config_lock);

	if (vgic_initialized(dev->kvm)) {
		ret = vgic_v4_init(dev->kvm);
		if (ret < 0) {
			mutex_unlock(&dev->kvm->arch.config_lock);
			kfree(its);
			return ret;
		}

		vgic_lpi_translation_cache_init(dev->kvm);
	}

	mutex_init(&its->its_lock);
	mutex_init(&its->cmd_lock);

	/* Yep, even more trickery for lock ordering... */
#ifdef CONFIG_LOCKDEP
	mutex_lock(&its->cmd_lock);
	mutex_lock(&its->its_lock);
	mutex_unlock(&its->its_lock);
	mutex_unlock(&its->cmd_lock);
#endif

	its->vgic_its_base = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&its->device_list);
	INIT_LIST_HEAD(&its->collection_list);

	dev->kvm->arch.vgic.msis_require_devid = true;
	dev->kvm->arch.vgic.has_its = true;
	its->enabled = false;
	its->dev = dev;

	its->baser_device_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
	its->baser_coll_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
	dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;

	dev->private = its;

	ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1);

	mutex_unlock(&dev->kvm->arch.config_lock);

	return ret;
}
static void vgic_its_destroy(struct kvm_device *kvm_dev)
{
	struct kvm *kvm = kvm_dev->kvm;
	struct vgic_its *its = kvm_dev->private;

	mutex_lock(&its->its_lock);

	vgic_its_free_device_list(kvm, its);
	vgic_its_free_collection_list(kvm, its);

	mutex_unlock(&its->its_lock);
	kfree(its);
	kfree(kvm_dev);/* alloc by kvm_ioctl_create_device, free by .destroy */
}
static int vgic_its_has_attr_regs(struct kvm_device *dev,
				  struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	gpa_t offset = attr->attr;
	int align;

	align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;

	if (offset & align)
		return -EINVAL;

	region = vgic_find_mmio_region(its_registers,
				       ARRAY_SIZE(its_registers),
				       offset);
	if (!region)
		return -ENXIO;

	return 0;
}
*dev
,
2067 struct kvm_device_attr
*attr
,
2068 u64
*reg
, bool is_write
)
2070 const struct vgic_register_region
*region
;
2071 struct vgic_its
*its
;
2077 offset
= attr
->attr
;
2080 * Although the spec supports upper/lower 32-bit accesses to
2081 * 64-bit ITS registers, the userspace ABI requires 64-bit
2082 * accesses to all 64-bit wide registers. We therefore only
2083 * support 32-bit accesses to GITS_CTLR, GITS_IIDR and GITS ID
2086 if ((offset
< GITS_TYPER
) || (offset
>= GITS_PIDR4
))
2094 mutex_lock(&dev
->kvm
->lock
);
2096 if (!lock_all_vcpus(dev
->kvm
)) {
2097 mutex_unlock(&dev
->kvm
->lock
);
2101 mutex_lock(&dev
->kvm
->arch
.config_lock
);
2103 if (IS_VGIC_ADDR_UNDEF(its
->vgic_its_base
)) {
2108 region
= vgic_find_mmio_region(its_registers
,
2109 ARRAY_SIZE(its_registers
),
2116 addr
= its
->vgic_its_base
+ offset
;
2118 len
= region
->access_flags
& VGIC_ACCESS_64bit
? 8 : 4;
2121 if (region
->uaccess_its_write
)
2122 ret
= region
->uaccess_its_write(dev
->kvm
, its
, addr
,
2125 region
->its_write(dev
->kvm
, its
, addr
, len
, *reg
);
2127 *reg
= region
->its_read(dev
->kvm
, its
, addr
, len
);
2130 mutex_unlock(&dev
->kvm
->arch
.config_lock
);
2131 unlock_all_vcpus(dev
->kvm
);
2132 mutex_unlock(&dev
->kvm
->lock
);
static u32 compute_next_devid_offset(struct list_head *h,
				     struct its_device *dev)
{
	struct its_device *next;
	u32 next_offset;

	if (list_is_last(&dev->dev_list, h))
		return 0;
	next = list_next_entry(dev, dev_list);
	next_offset = next->device_id - dev->device_id;

	return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET);
}

static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
{
	struct its_ite *next;
	u32 next_offset;

	if (list_is_last(&ite->ite_list, h))
		return 0;
	next = list_next_entry(ite, ite_list);
	next_offset = next->event_id - ite->event_id;

	return min_t(u32, next_offset, VITS_ITE_MAX_EVENTID_OFFSET);
}
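/*
 * The "next offset" values computed here feed the saved table format:
 * each dumped entry records the distance to the next allocated ID, so
 * the restore-side scanner can skip unused ID ranges instead of visiting
 * every possible entry.
 */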
/**
 * typedef entry_fn_t - Callback called on a table entry restore path
 * @its: its handle
 * @id: id of the entry
 * @entry: pointer to the entry
 * @opaque: pointer to an opaque data
 *
 * Return: < 0 on error, 0 if last element was identified, id offset to next
 * element otherwise
 */
typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
			  void *opaque);
/**
 * scan_its_table - Scan a contiguous table in guest RAM and applies a function
 * to each entry
 *
 * @its: its handle
 * @base: base gpa of the table
 * @size: size of the table in bytes
 * @esz: entry size in bytes
 * @start_id: the ID of the first entry in the table
 * (non zero for 2d level tables)
 * @fn: function to apply on each entry
 * @opaque: pointer to opaque data
 *
 * Return: < 0 on error, 0 if last element was identified, 1 otherwise
 * (the last element may not be found on second level tables)
 */
static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
			  int start_id, entry_fn_t fn, void *opaque)
{
	struct kvm *kvm = its->dev->kvm;
	unsigned long len = size;
	int id = start_id;
	gpa_t gpa = base;
	char entry[ESZ_MAX];
	int ret;

	memset(entry, 0, esz);

	while (true) {
		int next_offset;
		size_t byte_offset;

		ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
		if (ret)
			return ret;

		next_offset = fn(its, id, entry, opaque);
		if (next_offset <= 0)
			return next_offset;

		byte_offset = next_offset * esz;
		if (byte_offset >= len)
			break;

		id += next_offset;
		gpa += byte_offset;
		len -= byte_offset;
	}
	return 1;
}
/**
 * vgic_its_save_ite - Save an interrupt translation entry at @gpa
 */
static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
			     struct its_ite *ite, gpa_t gpa, int ite_esz)
{
	struct kvm *kvm = its->dev->kvm;
	u32 next_offset;
	u64 val;

	next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
	val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
	       ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
		ite->collection->collection_id;
	val = cpu_to_le64(val);
	return vgic_write_guest_lock(kvm, gpa, &val, ite_esz);
}
/**
 * vgic_its_restore_ite - restore an interrupt translation entry
 *
 * @its: its handle
 * @event_id: id used for indexing
 * @ptr: pointer to the ITE entry
 * @opaque: pointer to the its_device
 */
static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
				void *ptr, void *opaque)
{
	struct its_device *dev = opaque;
	struct its_collection *collection;
	struct kvm *kvm = its->dev->kvm;
	struct kvm_vcpu *vcpu = NULL;
	u64 val;
	u64 *p = (u64 *)ptr;
	struct vgic_irq *irq;
	u32 coll_id, lpi_id;
	struct its_ite *ite;
	u32 offset;

	val = *p;

	val = le64_to_cpu(val);

	coll_id = val & KVM_ITS_ITE_ICID_MASK;
	lpi_id = (val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT;

	if (!lpi_id)
		return 1; /* invalid entry, no choice but to scan next entry */

	if (lpi_id < VGIC_MIN_LPI)
		return -EINVAL;

	offset = val >> KVM_ITS_ITE_NEXT_SHIFT;
	if (event_id + offset >= BIT_ULL(dev->num_eventid_bits))
		return -EINVAL;

	collection = find_collection(its, coll_id);
	if (!collection)
		return -EINVAL;

	if (!vgic_its_check_event_id(its, dev, event_id))
		return -EINVAL;

	ite = vgic_its_alloc_ite(dev, collection, event_id);
	if (IS_ERR(ite))
		return PTR_ERR(ite);

	if (its_is_collection_mapped(collection))
		vcpu = kvm_get_vcpu_by_id(kvm, collection->target_addr);

	irq = vgic_add_lpi(kvm, lpi_id, vcpu);
	if (IS_ERR(irq)) {
		its_free_ite(kvm, ite);
		return PTR_ERR(irq);
	}
	ite->irq = irq;

	return offset;
}
static int vgic_its_ite_cmp(void *priv, const struct list_head *a,
			    const struct list_head *b)
{
	struct its_ite *itea = container_of(a, struct its_ite, ite_list);
	struct its_ite *iteb = container_of(b, struct its_ite, ite_list);

	if (itea->event_id < iteb->event_id)
		return -1;
	else
		return 1;
}
static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	gpa_t base = device->itt_addr;
	struct its_ite *ite;
	int ret;
	int ite_esz = abi->ite_esz;

	list_sort(NULL, &device->itt_head, vgic_its_ite_cmp);

	list_for_each_entry(ite, &device->itt_head, ite_list) {
		gpa_t gpa = base + ite->event_id * ite_esz;

		/*
		 * If an LPI carries the HW bit, this means that this
		 * interrupt is controlled by GICv4, and we do not
		 * have direct access to that state without GICv4.1.
		 * Let's simply fail the save operation...
		 */
		if (ite->irq->hw && !kvm_vgic_global_state.has_gicv4_1)
			return -EACCES;

		ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
		if (ret)
			return ret;
	}
	return 0;
}
/**
 * vgic_its_restore_itt - restore the ITT of a device
 *
 * @its: its handle
 * @dev: device handle
 *
 * Return 0 on success, < 0 on error
 */
static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	gpa_t base = dev->itt_addr;
	int ret;
	int ite_esz = abi->ite_esz;
	size_t max_size = BIT_ULL(dev->num_eventid_bits) * ite_esz;

	ret = scan_its_table(its, base, max_size, ite_esz, 0,
			     vgic_its_restore_ite, dev);

	/* scan_its_table returns +1 if all ITEs are invalid */
	if (ret > 0)
		ret = 0;

	return ret;
}

/**
 * vgic_its_save_dte - Save a device table entry at a given GPA
 *
 * @its: ITS handle
 * @dev: ITS device
 * @ptr: GPA
 * @dte_esz: device table entry size
 */
static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
			     gpa_t ptr, int dte_esz)
{
	struct kvm *kvm = its->dev->kvm;
	u64 val, itt_addr_field;
	u32 next_offset;

	itt_addr_field = dev->itt_addr >> 8;
	next_offset = compute_next_devid_offset(&its->device_list, dev);
	val = (1ULL << KVM_ITS_DTE_VALID_SHIFT |
	       ((u64)next_offset << KVM_ITS_DTE_NEXT_SHIFT) |
	       (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
		(dev->num_eventid_bits - 1));
	val = cpu_to_le64(val);
	return vgic_write_guest_lock(kvm, ptr, &val, dte_esz);
}
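
/*
 * For reference, the 8-byte DTE layout used by the v0 ABI (see
 * Documentation/virt/kvm/devices/arm-vgic-its.rst):
 *
 *  bits:   | 63 | 62 ... 49 | 48 ... 5 | 4 ... 0 |
 *  values: |  V |    next   | ITT_addr |   Size  |
 *
 * ITT_addr is stored shifted right by 8, so the ITT base must be 256-byte
 * aligned; Size encodes (num_eventid_bits - 1).
 */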

/**
 * vgic_its_restore_dte - restore a device table entry
 *
 * @its: its handle
 * @id: device id the DTE corresponds to
 * @ptr: kernel VA where the 8 byte DTE is located
 * @opaque: unused
 *
 * Return: < 0 on error, 0 if the dte is the last one, id offset to the
 * next dte otherwise
 */
static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
				void *ptr, void *opaque)
{
	struct its_device *dev;
	u64 baser = its->baser_device_table;
	gpa_t itt_addr;
	u8 num_eventid_bits;
	u64 entry = *(u64 *)ptr;
	bool valid;
	u32 offset;
	int ret;

	entry = le64_to_cpu(entry);

	valid = entry >> KVM_ITS_DTE_VALID_SHIFT;
	num_eventid_bits = (entry & KVM_ITS_DTE_SIZE_MASK) + 1;
	itt_addr = ((entry & KVM_ITS_DTE_ITTADDR_MASK)
			>> KVM_ITS_DTE_ITTADDR_SHIFT) << 8;

	if (!valid)
		return 1;

	/* dte entry is valid */
	offset = (entry & KVM_ITS_DTE_NEXT_MASK) >> KVM_ITS_DTE_NEXT_SHIFT;

	if (!vgic_its_check_id(its, baser, id, NULL))
		return -EINVAL;

	dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	ret = vgic_its_restore_itt(its, dev);
	if (ret) {
		vgic_its_free_device(its->dev->kvm, dev);
		return ret;
	}

	return offset;
}

static int vgic_its_device_cmp(void *priv, const struct list_head *a,
			       const struct list_head *b)
{
	struct its_device *deva = container_of(a, struct its_device, dev_list);
	struct its_device *devb = container_of(b, struct its_device, dev_list);

	if (deva->device_id < devb->device_id)
		return -1;
	else
		return 1;
}

/**
 * vgic_its_save_device_tables - Save the device table and all ITT
 * into guest RAM
 *
 * L1/L2 handling is hidden by vgic_its_check_id() helper which directly
 * returns the GPA of the device entry
 */
static int vgic_its_save_device_tables(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_device_table;
	struct its_device *dev;
	int dte_esz = abi->dte_esz;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	list_sort(NULL, &its->device_list, vgic_its_device_cmp);

	list_for_each_entry(dev, &its->device_list, dev_list) {
		int ret;
		gpa_t eaddr;

		if (!vgic_its_check_id(its, baser,
				       dev->device_id, &eaddr))
			return -EINVAL;

		ret = vgic_its_save_itt(its, dev);
		if (ret)
			return ret;

		ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * handle_l1_dte - callback used for L1 device table entries (2 stage case)
 *
 * @its: its handle
 * @id: index of the entry in the L1 table
 * @addr: kernel VA
 * @opaque: unused
 *
 * L1 table entries are scanned by steps of 1 entry
 * Return < 0 if error, 0 if last dte was found when scanning the L2
 * table, +1 otherwise (meaning next L1 entry must be scanned)
 */
static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
			 void *opaque)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int l2_start_id = id * (SZ_64K / abi->dte_esz);
	u64 entry = *(u64 *)addr;
	int dte_esz = abi->dte_esz;
	gpa_t gpa;
	int ret;

	entry = le64_to_cpu(entry);

	if (!(entry & KVM_ITS_L1E_VALID_MASK))
		return 1;

	gpa = entry & KVM_ITS_L1E_ADDR_MASK;

	ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
			     l2_start_id, vgic_its_restore_dte, NULL);

	return ret;
}
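
/*
 * An L1 (indirect) device table entry is a single 64-bit word: bit 63 is
 * the valid bit (KVM_ITS_L1E_VALID_MASK) and bits 51:16 hold the physical
 * address of the 64KB L2 page (KVM_ITS_L1E_ADDR_MASK).
 */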

/**
 * vgic_its_restore_device_tables - Restore the device table and all ITT
 * from guest RAM to internal data structs
 */
static int vgic_its_restore_device_tables(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_device_table;
	int l1_esz, ret;
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	gpa_t l1_gpa;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	l1_gpa = GITS_BASER_ADDR_48_to_52(baser);

	if (baser & GITS_BASER_INDIRECT) {
		l1_esz = GITS_LVL1_ENTRY_SIZE;
		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
				     handle_l1_dte, NULL);
	} else {
		l1_esz = abi->dte_esz;
		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
				     vgic_its_restore_dte, NULL);
	}

	/* scan_its_table returns +1 if all entries are invalid */
	if (ret > 0)
		ret = 0;

	if (ret < 0)
		vgic_its_free_device_list(its->dev->kvm, its);

	return ret;
}

static int vgic_its_save_cte(struct vgic_its *its,
			     struct its_collection *collection,
			     gpa_t gpa, int esz)
{
	u64 val;

	val = (1ULL << KVM_ITS_CTE_VALID_SHIFT |
	       ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
	       collection->collection_id);
	val = cpu_to_le64(val);
	return vgic_write_guest_lock(its->dev->kvm, gpa, &val, esz);
}
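
/*
 * For reference, the 8-byte CTE layout used by the v0 ABI (see
 * Documentation/virt/kvm/devices/arm-vgic-its.rst):
 *
 *  bits:   | 63 | 62 ... 52 | 51 ... 16 | 15 ... 0 |
 *  values: |  V |    RES0   |   RDBase  |   ICID   |
 *
 * RDBase holds the target vCPU id; COLLECTION_NOT_MAPPED (all ones)
 * round-trips through the field and is special-cased on restore, so
 * unmapped collections survive a save/restore cycle.
 */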

/*
 * Restore a collection entry into the ITS collection table.
 * Return +1 on success, 0 if the entry was invalid (which should be
 * interpreted as end-of-table), and a negative error value for generic errors.
 */
static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
{
	struct its_collection *collection;
	struct kvm *kvm = its->dev->kvm;
	u32 target_addr, coll_id;
	u64 val;
	int ret;

	BUG_ON(esz > sizeof(val));
	ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
	if (ret)
		return ret;
	val = le64_to_cpu(val);
	if (!(val & KVM_ITS_CTE_VALID_MASK))
		return 0;

	target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
	coll_id = val & KVM_ITS_CTE_ICID_MASK;

	if (target_addr != COLLECTION_NOT_MAPPED &&
	    !kvm_get_vcpu_by_id(kvm, target_addr))
		return -EINVAL;

	collection = find_collection(its, coll_id);
	if (collection)
		return -EEXIST;

	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
		return -EINVAL;

	ret = vgic_its_alloc_collection(its, &collection, coll_id);
	if (ret)
		return ret;
	collection->target_addr = target_addr;
	return 1;
}

/**
 * vgic_its_save_collection_table - Save the collection table into
 * guest RAM
 */
static int vgic_its_save_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_coll_table;
	gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser);
	struct its_collection *collection;
	u64 val;
	size_t max_size, filled = 0;
	int ret, cte_esz = abi->cte_esz;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
		if (ret)
			return ret;
		gpa += cte_esz;
		filled += cte_esz;
	}

	if (filled == max_size)
		return 0;

	/*
	 * table is not fully filled, add a last dummy element
	 * with valid bit unset
	 */
	val = 0;
	BUG_ON(cte_esz > sizeof(val));
	ret = vgic_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
	return ret;
}
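
/*
 * The dummy invalid entry written above is what terminates the restore
 * loop: vgic_its_restore_cte() returns 0 when it reads an entry with the
 * valid bit clear.
 */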

/**
 * vgic_its_restore_collection_table - reads the collection table
 * in guest memory and restores the ITS internal state. Requires the
 * BASER registers to be restored before.
 */
static int vgic_its_restore_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_coll_table;
	int cte_esz = abi->cte_esz;
	size_t max_size, read = 0;
	gpa_t gpa;
	int ret;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	gpa = GITS_BASER_ADDR_48_to_52(baser);

	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

	while (read < max_size) {
		ret = vgic_its_restore_cte(its, gpa, cte_esz);
		if (ret <= 0)
			break;
		gpa += cte_esz;
		read += cte_esz;
	}

	/* table was fully filled with valid entries, consider it done */
	if (ret > 0)
		return 0;

	if (ret < 0)
		vgic_its_free_collection_list(its->dev->kvm, its);

	return ret;
}

/**
 * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
 * according to v0 ABI
 */
static int vgic_its_save_tables_v0(struct vgic_its *its)
{
	int ret;

	ret = vgic_its_save_device_tables(its);
	if (ret)
		return ret;

	return vgic_its_save_collection_table(its);
}

/**
 * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
 * to internal data structs according to V0 ABI
 */
static int vgic_its_restore_tables_v0(struct vgic_its *its)
{
	int ret;

	ret = vgic_its_restore_collection_table(its);
	if (ret)
		return ret;

	ret = vgic_its_restore_device_tables(its);
	if (ret)
		vgic_its_free_collection_list(its->dev->kvm, its);
	return ret;
}
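
/*
 * The collection table must be restored before the device tables: each
 * ITE restored by vgic_its_restore_ite() looks up its target collection
 * via find_collection() while the ITTs are being rebuilt.
 */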

static int vgic_its_commit_v0(struct vgic_its *its)
{
	const struct vgic_its_abi *abi;

	abi = vgic_its_get_abi(its);
	its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
	its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;

	its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);

	its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);
	return 0;
}
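
/*
 * GIC_ENCODE_SZ(n, 5) stores (n - 1) in a 5-bit field, matching the
 * GITS_BASER.Entry_Size encoding; an 8-byte entry is thus encoded as 7.
 */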

static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
{
	/* We need to keep the ABI specific field values */
	its->baser_coll_table &= ~GITS_BASER_VALID;
	its->baser_device_table &= ~GITS_BASER_VALID;
	its->cbaser = 0;
	its->creadr = 0;
	its->cwriter = 0;
	its->enabled = 0;
	vgic_its_free_device_list(kvm, its);
	vgic_its_free_collection_list(kvm, its);
}

static int vgic_its_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_ITS_ADDR_TYPE:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		case KVM_DEV_ARM_ITS_CTRL_RESET:
			return 0;
		case KVM_DEV_ARM_ITS_SAVE_TABLES:
			return 0;
		case KVM_DEV_ARM_ITS_RESTORE_TABLES:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
		return vgic_its_has_attr_regs(dev, attr);
	}
	return -ENXIO;
}

static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int ret = 0;

	if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
		return 0;

	mutex_lock(&kvm->lock);

	if (!lock_all_vcpus(kvm)) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	mutex_lock(&kvm->arch.config_lock);
	mutex_lock(&its->its_lock);

	switch (attr) {
	case KVM_DEV_ARM_ITS_CTRL_RESET:
		vgic_its_reset(kvm, its);
		break;
	case KVM_DEV_ARM_ITS_SAVE_TABLES:
		ret = abi->save_tables(its);
		break;
	case KVM_DEV_ARM_ITS_RESTORE_TABLES:
		ret = abi->restore_tables(its);
		break;
	}

	mutex_unlock(&its->its_lock);
	mutex_unlock(&kvm->arch.config_lock);
	unlock_all_vcpus(kvm);
	mutex_unlock(&kvm->lock);
	return ret;
}
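
/*
 * Note the lock ordering above: kvm->lock, then every vCPU mutex, then
 * kvm->arch.config_lock and finally its->its_lock, matching the locking
 * order documented at the top of vgic.c. Save/restore needs all vCPUs
 * held so none can touch the ITS while its tables are in flux.
 */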

/**
 * kvm_arch_allow_write_without_running_vcpu - allow writing guest memory
 * without the running VCPU when dirty ring is enabled.
 *
 * @kvm: Pointer to the VM's struct kvm.
 *
 * The running VCPU is required to track dirty guest pages when dirty ring
 * is enabled. Otherwise, the backup bitmap should be used to track the
 * dirty guest pages. When vgic/its tables are being saved, the backup
 * bitmap is used to track the dirty guest pages due to the missed running
 * VCPU in the period.
 */
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	return dist->table_write_in_progress;
}
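
/*
 * dist->table_write_in_progress is only true while vgic_write_guest_lock()
 * (used by all the save helpers above) is writing an entry, so this hook
 * effectively exempts ITS/vgic table saves from the dirty ring's
 * running-VCPU requirement and lets them dirty the backup bitmap instead.
 */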

static int vgic_its_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct vgic_its *its = dev->private;
	int ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;
		u64 addr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		ret = vgic_check_iorange(dev->kvm, its->vgic_its_base,
					 addr, SZ_64K, KVM_VGIC_V3_ITS_SIZE);
		if (ret)
			return ret;

		return vgic_register_its_iodev(dev->kvm, its, addr);
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		return vgic_its_ctrl(dev->kvm, its, attr->attr);
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_its_attr_regs_access(dev, attr, &reg, true);
	}
	}
	return -ENXIO;
}
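
/*
 * A minimal sketch of how userspace reaches the handler above, e.g. to
 * trigger a table save ("its_fd" is a hypothetical ITS device fd obtained
 * via KVM_CREATE_DEVICE; error handling omitted):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
 *		.attr  = KVM_DEV_ARM_ITS_SAVE_TABLES,
 *	};
 *	ioctl(its_fd, KVM_SET_DEVICE_ATTR, &attr);
 */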

static int vgic_its_get_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		struct vgic_its *its = dev->private;
		u64 addr = its->vgic_its_base;
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;
		int ret;

		ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	default:
		return -ENXIO;
	}

	return 0;
}

static struct kvm_device_ops kvm_arm_vgic_its_ops = {
	.name = "kvm-arm-vgic-its",
	.create = vgic_its_create,
	.destroy = vgic_its_destroy,
	.set_attr = vgic_its_set_attr,
	.get_attr = vgic_its_get_attr,
	.has_attr = vgic_its_has_attr,
};

int kvm_vgic_register_its_device(void)
{
	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
				       KVM_DEV_TYPE_ARM_VGIC_ITS);
}
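
/*
 * Once these ops are registered (from kvm_register_vgic_device() when the
 * GICv3 device type is set up), userspace can instantiate an ITS with
 * KVM_CREATE_DEVICE and KVM_DEV_TYPE_ARM_VGIC_ITS, then place it in guest
 * physical space through the KVM_DEV_ARM_VGIC_GRP_ADDR attribute above.
 */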