// SPDX-License-Identifier: GPL-2.0-only
/*
 * GICv3 ITS emulation
 *
 * Copyright (C) 2015,2016 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/list_sort.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"
static int vgic_its_save_tables_v0(struct vgic_its *its);
static int vgic_its_restore_tables_v0(struct vgic_its *its);
static int vgic_its_commit_v0(struct vgic_its *its);
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu, bool needs_inv);
/*
 * Creates a new (reference to a) struct vgic_irq for a given LPI.
 * If this LPI is already mapped on another ITS, we increase its refcount
 * and return a pointer to the existing structure.
 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 * This function returns a pointer to the _unlocked_ structure.
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
				     struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
	unsigned long flags;
	int ret;

	/* In this case there is no put, since we keep the reference. */
	if (irq)
		return irq;

	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
	if (!irq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&irq->lpi_list);
	INIT_LIST_HEAD(&irq->ap_list);
	raw_spin_lock_init(&irq->irq_lock);

	irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);
	irq->intid = intid;
	irq->target_vcpu = vcpu;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	/*
	 * There could be a race with another vgic_add_lpi(), so we need to
	 * check that we don't add a second list entry with the same LPI.
	 */
	list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
		if (oldirq->intid != intid)
			continue;

		/* Someone was faster with adding this LPI, lets use that. */
		kfree(irq);
		irq = oldirq;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() on the returned pointer once it's
		 * finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);

		goto out_unlock;
	}

	list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
	dist->lpi_list_count++;

out_unlock:
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	/*
	 * We "cache" the configuration table entries in our struct vgic_irq's.
	 * However we only have those structs for mapped IRQs, so we read in
	 * the respective config data from memory here upon mapping the LPI.
	 */
	ret = update_lpi_config(kvm, irq, NULL, false);
	if (ret)
		return ERR_PTR(ret);

	ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
	if (ret)
		return ERR_PTR(ret);

	return irq;
}
struct its_device {
	struct list_head dev_list;

	/* the head for the list of ITTEs */
	struct list_head itt_head;
	u32 num_eventid_bits;
	gpa_t itt_addr;
	u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

struct its_collection {
	struct list_head coll_list;

	u32 collection_id;
	u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
	((coll)->target_addr != COLLECTION_NOT_MAPPED))

struct its_ite {
	struct list_head ite_list;

	struct vgic_irq *irq;
	struct its_collection *collection;
	u32 event_id;
};
/**
 * struct vgic_its_abi - ITS abi ops and settings
 * @cte_esz: collection table entry size
 * @dte_esz: device table entry size
 * @ite_esz: interrupt translation table entry size
 * @save_tables: save the ITS tables into guest RAM
 * @restore_tables: restore the ITS internal structs from tables
 *  stored in guest RAM
 * @commit: initialize the registers which expose the ABI settings,
 *  especially the entry sizes
 */
struct vgic_its_abi {
	int cte_esz;
	int dte_esz;
	int ite_esz;

	int (*save_tables)(struct vgic_its *its);
	int (*restore_tables)(struct vgic_its *its);
	int (*commit)(struct vgic_its *its);
};
#define ABI_0_ESZ	8
#define ESZ_MAX		ABI_0_ESZ

static const struct vgic_its_abi its_table_abi_versions[] = {
	[0] = {
	 .cte_esz = ABI_0_ESZ,
	 .dte_esz = ABI_0_ESZ,
	 .ite_esz = ABI_0_ESZ,
	 .save_tables = vgic_its_save_tables_v0,
	 .restore_tables = vgic_its_restore_tables_v0,
	 .commit = vgic_its_commit_v0,
	},
};

#define NR_ITS_ABIS	ARRAY_SIZE(its_table_abi_versions)
inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
{
	return &its_table_abi_versions[its->abi_rev];
}

static int vgic_its_set_abi(struct vgic_its *its, u32 rev)
{
	const struct vgic_its_abi *abi;

	its->abi_rev = rev;
	abi = vgic_its_get_abi(its);
	return abi->commit(its);
}
/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
	struct its_device *device;

	list_for_each_entry(device, &its->device_list, dev_list)
		if (device_id == device->device_id)
			return device;

	return NULL;
}
/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
				u32 event_id)
{
	struct its_device *device;
	struct its_ite *ite;

	device = find_its_device(its, device_id);
	if (device == NULL)
		return NULL;

	list_for_each_entry(ite, &device->itt_head, ite_list)
		if (ite->event_id == event_id)
			return ite;

	return NULL;
}
/* To be used as an iterator this macro misses the enclosing parentheses */
#define for_each_lpi_its(dev, ite, its) \
	list_for_each_entry(dev, &(its)->device_list, dev_list) \
		list_for_each_entry(ite, &(dev)->itt_head, ite_list)

#define GIC_LPI_OFFSET 8192

#define VITS_TYPER_IDBITS 16
#define VITS_TYPER_DEVBITS 16
#define VITS_DTE_MAX_DEVID_OFFSET	(BIT(14) - 1)
#define VITS_ITE_MAX_EVENTID_OFFSET	(BIT(16) - 1)
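/*
 * Illustrative note (not part of the original source): with GIC_LPI_OFFSET
 * of 8192, the first valid LPI INTID is 8192, and with VITS_TYPER_IDBITS
 * and VITS_TYPER_DEVBITS both 16, a guest can use event IDs and device
 * IDs in the range 0..65535.
 */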
/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
	struct its_collection *collection;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		if (coll_id == collection->collection_id)
			return collection;
	}

	return NULL;
}
#define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)
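/*
 * Worked example (illustrative, not part of the original source): a
 * property byte of 0xa1 read from the guest's configuration table decodes
 * as LPI_PROP_ENABLE_BIT(0xa1) = 1 (bit 0 set, LPI enabled) and
 * LPI_PROP_PRIORITY(0xa1) = 0xa0 (bits [7:2] hold the priority).
 */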
/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu, bool needs_inv)
{
	u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
	u8 prop;
	int ret;
	unsigned long flags;

	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
				  &prop, 1);
	if (ret)
		return ret;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);
		irq->enabled = LPI_PROP_ENABLE_BIT(prop);

		if (!irq->hw) {
			vgic_queue_irq_unlock(kvm, irq, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	if (irq->hw)
		return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);

	return 0;
}
/*
 * Create a snapshot of the current LPIs targeting @vcpu, so that we can
 * enumerate those LPIs without holding any lock.
 * Returns their number and puts the kmalloc'ed array into intid_ptr.
 */
int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	unsigned long flags;
	u32 *intids;
	int irq_count, i = 0;

	/*
	 * There is an obvious race between allocating the array and LPIs
	 * being mapped/unmapped. If we ended up here as a result of a
	 * command, we're safe (locks are held, preventing another
	 * command). If coming from another path (such as enabling LPIs),
	 * we must be careful not to overrun the array.
	 */
	irq_count = READ_ONCE(dist->lpi_list_count);
	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
	if (!intids)
		return -ENOMEM;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (i == irq_count)
			break;
		/* We don't need to "get" the IRQ, as we hold the list lock. */
		if (vcpu && irq->target_vcpu != vcpu)
			continue;
		intids[i++] = irq->intid;
	}
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	*intid_ptr = intids;
	return i;
}
static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
{
	int ret = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->target_vcpu = vcpu;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	if (irq->hw) {
		struct its_vlpi_map map;

		ret = its_get_vlpi(irq->host_irq, &map);
		if (ret)
			return ret;

		map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

		ret = its_map_vlpi(irq->host_irq, &map);
	}

	return ret;
}
/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this
 * LPI is targeting) to the VGIC's view, which deals with target VCPUs.
 * Needs to be called whenever either the collection for an LPI has
 * changed or the collection itself got retargeted.
 */
static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
{
	struct kvm_vcpu *vcpu;

	if (!its_is_collection_mapped(ite->collection))
		return;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
	update_affinity(ite->irq, vcpu);
}
/*
 * Updates the target VCPU for every LPI targeting this collection.
 * Must be called with the its_lock mutex held.
 */
static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
				       struct its_collection *coll)
{
	struct its_device *device;
	struct its_ite *ite;

	for_each_lpi_its(device, ite, its) {
		if (!ite->collection || coll != ite->collection)
			continue;

		update_affinity_ite(kvm, ite);
	}
}
static u32 max_lpis_propbaser(u64 propbaser)
{
	int nr_idbits = (propbaser & 0x1f) + 1;

	return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
}
/*
 * Sync the pending table pending bit of LPIs targeting @vcpu
 * with our own data structures. This relies on the LPI being
 * mapped before.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
	gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
	struct vgic_irq *irq;
	int last_byte_offset = -1;
	int ret = 0;
	u32 *intids;
	int nr_irqs, i;
	unsigned long flags;
	u8 pendmask;

	nr_irqs = vgic_copy_lpi_list(vcpu->kvm, vcpu, &intids);
	if (nr_irqs < 0)
		return nr_irqs;

	for (i = 0; i < nr_irqs; i++) {
		int byte_offset, bit_nr;

		byte_offset = intids[i] / BITS_PER_BYTE;
		bit_nr = intids[i] % BITS_PER_BYTE;

		/*
		 * For contiguously allocated LPIs chances are we just read
		 * this very same byte in the last iteration. Reuse that.
		 */
		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest_lock(vcpu->kvm,
						  pendbase + byte_offset,
						  &pendmask, 1);
			if (ret) {
				kfree(intids);
				return ret;
			}
			last_byte_offset = byte_offset;
		}

		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = pendmask & (1U << bit_nr);
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}

	kfree(intids);

	return ret;
}
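/*
 * Worked example (illustrative, not part of the original source): for
 * INTID 8195, byte_offset = 8195 / 8 = 1024 and bit_nr = 8195 % 8 = 3,
 * so the pending state is bit 3 of the byte at pendbase + 1024.
 */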
static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 reg = GITS_TYPER_PLPIS;

	/*
	 * We use linear CPU numbers for redistributor addressing,
	 * so GITS_TYPER.PTA is 0.
	 * Also we force all PROPBASER registers to be the same, so
	 * CommonLPIAff is 0 as well.
	 * To avoid memory waste in the guest, we keep the number of IDBits and
	 * DevBits low - at least for the time being.
	 */
	reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;

	return extract_bytes(reg, addr & 7, len);
}
static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 val;

	val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
	val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
	return val;
}
static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
					    struct vgic_its *its,
					    gpa_t addr, unsigned int len,
					    unsigned long val)
{
	u32 rev = GITS_IIDR_REV(val);

	if (rev >= NR_ITS_ABIS)
		return -EINVAL;
	return vgic_its_set_abi(its, rev);
}
static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GITS_PIDR0:
		return 0x92;	/* part number, bits[7:0] */
	case GITS_PIDR1:
		return 0xb4;	/* part number, bits[11:8] */
	case GITS_PIDR2:
		return GIC_PIDR2_ARCH_GICv3 | 0x0b;
	case GITS_PIDR4:
		return 0x40;	/* This is a 64K software visible page */
	/* The following are the ID registers for (any) GIC. */
	case GITS_CIDR0:
		return 0x0d;
	case GITS_CIDR1:
		return 0xf0;
	case GITS_CIDR2:
		return 0x05;
	case GITS_CIDR3:
		return 0xb1;
	}

	return 0;
}
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
			 u32 devid, u32 eventid, struct vgic_irq **irq)
{
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;

	if (!its->enabled)
		return -EBUSY;

	ite = find_ite(its, devid, eventid);
	if (!ite || !its_is_collection_mapped(ite->collection))
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
	if (!vcpu)
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	if (!vcpu->arch.vgic_cpu.lpis_enabled)
		return -EBUSY;

	*irq = ite->irq;
	return 0;
}
struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
{
	u64 address;
	struct kvm_io_device *kvm_io_dev;
	struct vgic_io_device *iodev;

	if (!vgic_has_its(kvm))
		return ERR_PTR(-ENODEV);

	if (!(msi->flags & KVM_MSI_VALID_DEVID))
		return ERR_PTR(-EINVAL);

	address = (u64)msi->address_hi << 32 | msi->address_lo;

	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
	if (!kvm_io_dev)
		return ERR_PTR(-EINVAL);

	if (kvm_io_dev->ops != &kvm_io_gic_ops)
		return ERR_PTR(-EINVAL);

	iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
	if (iodev->iodev_type != IODEV_ITS)
		return ERR_PTR(-EINVAL);

	return iodev->its;
}
/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping
 * related errors and negative error values for generic errors.
 */
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
				u32 devid, u32 eventid)
{
	struct vgic_irq *irq = NULL;
	unsigned long flags;
	int err;

	err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
	if (err)
		return err;

	if (irq->hw)
		return irq_set_irqchip_state(irq->host_irq,
					     IRQCHIP_STATE_PENDING, true);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, irq, flags);

	return 0;
}
/*
 * Queries the KVM IO bus framework to get the ITS pointer from the given
 * doorbell address.
 * We then call vgic_its_trigger_msi() with the decoded data.
 * According to the KVM_SIGNAL_MSI API description returns 1 on success.
 */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	struct vgic_its *its;
	int ret;

	its = vgic_msi_to_its(kvm, msi);
	if (IS_ERR(its))
		return PTR_ERR(its);

	mutex_lock(&its->its_lock);
	ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
	mutex_unlock(&its->its_lock);

	if (ret < 0)
		return ret;

	/*
	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
	 * if the guest has blocked the MSI. So we map any LPI mapping
	 * related error to that.
	 */
	if (ret)
		return 0;
	else
		return 1;
}
/* Requires the its_lock to be held. */
static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
{
	list_del(&ite->ite_list);

	/* This put matches the get in vgic_add_lpi. */
	if (ite->irq) {
		if (ite->irq->hw)
			WARN_ON(its_unmap_vlpi(ite->irq->host_irq));

		vgic_put_irq(kvm, ite->irq);
	}

	kfree(ite);
}
static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
	return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}

#define its_cmd_get_command(cmd)	its_cmd_mask_field(cmd, 0,  0,  8)
#define its_cmd_get_deviceid(cmd)	its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_size(cmd)		(its_cmd_mask_field(cmd, 1,  0,  5) + 1)
#define its_cmd_get_id(cmd)		its_cmd_mask_field(cmd, 1,  0, 32)
#define its_cmd_get_physical_id(cmd)	its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)	its_cmd_mask_field(cmd, 2,  0, 16)
#define its_cmd_get_ittaddr(cmd)	(its_cmd_mask_field(cmd, 2,  8, 44) << 8)
#define its_cmd_get_target_addr(cmd)	its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)	its_cmd_mask_field(cmd, 2, 63,  1)
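/*
 * Worked example (illustrative, not part of the original source): an ITS
 * command is four little-endian 64-bit words. With
 * cmd[0] = 0x0000000500000008, its_cmd_get_command() extracts bits [7:0]
 * of word 0 (0x08, a MAPD command) and its_cmd_get_deviceid() extracts
 * bits [63:32] (device ID 5). For MAPTI, the event ID and the physical
 * LPI number come from the low and high halves of word 1, respectively.
 */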
/*
 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
				       u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (ite && ite->collection) {
		/*
		 * Though the spec talks about removing the pending state, we
		 * don't bother here since we clear the ITTE anyway and the
		 * pending state is a property of the ITTE struct.
		 */
		its_free_ite(kvm, ite);
		return 0;
	}

	return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
}
/*
 * The MOVI command moves an ITTE to a different collection.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;
	struct its_collection *collection;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_MOVI_UNMAPPED_INTERRUPT;

	if (!its_is_collection_mapped(ite->collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	ite->collection = collection;
	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	return update_affinity(ite->irq, vcpu);
}
/*
 * Check whether an ID can be stored into the corresponding guest table.
 * For a direct table this is pretty easy, but gets a bit nasty for
 * indirect tables. We check whether the resulting guest physical address
 * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
			      gpa_t *eaddr)
{
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
	phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
	int esz = GITS_BASER_ENTRY_SIZE(baser);
	int index, idx;
	gfn_t gfn;
	bool ret;

	switch (type) {
	case GITS_BASER_TYPE_DEVICE:
		if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
			return false;
		break;
	case GITS_BASER_TYPE_COLLECTION:
		/* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
		if (id >= BIT_ULL(16))
			return false;
		break;
	default:
		return false;
	}

	if (!(baser & GITS_BASER_INDIRECT)) {
		phys_addr_t addr;

		if (id >= (l1_tbl_size / esz))
			return false;

		addr = base + id * esz;
		gfn = addr >> PAGE_SHIFT;

		if (eaddr)
			*eaddr = addr;

		goto out;
	}

	/* calculate and check the index into the 1st level */
	index = id / (SZ_64K / esz);
	if (index >= (l1_tbl_size / sizeof(u64)))
		return false;

	/* Each 1st level entry is represented by a 64-bit value. */
	if (kvm_read_guest_lock(its->dev->kvm,
			   base + index * sizeof(indirect_ptr),
			   &indirect_ptr, sizeof(indirect_ptr)))
		return false;

	indirect_ptr = le64_to_cpu(indirect_ptr);

	/* check the valid bit of the first level entry */
	if (!(indirect_ptr & BIT_ULL(63)))
		return false;

	/* Mask the guest physical address and calculate the frame number. */
	indirect_ptr &= GENMASK_ULL(51, 16);

	/* Find the address of the actual entry */
	index = id % (SZ_64K / esz);
	indirect_ptr += index * esz;
	gfn = indirect_ptr >> PAGE_SHIFT;

	if (eaddr)
		*eaddr = indirect_ptr;

out:
	idx = srcu_read_lock(&its->dev->kvm->srcu);
	ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
	srcu_read_unlock(&its->dev->kvm->srcu, idx);
	return ret;
}
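/*
 * Worked example (illustrative, not part of the original source): with an
 * 8-byte entry size, each 64K indirect (L2) page holds 64K / 8 = 8192
 * entries, so device ID 10000 maps to L1 index 10000 / 8192 = 1 and,
 * within that L2 page, to entry 10000 % 8192 = 1808.
 */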
static int vgic_its_alloc_collection(struct vgic_its *its,
				     struct its_collection **colp,
				     u32 coll_id)
{
	struct its_collection *collection;

	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
		return E_ITS_MAPC_COLLECTION_OOR;

	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
	if (!collection)
		return -ENOMEM;

	collection->collection_id = coll_id;
	collection->target_addr = COLLECTION_NOT_MAPPED;

	list_add_tail(&collection->coll_list, &its->collection_list);
	*colp = collection;

	return 0;
}
static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
{
	struct its_collection *collection;
	struct its_device *device;
	struct its_ite *ite;

	/*
	 * Clearing the mapping for that collection ID removes the
	 * entry from the list. If there wasn't any before, we can
	 * go home early.
	 */
	collection = find_collection(its, coll_id);
	if (!collection)
		return;

	for_each_lpi_its(device, ite, its)
		if (ite->collection &&
		    ite->collection->collection_id == coll_id)
			ite->collection = NULL;

	list_del(&collection->coll_list);
	kfree(collection);
}
/* Must be called with its_lock mutex held */
static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
					  struct its_collection *collection,
					  u32 event_id)
{
	struct its_ite *ite;

	ite = kzalloc(sizeof(*ite), GFP_KERNEL);
	if (!ite)
		return ERR_PTR(-ENOMEM);

	ite->event_id	= event_id;
	ite->collection = collection;

	list_add_tail(&ite->ite_list, &device->itt_head);
	return ite;
}
/*
 * The MAPTI and MAPI commands map LPIs to ITTEs.
 * Must be called with its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_ite *ite;
	struct kvm_vcpu *vcpu = NULL;
	struct its_device *device;
	struct its_collection *collection, *new_coll = NULL;
	struct vgic_irq *irq;
	int lpi_nr;

	device = find_its_device(its, device_id);
	if (!device)
		return E_ITS_MAPTI_UNMAPPED_DEVICE;

	if (event_id >= BIT_ULL(device->num_eventid_bits))
		return E_ITS_MAPTI_ID_OOR;

	if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
		lpi_nr = its_cmd_get_physical_id(its_cmd);
	else
		lpi_nr = event_id;
	if (lpi_nr < GIC_LPI_OFFSET ||
	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
		return E_ITS_MAPTI_PHYSICALID_OOR;

	/* If there is an existing mapping, behavior is UNPREDICTABLE. */
	if (find_ite(its, device_id, event_id))
		return 0;

	collection = find_collection(its, coll_id);
	if (!collection) {
		int ret = vgic_its_alloc_collection(its, &collection, coll_id);
		if (ret)
			return ret;
		new_coll = collection;
	}

	ite = vgic_its_alloc_ite(device, collection, event_id);
	if (IS_ERR(ite)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		return PTR_ERR(ite);
	}

	if (its_is_collection_mapped(collection))
		vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
	if (IS_ERR(irq)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		its_free_ite(kvm, ite);
		return PTR_ERR(irq);
	}
	ite->irq = irq;

	return 0;
}
/* Requires the its_lock to be held. */
static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
{
	struct its_ite *ite, *temp;

	/*
	 * The spec says that unmapping a device with still valid
	 * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
	 * since we cannot leave the memory unreferenced.
	 */
	list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
		its_free_ite(kvm, ite);

	list_del(&device->dev_list);
	kfree(device);
}
/* its lock must be held */
static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
{
	struct its_device *cur, *temp;

	list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
		vgic_its_free_device(kvm, cur);
}
/* its lock must be held */
static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
{
	struct its_collection *cur, *temp;

	list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
		vgic_its_free_collection(its, cur->collection_id);
}
/* Must be called with its_lock mutex held */
static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
						u32 device_id, gpa_t itt_addr,
						u8 num_eventid_bits)
{
	struct its_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	device->device_id = device_id;
	device->itt_addr = itt_addr;
	device->num_eventid_bits = num_eventid_bits;
	INIT_LIST_HEAD(&device->itt_head);

	list_add_tail(&device->dev_list, &its->device_list);
	return device;
}
/*
 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	bool valid = its_cmd_get_validbit(its_cmd);
	u8 num_eventid_bits = its_cmd_get_size(its_cmd);
	gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
	struct its_device *device;

	if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
		return E_ITS_MAPD_DEVICE_OOR;

	if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
		return E_ITS_MAPD_ITTSIZE_OOR;

	device = find_its_device(its, device_id);

	/*
	 * The spec says that calling MAPD on an already mapped device
	 * invalidates all cached data for this device. We implement this
	 * by removing the mapping and re-establishing it.
	 */
	if (device)
		vgic_its_free_device(kvm, device);

	/*
	 * The spec does not say whether unmapping a not-mapped device
	 * is an error, so we are done in any case.
	 */
	if (!valid)
		return 0;

	device = vgic_its_alloc_device(its, device_id, itt_addr,
				       num_eventid_bits);

	return PTR_ERR_OR_ZERO(device);
}
/*
 * The MAPC command maps collection IDs to redistributors.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u16 coll_id;
	u32 target_addr;
	struct its_collection *collection;
	bool valid;

	valid = its_cmd_get_validbit(its_cmd);
	coll_id = its_cmd_get_collection(its_cmd);
	target_addr = its_cmd_get_target_addr(its_cmd);

	if (target_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MAPC_PROCNUM_OOR;

	if (!valid) {
		vgic_its_free_collection(its, coll_id);
	} else {
		collection = find_collection(its, coll_id);

		if (!collection) {
			int ret;

			ret = vgic_its_alloc_collection(its, &collection,
							coll_id);
			if (ret)
				return ret;
			collection->target_addr = target_addr;
		} else {
			collection->target_addr = target_addr;
			update_affinity_collection(kvm, its, collection);
		}
	}

	return 0;
}
/*
 * The CLEAR command removes the pending state for a particular LPI.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
				     u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_CLEAR_UNMAPPED_INTERRUPT;

	ite->irq->pending_latch = false;

	if (ite->irq->hw)
		return irq_set_irqchip_state(ite->irq->host_irq,
					     IRQCHIP_STATE_PENDING, false);

	return 0;
}
/*
 * The INV command syncs the configuration bits from the memory table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_INV_UNMAPPED_INTERRUPT;

	return update_lpi_config(kvm, ite->irq, NULL, true);
}
/*
 * The INVALL command requests flushing of all IRQ data in this collection.
 * Find the VCPU mapped to that collection, then iterate over the VM's list
 * of mapped LPIs and update the configuration for each IRQ which targets
 * the specified vcpu. The configuration will be read from the in-memory
 * configuration table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_collection *collection;
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count, i;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_INVALL_UNMAPPED_COLLECTION;

	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
	if (irq_count < 0)
		return irq_count;

	for (i = 0; i < irq_count; i++) {
		irq = vgic_get_irq(kvm, NULL, intids[i]);
		if (!irq)
			continue;
		update_lpi_config(kvm, irq, vcpu, false);
		vgic_put_irq(kvm, irq);
	}

	kfree(intids);

	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
		its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);

	return 0;
}
/*
 * The MOVALL command moves the pending state of all IRQs targeting one
 * redistributor to another. We don't hold the pending state in the VCPUs,
 * but in the IRQs instead, so there is really not much to do for us here.
 * However the spec says that no IRQ must target the old redistributor
 * afterwards, so we make sure that no LPI is using the associated target_vcpu.
 * This command affects all LPIs in the system that target that redistributor.
 */
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	u32 target1_addr = its_cmd_get_target_addr(its_cmd);
	u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
	struct kvm_vcpu *vcpu1, *vcpu2;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count, i;

	if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
	    target2_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MOVALL_PROCNUM_OOR;

	if (target1_addr == target2_addr)
		return 0;

	vcpu1 = kvm_get_vcpu(kvm, target1_addr);
	vcpu2 = kvm_get_vcpu(kvm, target2_addr);

	irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids);
	if (irq_count < 0)
		return irq_count;

	for (i = 0; i < irq_count; i++) {
		irq = vgic_get_irq(kvm, NULL, intids[i]);

		update_affinity(irq, vcpu2);

		vgic_put_irq(kvm, irq);
	}

	kfree(intids);

	return 0;
}
/*
 * The INT command injects the LPI associated with that DevID/EvID pair.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 msi_data = its_cmd_get_id(its_cmd);
	u64 msi_devid = its_cmd_get_deviceid(its_cmd);

	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
}
/*
 * This function is called with the its_cmd lock held, but the ITS data
 * structure lock dropped.
 */
static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	int ret = -ENODEV;

	mutex_lock(&its->its_lock);
	switch (its_cmd_get_command(its_cmd)) {
	case GITS_CMD_MAPD:
		ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPC:
		ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPTI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVI:
		ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
		break;
	case GITS_CMD_DISCARD:
		ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
		break;
	case GITS_CMD_CLEAR:
		ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVALL:
		ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
		break;
	case GITS_CMD_INT:
		ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
		break;
	case GITS_CMD_INV:
		ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
		break;
	case GITS_CMD_INVALL:
		ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
		break;
	case GITS_CMD_SYNC:
		/* we ignore this command: we are in sync all of the time */
		ret = 0;
		break;
	}
	mutex_unlock(&its->its_lock);

	return ret;
}
static u64 vgic_sanitise_its_baser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
				  GITS_BASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
				  GITS_BASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
				  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* We support only one (ITS) page size: 64K */
	reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;

	return reg;
}
static u64 vgic_sanitise_its_cbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
				  GITS_CBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
				  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
				  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* Sanitise the physical address to be 64k aligned. */
	reg &= ~GENMASK_ULL(15, 12);

	return reg;
}
static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cbaser, addr & 7, len);
}
static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
				       gpa_t addr, unsigned int len,
				       unsigned long val)
{
	/* When GITS_CTLR.Enable is 1, this register is RO. */
	if (its->enabled)
		return;

	mutex_lock(&its->cmd_lock);
	its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
	its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
	its->creadr = 0;
	/*
	 * CWRITER is architecturally UNKNOWN on reset, but we need to reset
	 * it to CREADR to make sure we start with an empty command buffer.
	 */
	its->cwriter = its->creadr;
	mutex_unlock(&its->cmd_lock);
}
#define ITS_CMD_BUFFER_SIZE(baser)	((((baser) & 0xff) + 1) << 12)
#define ITS_CMD_SIZE			32
#define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))
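/*
 * Worked example (illustrative, not part of the original source): the low
 * byte of CBASER encodes the number of 4K pages minus one, so a value of
 * 0 gives ITS_CMD_BUFFER_SIZE of 4096 bytes, i.e. room for 4096 / 32 =
 * 128 commands. ITS_CMD_OFFSET() masks bits [19:5], which both aligns an
 * offset to the 32-byte command size and caps it below the 1MB maximum
 * command queue size.
 */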
/* Must be called with the cmd_lock held. */
static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
{
	gpa_t cbaser;
	u64 cmd_buf[4];

	/* Commands are only processed when the ITS is enabled. */
	if (!its->enabled)
		return;

	cbaser = GITS_CBASER_ADDRESS(its->cbaser);

	while (its->cwriter != its->creadr) {
		int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
					      cmd_buf, ITS_CMD_SIZE);
		/*
		 * If kvm_read_guest() fails, this could be due to the guest
		 * programming a bogus value in CBASER or something else going
		 * wrong from which we cannot easily recover.
		 * According to section 6.3.2 in the GICv3 spec we can just
		 * ignore that command then.
		 */
		if (!ret)
			vgic_its_handle_command(kvm, its, cmd_buf);

		its->creadr += ITS_CMD_SIZE;
		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
			its->creadr = 0;
	}
}
/*
 * By writing to CWRITER the guest announces new commands to be processed.
 * To avoid any races in the first place, we take the its_cmd lock, which
 * protects our ring buffer variables, so that there is only one user
 * per ITS handling commands at a given time.
 */
static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
					gpa_t addr, unsigned int len,
					unsigned long val)
{
	u64 reg;

	if (!its)
		return;

	mutex_lock(&its->cmd_lock);

	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
	reg = ITS_CMD_OFFSET(reg);
	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		mutex_unlock(&its->cmd_lock);
		return;
	}
	its->cwriter = reg;

	vgic_its_process_commands(kvm, its);

	mutex_unlock(&its->cmd_lock);
}
static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
						struct vgic_its *its,
						gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cwriter, addr & 0x7, len);
}

static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->creadr, addr & 0x7, len);
}
static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len,
					      unsigned long val)
{
	u32 cmd_offset;
	int ret = 0;

	mutex_lock(&its->cmd_lock);

	if (its->enabled) {
		ret = -EBUSY;
		goto out;
	}

	cmd_offset = ITS_CMD_OFFSET(val);
	if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		ret = -EINVAL;
		goto out;
	}

	its->creadr = cmd_offset;
out:
	mutex_unlock(&its->cmd_lock);
	return ret;
}
#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
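/*
 * Illustrative note (not part of the original source): the GITS_BASER<n>
 * registers are consecutive 64-bit registers, so an offset of 0x0 within
 * the register bank selects GITS_BASER0 (the device table here) and an
 * offset of 0x8 selects GITS_BASER1 (the collection table).
 */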
static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	u64 reg;

	switch (BASER_INDEX(addr)) {
	case 0:
		reg = its->baser_device_table;
		break;
	case 1:
		reg = its->baser_coll_table;
		break;
	default:
		reg = 0;
		break;
	}

	return extract_bytes(reg, addr & 7, len);
}
#define GITS_BASER_RO_MASK	(GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
static void vgic_mmio_write_its_baser(struct kvm *kvm,
				      struct vgic_its *its,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 entry_size, table_type;
	u64 reg, *regptr, clearbits = 0;

	/* When GITS_CTLR.Enable is 1, we ignore write accesses. */
	if (its->enabled)
		return;

	switch (BASER_INDEX(addr)) {
	case 0:
		regptr = &its->baser_device_table;
		entry_size = abi->dte_esz;
		table_type = GITS_BASER_TYPE_DEVICE;
		break;
	case 1:
		regptr = &its->baser_coll_table;
		entry_size = abi->cte_esz;
		table_type = GITS_BASER_TYPE_COLLECTION;
		clearbits = GITS_BASER_INDIRECT;
		break;
	default:
		return;
	}

	reg = update_64bit_reg(*regptr, addr & 7, len, val);
	reg &= ~GITS_BASER_RO_MASK;
	reg &= ~clearbits;

	reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
	reg |= table_type << GITS_BASER_TYPE_SHIFT;
	reg = vgic_sanitise_its_baser(reg);

	*regptr = reg;

	if (!(reg & GITS_BASER_VALID)) {
		/* Take the its_lock to prevent a race with a save/restore */
		mutex_lock(&its->its_lock);
		switch (table_type) {
		case GITS_BASER_TYPE_DEVICE:
			vgic_its_free_device_list(kvm, its);
			break;
		case GITS_BASER_TYPE_COLLECTION:
			vgic_its_free_collection_list(kvm, its);
			break;
		}
		mutex_unlock(&its->its_lock);
	}
}
static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 reg = 0;

	mutex_lock(&its->cmd_lock);
	if (its->creadr == its->cwriter)
		reg |= GITS_CTLR_QUIESCENT;
	if (its->enabled)
		reg |= GITS_CTLR_ENABLE;
	mutex_unlock(&its->cmd_lock);

	return reg;
}
static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	mutex_lock(&its->cmd_lock);

	/*
	 * It is UNPREDICTABLE to enable the ITS if any of the CBASER or
	 * device/collection BASER are invalid
	 */
	if (!its->enabled && (val & GITS_CTLR_ENABLE) &&
	    (!(its->baser_device_table & GITS_BASER_VALID) ||
	     !(its->baser_coll_table & GITS_BASER_VALID) ||
	     !(its->cbaser & GITS_CBASER_VALID)))
		goto out;

	its->enabled = !!(val & GITS_CTLR_ENABLE);

	/*
	 * Try to process any pending commands. This function bails out early
	 * if the ITS is disabled or no commands have been queued.
	 */
	vgic_its_process_commands(kvm, its);

out:
	mutex_unlock(&its->cmd_lock);
}
#define REGISTER_ITS_DESC(off, rd, wr, length, acc)		\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
}

#define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
	.uaccess_its_write = uwr,				\
}
static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
			      gpa_t addr, unsigned int len, unsigned long val)
{
	/* Ignore */
}
static struct vgic_register_region its_registers[] = {
	REGISTER_ITS_DESC(GITS_CTLR,
		vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
		vgic_mmio_read_its_iidr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_iidr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_TYPER,
		vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CBASER,
		vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CWRITER,
		vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
		vgic_mmio_read_its_creadr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_creadr, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_BASER,
		vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_IDREGS_BASE,
		vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
		VGIC_ACCESS_32bit),
};
/* This is called on setting the LPI enable bit in the redistributor. */
void vgic_enable_lpis(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
		its_sync_lpi_pending_table(vcpu);
}
static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
				   u64 addr)
{
	struct vgic_io_device *iodev = &its->iodev;
	int ret;

	mutex_lock(&kvm->slots_lock);
	if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
		ret = -EBUSY;
		goto out;
	}

	its->vgic_its_base = addr;
	iodev->regions = its_registers;
	iodev->nr_regions = ARRAY_SIZE(its_registers);
	kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);

	iodev->base_addr = its->vgic_its_base;
	iodev->iodev_type = IODEV_ITS;
	iodev->its = its;
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
out:
	mutex_unlock(&kvm->slots_lock);

	return ret;
}
#define INITIAL_BASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)		| \
	 GITS_BASER_PAGE_SIZE_64K)

#define INITIAL_PROPBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))
static int vgic_its_create(struct kvm_device *dev, u32 type)
{
	struct vgic_its *its;

	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
		return -ENODEV;

	its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
	if (!its)
		return -ENOMEM;

	if (vgic_initialized(dev->kvm)) {
		int ret = vgic_v4_init(dev->kvm);
		if (ret < 0) {
			kfree(its);
			return ret;
		}
	}

	mutex_init(&its->its_lock);
	mutex_init(&its->cmd_lock);

	its->vgic_its_base = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&its->device_list);
	INIT_LIST_HEAD(&its->collection_list);

	dev->kvm->arch.vgic.msis_require_devid = true;
	dev->kvm->arch.vgic.has_its = true;
	its->enabled = false;
	its->dev = dev;

	its->baser_device_table = INITIAL_BASER_VALUE			|
		((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
	its->baser_coll_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
	dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;

	dev->private = its;

	return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
}
static void vgic_its_destroy(struct kvm_device *kvm_dev)
{
	struct kvm *kvm = kvm_dev->kvm;
	struct vgic_its *its = kvm_dev->private;

	mutex_lock(&its->its_lock);

	vgic_its_free_device_list(kvm, its);
	vgic_its_free_collection_list(kvm, its);

	mutex_unlock(&its->its_lock);
	kfree(its);
	kfree(kvm_dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
}
static int vgic_its_has_attr_regs(struct kvm_device *dev,
				  struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	gpa_t offset = attr->attr;
	int align;

	align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;

	if (offset & align)
		return -EINVAL;

	region = vgic_find_mmio_region(its_registers,
				       ARRAY_SIZE(its_registers),
				       offset);
	if (!region)
		return -ENXIO;

	return 0;
}
*dev
,
1751 struct kvm_device_attr
*attr
,
1752 u64
*reg
, bool is_write
)
1754 const struct vgic_register_region
*region
;
1755 struct vgic_its
*its
;
1761 offset
= attr
->attr
;
1764 * Although the spec supports upper/lower 32-bit accesses to
1765 * 64-bit ITS registers, the userspace ABI requires 64-bit
1766 * accesses to all 64-bit wide registers. We therefore only
1767 * support 32-bit accesses to GITS_CTLR, GITS_IIDR and GITS ID
1770 if ((offset
< GITS_TYPER
) || (offset
>= GITS_PIDR4
))
1778 mutex_lock(&dev
->kvm
->lock
);
1780 if (IS_VGIC_ADDR_UNDEF(its
->vgic_its_base
)) {
1785 region
= vgic_find_mmio_region(its_registers
,
1786 ARRAY_SIZE(its_registers
),
1793 if (!lock_all_vcpus(dev
->kvm
)) {
1798 addr
= its
->vgic_its_base
+ offset
;
1800 len
= region
->access_flags
& VGIC_ACCESS_64bit
? 8 : 4;
1803 if (region
->uaccess_its_write
)
1804 ret
= region
->uaccess_its_write(dev
->kvm
, its
, addr
,
1807 region
->its_write(dev
->kvm
, its
, addr
, len
, *reg
);
1809 *reg
= region
->its_read(dev
->kvm
, its
, addr
, len
);
1811 unlock_all_vcpus(dev
->kvm
);
1813 mutex_unlock(&dev
->kvm
->lock
);
static u32 compute_next_devid_offset(struct list_head *h,
				     struct its_device *dev)
{
	struct its_device *next;
	u32 next_offset;

	if (list_is_last(&dev->dev_list, h))
		return 0;
	next = list_next_entry(dev, dev_list);
	next_offset = next->device_id - dev->device_id;

	return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET);
}
static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
{
	struct its_ite *next;
	u32 next_offset;

	if (list_is_last(&ite->ite_list, h))
		return 0;
	next = list_next_entry(ite, ite_list);
	next_offset = next->event_id - ite->event_id;

	return min_t(u32, next_offset, VITS_ITE_MAX_EVENTID_OFFSET);
}
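/*
 * Worked example (illustrative, not part of the original source): if the
 * sorted device list contains device IDs 0 and 3, the saved DTE for
 * device 0 carries a next-offset of 3; the same scheme applies to event
 * IDs within an ITT. Offsets are clamped to VITS_DTE_MAX_DEVID_OFFSET /
 * VITS_ITE_MAX_EVENTID_OFFSET, and the last entry stores 0 to terminate
 * the walk on restore.
 */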
/**
 * entry_fn_t - Callback called on a table entry restore path
 * @its: its handle
 * @id: id of the entry
 * @entry: pointer to the entry
 * @opaque: pointer to an opaque data
 *
 * Return: < 0 on error, 0 if last element was identified, id offset to next
 * element otherwise
 */
typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
			  void *opaque);

/**
 * scan_its_table - Scan a contiguous table in guest RAM, applying a function
 * on each entry
 *
 * @its: its handle
 * @base: base gpa of the table
 * @size: size of the table in bytes
 * @esz: entry size in bytes
 * @start_id: the ID of the first entry in the table
 * (non zero for 2d level tables)
 * @fn: function to apply on each entry
 *
 * Return: < 0 on error, 0 if last element was identified, 1 otherwise
 * (the last element may not be found on second level tables)
 */
static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
			  int start_id, entry_fn_t fn, void *opaque)
{
	struct kvm *kvm = its->dev->kvm;
	unsigned long len = size;
	int id = start_id;
	gpa_t gpa = base;
	char entry[ESZ_MAX];
	int ret;

	memset(entry, 0, esz);

	while (len > 0) {
		int next_offset;
		size_t byte_offset;

		ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
		if (ret)
			return ret;

		next_offset = fn(its, id, entry, opaque);
		if (next_offset <= 0)
			return next_offset;

		byte_offset = next_offset * esz;
		id += next_offset;
		gpa += byte_offset;
		len -= byte_offset;
	}
	return 1;
}
/**
 * vgic_its_save_ite - Save an interrupt translation entry at @gpa
 */
static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
			     struct its_ite *ite, gpa_t gpa, int ite_esz)
{
	struct kvm *kvm = its->dev->kvm;
	u32 next_offset;
	u64 val;

	next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
	val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
	      ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
		ite->collection->collection_id;
	val = cpu_to_le64(val);
	return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
}
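/*
 * Illustrative note (not part of the original source; shift values assumed
 * from the KVM_ITS_ITE_* definitions): assuming the next-event offset
 * lives in bits [63:48], the physical INTID in bits [47:16] and the
 * collection ID in bits [15:0], an ITE for LPI 8192 in collection 1 with
 * a next-offset of 1 is saved as 0x0001000020000001.
 */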
/**
 * vgic_its_restore_ite - restore an interrupt translation entry
 * @event_id: id used for indexing
 * @ptr: pointer to the ITE entry
 * @opaque: pointer to the its_device
 */
static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
				void *ptr, void *opaque)
{
	struct its_device *dev = (struct its_device *)opaque;
	struct its_collection *collection;
	struct kvm *kvm = its->dev->kvm;
	struct kvm_vcpu *vcpu = NULL;
	u64 val;
	u64 *p = (u64 *)ptr;
	struct vgic_irq *irq;
	u32 coll_id, lpi_id;
	struct its_ite *ite;
	u32 offset;

	val = *p;

	val = le64_to_cpu(val);

	coll_id = val & KVM_ITS_ITE_ICID_MASK;
	lpi_id = (val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT;

	if (!lpi_id)
		return 1; /* invalid entry, no choice but to scan next entry */

	if (lpi_id < VGIC_MIN_LPI)
		return -EINVAL;

	offset = val >> KVM_ITS_ITE_NEXT_SHIFT;
	if (event_id + offset >= BIT_ULL(dev->num_eventid_bits))
		return -EINVAL;

	collection = find_collection(its, coll_id);
	if (!collection)
		return -EINVAL;

	ite = vgic_its_alloc_ite(dev, collection, event_id);
	if (IS_ERR(ite))
		return PTR_ERR(ite);

	if (its_is_collection_mapped(collection))
		vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq = vgic_add_lpi(kvm, lpi_id, vcpu);
	if (IS_ERR(irq))
		return PTR_ERR(irq);
	ite->irq = irq;

	return offset;
}
static int vgic_its_ite_cmp(void *priv, struct list_head *a,
			    struct list_head *b)
{
	struct its_ite *itea = container_of(a, struct its_ite, ite_list);
	struct its_ite *iteb = container_of(b, struct its_ite, ite_list);

	if (itea->event_id < iteb->event_id)
		return -1;
	else
		return 1;
}
static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	gpa_t base = device->itt_addr;
	struct its_ite *ite;
	int ret;
	int ite_esz = abi->ite_esz;

	list_sort(NULL, &device->itt_head, vgic_its_ite_cmp);

	list_for_each_entry(ite, &device->itt_head, ite_list) {
		gpa_t gpa = base + ite->event_id * ite_esz;

		/*
		 * If an LPI carries the HW bit, this means that this
		 * interrupt is controlled by GICv4, and we do not
		 * have direct access to that state. Let's simply fail
		 * the save operation...
		 */
		if (ite->irq->hw)
			return -EACCES;

		ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
		if (ret)
			return ret;
	}
	return 0;
}
/**
 * vgic_its_restore_itt - restore the ITT of a device
 *
 * @its: its handle
 * @dev: device handle
 *
 * Return 0 on success, < 0 on error
 */
static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	gpa_t base = dev->itt_addr;
	int ret;
	int ite_esz = abi->ite_esz;
	size_t max_size = BIT_ULL(dev->num_eventid_bits) * ite_esz;

	ret = scan_its_table(its, base, max_size, ite_esz, 0,
			     vgic_its_restore_ite, dev);

	/* scan_its_table returns +1 if all ITEs are invalid */
	if (ret > 0)
		ret = 0;

	return ret;
}
2052 static int vgic_its_save_dte(struct vgic_its
*its
, struct its_device
*dev
,
2053 gpa_t ptr
, int dte_esz
)
2055 struct kvm
*kvm
= its
->dev
->kvm
;
2056 u64 val
, itt_addr_field
;
2059 itt_addr_field
= dev
->itt_addr
>> 8;
2060 next_offset
= compute_next_devid_offset(&its
->device_list
, dev
);
2061 val
= (1ULL << KVM_ITS_DTE_VALID_SHIFT
|
2062 ((u64
)next_offset
<< KVM_ITS_DTE_NEXT_SHIFT
) |
2063 (itt_addr_field
<< KVM_ITS_DTE_ITTADDR_SHIFT
) |
2064 (dev
->num_eventid_bits
- 1));
2065 val
= cpu_to_le64(val
);
2066 return kvm_write_guest_lock(kvm
, ptr
, &val
, dte_esz
);
/**
 * vgic_its_restore_dte - restore a device table entry
 *
 * @its: its handle
 * @id: device id the DTE corresponds to
 * @ptr: kernel VA where the 8 byte DTE is located
 * @opaque: unused
 *
 * Return: < 0 on error, 0 if the dte is the last one, id offset to the
 * next dte otherwise
 */
static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
				void *ptr, void *opaque)
{
	struct its_device *dev;
	gpa_t itt_addr;
	u8 num_eventid_bits;
	u64 entry = *(u64 *)ptr;
	bool valid;
	u32 offset;
	int ret;

	entry = le64_to_cpu(entry);

	valid = entry >> KVM_ITS_DTE_VALID_SHIFT;
	num_eventid_bits = (entry & KVM_ITS_DTE_SIZE_MASK) + 1;
	itt_addr = ((entry & KVM_ITS_DTE_ITTADDR_MASK)
			>> KVM_ITS_DTE_ITTADDR_SHIFT) << 8;

	if (!valid)
		return 1;

	/* dte entry is valid */
	offset = (entry & KVM_ITS_DTE_NEXT_MASK) >> KVM_ITS_DTE_NEXT_SHIFT;

	dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	ret = vgic_its_restore_itt(its, dev);
	if (ret) {
		vgic_its_free_device(its->dev->kvm, dev);
		return ret;
	}

	return offset;
}
, struct list_head
*a
,
2118 struct list_head
*b
)
2120 struct its_device
*deva
= container_of(a
, struct its_device
, dev_list
);
2121 struct its_device
*devb
= container_of(b
, struct its_device
, dev_list
);
2123 if (deva
->device_id
< devb
->device_id
)
/**
 * vgic_its_save_device_tables - Save the device table and all ITT
 * into guest RAM
 *
 * L1/L2 handling is hidden by vgic_its_check_id() helper which directly
 * returns the GPA of the device entry
 */
static int vgic_its_save_device_tables(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_device_table;
	struct its_device *dev;
	int dte_esz = abi->dte_esz;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	list_sort(NULL, &its->device_list, vgic_its_device_cmp);

	list_for_each_entry(dev, &its->device_list, dev_list) {
		int ret;
		gpa_t eaddr;

		if (!vgic_its_check_id(its, baser,
				       dev->device_id, &eaddr))
			return -EINVAL;

		ret = vgic_its_save_itt(its, dev);
		if (ret)
			return ret;

		ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
		if (ret)
			return ret;
	}
	return 0;
}
/**
 * handle_l1_dte - callback used for L1 device table entries (2 stage case)
 *
 * @its: its handle
 * @id: index of the entry in the L1 table
 * @addr: kernel VA
 * @opaque: unused
 *
 * L1 table entries are scanned by steps of 1 entry
 * Return < 0 if error, 0 if last dte was found when scanning the L2
 * table, +1 otherwise (meaning next L1 entry must be scanned)
 */
static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
			 void *opaque)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int l2_start_id = id * (SZ_64K / abi->dte_esz);
	u64 entry = *(u64 *)addr;
	int dte_esz = abi->dte_esz;
	gpa_t gpa;
	int ret;

	entry = le64_to_cpu(entry);

	if (!(entry & KVM_ITS_L1E_VALID_MASK))
		return 1;

	gpa = entry & KVM_ITS_L1E_ADDR_MASK;

	ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
			     l2_start_id, vgic_its_restore_dte, NULL);

	return ret;
}
/**
 * vgic_its_restore_device_tables - Restore the device table and all ITT
 * from guest RAM to internal data structs
 */
static int vgic_its_restore_device_tables(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_device_table;
	int l1_esz, ret;
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	gpa_t l1_gpa;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	l1_gpa = GITS_BASER_ADDR_48_to_52(baser);

	if (baser & GITS_BASER_INDIRECT) {
		l1_esz = GITS_LVL1_ENTRY_SIZE;
		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
				     handle_l1_dte, NULL);
	} else {
		l1_esz = abi->dte_esz;
		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
				     vgic_its_restore_dte, NULL);
	}

	/* scan_its_table returns +1 if all entries are invalid */
	if (ret > 0)
		ret = 0;

	return ret;
}
static int vgic_its_save_cte(struct vgic_its *its,
			     struct its_collection *collection,
			     gpa_t gpa, int esz)
{
	u64 val;

	val = (1ULL << KVM_ITS_CTE_VALID_SHIFT |
	       ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
	       collection->collection_id);
	val = cpu_to_le64(val);
	return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
}
static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
{
	struct its_collection *collection;
	struct kvm *kvm = its->dev->kvm;
	u32 target_addr, coll_id;
	u64 val;
	int ret;

	BUG_ON(esz > sizeof(val));
	ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
	if (ret)
		return ret;
	val = le64_to_cpu(val);
	if (!(val & KVM_ITS_CTE_VALID_MASK))
		return 0;

	target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
	coll_id = val & KVM_ITS_CTE_ICID_MASK;

	if (target_addr >= atomic_read(&kvm->online_vcpus))
		return -EINVAL;

	collection = find_collection(its, coll_id);
	if (collection)
		return -EEXIST;
	ret = vgic_its_alloc_collection(its, &collection, coll_id);
	if (ret)
		return ret;
	collection->target_addr = target_addr;
	return 1;
}
/**
 * vgic_its_save_collection_table - Save the collection table into
 * guest RAM
 */
static int vgic_its_save_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_coll_table;
	gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser);
	struct its_collection *collection;
	u64 val;
	size_t max_size, filled = 0;
	int ret, cte_esz = abi->cte_esz;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
		if (ret)
			return ret;
		gpa += cte_esz;
		filled += cte_esz;
	}

	if (filled == max_size)
		return 0;

	/*
	 * table is not fully filled, add a last dummy element
	 * with valid bit unset
	 */
	val = 0;
	BUG_ON(cte_esz > sizeof(val));
	ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
	return ret;
}
/**
 * vgic_its_restore_collection_table - reads the collection table
 * in guest memory and restores the ITS internal state. Requires the
 * BASER registers to be restored before.
 */
static int vgic_its_restore_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_coll_table;
	int cte_esz = abi->cte_esz;
	size_t max_size, read = 0;
	gpa_t gpa;
	int ret;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	gpa = GITS_BASER_ADDR_48_to_52(baser);

	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

	while (read < max_size) {
		ret = vgic_its_restore_cte(its, gpa, cte_esz);
		if (ret <= 0)
			break;
		gpa += cte_esz;
		read += cte_esz;
	}

	if (ret > 0)
		return 0;

	return ret;
}
/**
 * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
 * according to v0 ABI
 */
static int vgic_its_save_tables_v0(struct vgic_its *its)
{
	int ret;

	ret = vgic_its_save_device_tables(its);
	if (ret)
		return ret;

	return vgic_its_save_collection_table(its);
}
/**
 * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
 * to internal data structs according to V0 ABI
 */
static int vgic_its_restore_tables_v0(struct vgic_its *its)
{
	int ret;

	ret = vgic_its_restore_collection_table(its);
	if (ret)
		return ret;

	return vgic_its_restore_device_tables(its);
}
static int vgic_its_commit_v0(struct vgic_its *its)
{
	const struct vgic_its_abi *abi;

	abi = vgic_its_get_abi(its);
	its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
	its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;

	its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);

	its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);
	return 0;
}
static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
{
	/* We need to keep the ABI specific field values */
	its->baser_coll_table &= ~GITS_BASER_VALID;
	its->baser_device_table &= ~GITS_BASER_VALID;
	its->cbaser = 0;
	its->creadr = 0;
	its->cwriter = 0;
	its->enabled = 0;
	vgic_its_free_device_list(kvm, its);
	vgic_its_free_collection_list(kvm, its);
}
static int vgic_its_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_ITS_ADDR_TYPE:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		case KVM_DEV_ARM_ITS_CTRL_RESET:
			return 0;
		case KVM_DEV_ARM_ITS_SAVE_TABLES:
			return 0;
		case KVM_DEV_ARM_ITS_RESTORE_TABLES:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
		return vgic_its_has_attr_regs(dev, attr);
	}
	return -ENXIO;
}
static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int ret = 0;

	if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
		return 0;

	mutex_lock(&kvm->lock);
	mutex_lock(&its->its_lock);

	if (!lock_all_vcpus(kvm)) {
		mutex_unlock(&its->its_lock);
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	switch (attr) {
	case KVM_DEV_ARM_ITS_CTRL_RESET:
		vgic_its_reset(kvm, its);
		break;
	case KVM_DEV_ARM_ITS_SAVE_TABLES:
		ret = abi->save_tables(its);
		break;
	case KVM_DEV_ARM_ITS_RESTORE_TABLES:
		ret = abi->restore_tables(its);
		break;
	}

	unlock_all_vcpus(kvm);
	mutex_unlock(&its->its_lock);
	mutex_unlock(&kvm->lock);
	return ret;
}
static int vgic_its_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct vgic_its *its = dev->private;
	int ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;
		u64 addr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
					addr, SZ_64K);
		if (ret)
			return ret;

		return vgic_register_its_iodev(dev->kvm, its, addr);
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		return vgic_its_ctrl(dev->kvm, its, attr->attr);
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_its_attr_regs_access(dev, attr, &reg, true);
	}
	}
	return -ENXIO;
}
static int vgic_its_get_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		struct vgic_its *its = dev->private;
		u64 addr = its->vgic_its_base;
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;
		int ret;

		ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	default:
		return -ENXIO;
	}

	return 0;
}
static struct kvm_device_ops kvm_arm_vgic_its_ops = {
	.name = "kvm-arm-vgic-its",
	.create = vgic_its_create,
	.destroy = vgic_its_destroy,
	.set_attr = vgic_its_set_attr,
	.get_attr = vgic_its_get_attr,
	.has_attr = vgic_its_has_attr,
};

int kvm_vgic_register_its_device(void)
{
	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
				       KVM_DEV_TYPE_ARM_VGIC_ITS);
}