GICv5 systems will likely not support the full set of PPIs. The
presence of any virtual PPI is tied to the presence of the physical
PPI. Therefore, the available PPIs will be limited by the physical
host. Userspace cannot drive any PPIs that are not implemented.
Moreover, it is not desirable to expose all PPIs to the guest in the
first place, even if they are supported in hardware. Some devices,
such as the arch timer, are implemented in KVM, and hence those PPIs
shouldn't be driven by userspace, either.
Provide a new UAPI:
KVM_DEV_ARM_VGIC_GRP_CTRL => KVM_DEV_ARM_VGIC_USERSPACE_PPIS
This allows userspace to query which PPIs it is able to drive via
KVM_IRQ_LINE.
Additionally, introduce a check in kvm_vm_ioctl_irq_line() to reject
any PPIs not in the userspace mask.
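As a rough sketch of the resulting userspace flow (the vgic device fd,
VM fd, target vcpu and PPI number below are purely illustrative, and the
usual KVM_ARM_IRQ_TYPE_PPI encoding of the irq field is assumed), a VMM
could query the mask once and consult it before raising a PPI:

	#include <err.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static void drive_ppi(int vgic_fd, int vm_fd, unsigned int num)
	{
		__u64 ppis[2] = {};
		struct kvm_device_attr attr = {
			.group	= KVM_DEV_ARM_VGIC_GRP_CTRL,
			.attr	= KVM_DEV_ARM_VGIC_USERSPACE_PPIS,
			.addr	= (__u64)(unsigned long)ppis,
		};

		/* Fetch the mask of PPIs userspace is allowed to drive */
		if (ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr))
			err(1, "KVM_DEV_ARM_VGIC_USERSPACE_PPIS");

		/* Only raise the PPI on vcpu 0 if KVM lets userspace drive it */
		if (ppis[num / 64] & (1ULL << (num % 64))) {
			struct kvm_irq_level line = {
				.irq	= (KVM_ARM_IRQ_TYPE_PPI << KVM_ARM_IRQ_TYPE_SHIFT) |
					  (0 << KVM_ARM_IRQ_VCPU_SHIFT) |
					  (num << KVM_ARM_IRQ_NUM_SHIFT),
				.level	= 1,
			};

			ioctl(vm_fd, KVM_IRQ_LINE, &line);
		}
	}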
Signed-off-by: Sascha Bischoff <sascha.bischoff@arm.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Link: https://patch.msgid.link/20260319154937.3619520-40-sascha.bischoff@arm.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
request the initialization of the VGIC, no additional parameter in
kvm_device_attr.addr. Must be called after all VCPUs have been created.
+ KVM_DEV_ARM_VGIC_USERSPACE_PPIS
+ request the mask of userspace-drivable PPIs. Only a subset of the PPIs can
+ be directly driven from userspace with GICv5, and the returned mask
+ informs userspace which PPIs it is allowed to drive via KVM_IRQ_LINE.
+
+ Userspace must allocate a __u64[2] array and point kvm_device_attr.addr at
+ it. When this call returns, the provided memory will be populated with the
+ userspace PPI mask. The lower __u64 contains the mask for the lower 64
+ PPIs, with the remaining 64 in the second __u64.
+
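+ For example, assuming ppis is the __u64[2] array pointed to by
+ kvm_device_attr.addr, PPI number n may be driven from userspace when the
+ following expression is non-zero::
+
+   ppis[n / 64] & (1ULL << (n % 64))
+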
+ This is a read-only attribute; attempts to set it are rejected with
+ -ENXIO.
+
Errors:
======= ========================================================
#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
#define KVM_DEV_ARM_ITS_CTRL_RESET 4
+#define KVM_DEV_ARM_VGIC_USERSPACE_PPIS 5
/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
bool line_status)
{
- u32 irq = irq_level->irq;
unsigned int irq_type, vcpu_id, irq_num;
struct kvm_vcpu *vcpu = NULL;
bool level = irq_level->level;
+ u32 irq = irq_level->irq;
+ unsigned long *mask;
irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
vcpu_id = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
if (irq_num >= VGIC_V5_NR_PRIVATE_IRQS)
return -EINVAL;
+ /*
+ * Only allow PPIs that are explicitly exposed to
+ * userspace to be driven via KVM_IRQ_LINE.
+ */
+ mask = kvm->arch.vgic.gicv5_vm.userspace_ppis;
+ if (!test_bit(irq_num, mask))
+ return -EINVAL;
+
/* Build a GICv5-style IntID here */
irq_num = vgic_v5_make_ppi(irq_num);
} else if (irq_num < VGIC_NR_SGIS ||
.has_attr = vgic_v3_has_attr,
};
+static int vgic_v5_get_userspace_ppis(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ struct vgic_v5_vm *gicv5_vm = &dev->kvm->arch.vgic.gicv5_vm;
+ u64 __user *uaddr = (u64 __user *)(long)attr->addr;
+ int ret;
+
+ guard(mutex)(&dev->kvm->arch.config_lock);
+
+ /*
+ * We either support 64 or 128 PPIs. In the former case, we need to
+ * return 0s for the second 64 bits as we have no storage backing those.
+ */
+ ret = put_user(bitmap_read(gicv5_vm->userspace_ppis, 0, 64), uaddr);
+ if (ret)
+ return ret;
+ uaddr++;
+
+ if (VGIC_V5_NR_PRIVATE_IRQS == 128)
+ ret = put_user(bitmap_read(gicv5_vm->userspace_ppis, 64, 64), uaddr);
+ else
+ ret = put_user(0, uaddr);
+
+ return ret;
+}
+
static int vgic_v5_set_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
switch (attr->attr) {
case KVM_DEV_ARM_VGIC_CTRL_INIT:
return vgic_set_common_attr(dev, attr);
+ case KVM_DEV_ARM_VGIC_USERSPACE_PPIS:
default:
return -ENXIO;
}
switch (attr->attr) {
case KVM_DEV_ARM_VGIC_CTRL_INIT:
return vgic_get_common_attr(dev, attr);
+ case KVM_DEV_ARM_VGIC_USERSPACE_PPIS:
+ return vgic_v5_get_userspace_ppis(dev, attr);
default:
return -ENXIO;
}
switch (attr->attr) {
case KVM_DEV_ARM_VGIC_CTRL_INIT:
return 0;
+ case KVM_DEV_ARM_VGIC_USERSPACE_PPIS:
+ return 0;
default:
return -ENXIO;
}
}
}
+ /* Only allow userspace to drive the SW_PPI, and only if it is implemented. */
+ bitmap_zero(kvm->arch.vgic.gicv5_vm.userspace_ppis,
+ VGIC_V5_NR_PRIVATE_IRQS);
+ __assign_bit(GICV5_ARCH_PPI_SW_PPI,
+ kvm->arch.vgic.gicv5_vm.userspace_ppis, true);
+ bitmap_and(kvm->arch.vgic.gicv5_vm.userspace_ppis,
+ kvm->arch.vgic.gicv5_vm.userspace_ppis,
+ ppi_caps.impl_ppi_mask, VGIC_V5_NR_PRIVATE_IRQS);
+
return 0;
}
*/
DECLARE_BITMAP(vgic_ppi_mask, VGIC_V5_NR_PRIVATE_IRQS);
+ /* A mask of the PPIs that are exposed for userspace to drive. */
+ DECLARE_BITMAP(userspace_ppis, VGIC_V5_NR_PRIVATE_IRQS);
+
/*
* The HMR itself is handled by the hardware, but we still need to have
* a mask that we can use when merging in pending state (only the state
#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
#define KVM_DEV_ARM_ITS_CTRL_RESET 4
+#define KVM_DEV_ARM_VGIC_USERSPACE_PPIS 5
/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0