Implement irqfd that delivers MSI to a vCPU, and vCPU dmsintc IRQ injection.
Add a dmsintc choice to pch_msi_set_irq() to route the MSI IRQ based on its
msg_addr, and implement dmsintc MSI IRQ injection.
Signed-off-by: Song Gao <gaosong@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
};
int kvm_loongarch_register_dmsintc_device(void);
+void dmsintc_inject_irq(struct kvm_vcpu *vcpu);
+int dmsintc_set_irq(struct kvm *kvm, u64 addr, int data, int level);
+int dmsintc_deliver_msi_to_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 vector, int level);
#endif
uint64_t pch_pic_base;
};
+struct kvm_kernel_irq_routing_entry;
int kvm_loongarch_register_pch_pic_device(void);
void pch_pic_set_irq(struct loongarch_pch_pic *s, int irq, int level);
-void pch_msi_set_irq(struct kvm *kvm, int irq, int level);
+int pch_msi_set_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, int level);
#endif /* __ASM_KVM_PCH_PIC_H */
*/
#include <linux/kvm_host.h>
+#include <asm/kvm_csr.h>
#include <asm/kvm_dmsintc.h>
#include <asm/kvm_vcpu.h>
+/*
+ * Drain the vCPU's pending dmsintc vector bitmap and merge it into the
+ * hardware ISR0-ISR3 CSRs so the guest observes the pending vectors.
+ *
+ * vector[] must be zero-initialized: the loop below only writes an entry
+ * when that word of the bitmap has pending bits, yet every entry is read
+ * afterwards (reading an uninitialized slot is undefined behavior).
+ */
+void dmsintc_inject_irq(struct kvm_vcpu *vcpu)
+{
+	unsigned int i;
+	unsigned long vector[4] = { 0 }, old;
+	struct dmsintc_state *ds = &vcpu->arch.dmsintc_state;
+
+	/*
+	 * Atomically claim the pending bits; atomic64_xchg(..., 0) clears
+	 * the bitmap word so each vector is delivered exactly once even if
+	 * new MSIs race in from dmsintc_deliver_msi_to_vcpu().
+	 */
+	for (i = 0; i < 4; i++) {
+		old = atomic64_read(&(ds->vector_map[i]));
+		if (old)
+			vector[i] = atomic64_xchg(&(ds->vector_map[i]), 0);
+	}
+
+	/* OR the claimed vectors into the ISR CSRs, preserving existing bits. */
+	if (vector[0]) {
+		old = kvm_read_hw_gcsr(LOONGARCH_CSR_ISR0);
+		kvm_write_hw_gcsr(LOONGARCH_CSR_ISR0, vector[0] | old);
+	}
+
+	if (vector[1]) {
+		old = kvm_read_hw_gcsr(LOONGARCH_CSR_ISR1);
+		kvm_write_hw_gcsr(LOONGARCH_CSR_ISR1, vector[1] | old);
+	}
+
+	if (vector[2]) {
+		old = kvm_read_hw_gcsr(LOONGARCH_CSR_ISR2);
+		kvm_write_hw_gcsr(LOONGARCH_CSR_ISR2, vector[2] | old);
+	}
+
+	if (vector[3]) {
+		old = kvm_read_hw_gcsr(LOONGARCH_CSR_ISR3);
+		kvm_write_hw_gcsr(LOONGARCH_CSR_ISR3, vector[3] | old);
+	}
+}
+
+/*
+ * Record an MSI vector as pending on @vcpu and kick it so the vector is
+ * injected on the next entry (via dmsintc_inject_irq()).
+ *
+ * Returns 0 on success or when @level is deasserted (message interrupts
+ * are edge-style, so level 0 is a no-op), -EINVAL on a bad vcpu/vector.
+ */
+int dmsintc_deliver_msi_to_vcpu(struct kvm *kvm,
+		struct kvm_vcpu *vcpu, u32 vector, int level)
+{
+	struct kvm_interrupt vcpu_irq;
+	struct dmsintc_state *ds;
+
+	/*
+	 * Validate arguments before touching vcpu->arch: the original code
+	 * computed &vcpu->arch.dmsintc_state ahead of the NULL check, which
+	 * dereferences a NULL vcpu. vector_map covers 256 vectors.
+	 */
+	if (!vcpu || vector >= 256)
+		return -EINVAL;
+	if (!level)
+		return 0;
+
+	ds = &vcpu->arch.dmsintc_state;
+
+	vcpu_irq.irq = INT_AVEC;
+	set_bit(vector, (unsigned long *)&ds->vector_map);
+	kvm_vcpu_ioctl_interrupt(vcpu, &vcpu_irq);
+	kvm_vcpu_kick(vcpu);
+
+	return 0;
+}
+
+/*
+ * Decode an MSI message address into a target cpu and vector, then hand
+ * delivery off to dmsintc_deliver_msi_to_vcpu().
+ *
+ * Returns 0 on success, -ENODEV if the dmsintc device is not registered,
+ * -EINVAL if the decoded cpu is out of range or has no vCPU.
+ */
+int dmsintc_set_irq(struct kvm *kvm, u64 addr, int data, int level)
+{
+	unsigned int irq, cpu;
+	struct kvm_vcpu *vcpu;
+
+	/*
+	 * This function is exported via kvm_dmsintc.h; guard the dmsintc
+	 * pointer rather than relying on every caller to check it first.
+	 */
+	if (!kvm->arch.dmsintc)
+		return -ENODEV;
+
+	irq = (addr >> AVEC_IRQ_SHIFT) & AVEC_IRQ_MASK;
+	cpu = (addr >> AVEC_CPU_SHIFT) & kvm->arch.dmsintc->cpu_mask;
+	if (cpu >= KVM_MAX_VCPUS)
+		return -EINVAL;
+	vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu);
+	if (!vcpu)
+		return -EINVAL;
+
+	return dmsintc_deliver_msi_to_vcpu(kvm, vcpu, irq, level);
+}
+
static int kvm_dmsintc_ctrl_access(struct kvm_device *dev,
struct kvm_device_attr *attr, bool is_write)
{
* Copyright (C) 2024 Loongson Technology Corporation Limited
*/
+#include <asm/kvm_dmsintc.h>
#include <asm/kvm_eiointc.h>
#include <asm/kvm_pch_pic.h>
#include <asm/kvm_vcpu.h>
}
/* msi irq handler */
-void pch_msi_set_irq(struct kvm *kvm, int irq, int level)
+/*
+ * Route an MSI either to the dmsintc (when the message address falls in
+ * its window) or to the legacy eiointc. Returns 0 or a dmsintc error.
+ */
+int pch_msi_set_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, int level)
{
-	eiointc_set_irq(kvm->arch.eiointc, irq, level);
+	/* Reassemble the 64-bit MSI message address from the routing entry. */
+	u64 msg_addr = (((u64)e->msi.address_hi) << 32) | e->msi.address_lo;
+
+	/* dmsintc takes the MSI only if present and the address is in its range. */
+	if (cpu_has_msgint && kvm->arch.dmsintc &&
+	    msg_addr >= kvm->arch.dmsintc->msg_addr_base &&
+	    msg_addr < (kvm->arch.dmsintc->msg_addr_base + kvm->arch.dmsintc->msg_addr_size)) {
+		return dmsintc_set_irq(kvm, msg_addr, e->msi.data, level);
+	}
+
+	/* Fallback: legacy delivery, keyed by the MSI data payload as before. */
+	eiointc_set_irq(kvm->arch.eiointc, e->msi.data, level);
+
+	return 0;
}
static int loongarch_pch_pic_read(struct loongarch_pch_pic *s, gpa_t addr, int len, void *val)
#include <linux/errno.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>
+#include <asm/kvm_dmsintc.h>
static unsigned int priority_to_irq[EXCCODE_INT_NUM] = {
[INT_TI] = CPU_TIMER,
irq = priority_to_irq[priority];
if (kvm_guest_has_msgint(&vcpu->arch) && (priority == INT_AVEC)) {
+ dmsintc_inject_irq(vcpu);
set_gcsr_estat(irq);
return 1;
}
if (!level)
return -1;
- pch_msi_set_irq(kvm, e->msi.data, level);
-
- return 0;
+ return pch_msi_set_irq(kvm, e, level);
}
/*
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level, bool line_status)
{
+ if (!level)
+ return -EWOULDBLOCK;
+
switch (e->type) {
case KVM_IRQ_ROUTING_IRQCHIP:
pch_pic_set_irq(kvm->arch.pch_pic, e->irqchip.pin, level);
return 0;
case KVM_IRQ_ROUTING_MSI:
- pch_msi_set_irq(kvm, e->msi.data, level);
- return 0;
+ return pch_msi_set_irq(kvm, e, level);
default:
return -EWOULDBLOCK;
}