]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
LoongArch: KVM: Implement vcpu interrupt operations
authorTianrui Zhao <zhaotianrui@loongson.cn>
Mon, 2 Oct 2023 02:01:28 +0000 (10:01 +0800)
committerHuacai Chen <chenhuacai@loongson.cn>
Mon, 2 Oct 2023 02:01:28 +0000 (10:01 +0800)
Implement vcpu interrupt operations such as vcpu set irq and vcpu
clear irq, using set_gcsr_estat() to inject the irq that is parsed
from the irq-pending bitmap.

Reviewed-by: Bibo Mao <maobibo@loongson.cn>
Tested-by: Huacai Chen <chenhuacai@loongson.cn>
Signed-off-by: Tianrui Zhao <zhaotianrui@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
arch/loongarch/kvm/interrupt.c [new file with mode: 0644]
arch/loongarch/kvm/vcpu.c

diff --git a/arch/loongarch/kvm/interrupt.c b/arch/loongarch/kvm/interrupt.c
new file mode 100644 (file)
index 0000000..4c3f22d
--- /dev/null
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <asm/kvm_csr.h>
+#include <asm/kvm_vcpu.h>
+
+/*
+ * Map interrupt priority bit numbers (INT_*) to the CPU irq status
+ * bits (CPU_*) that are actually written into guest ESTAT / GINTC.
+ * Entries not listed here are zero-initialized (deliver/clear no-op).
+ */
+static unsigned int priority_to_irq[EXCCODE_INT_NUM] = {
+       [INT_TI]        = CPU_TIMER,
+       [INT_IPI]       = CPU_IPI,
+       [INT_SWI0]      = CPU_SIP0,
+       [INT_SWI1]      = CPU_SIP1,
+       [INT_HWI0]      = CPU_IP0,
+       [INT_HWI1]      = CPU_IP1,
+       [INT_HWI2]      = CPU_IP2,
+       [INT_HWI3]      = CPU_IP3,
+       [INT_HWI4]      = CPU_IP4,
+       [INT_HWI5]      = CPU_IP5,
+       [INT_HWI6]      = CPU_IP6,
+       [INT_HWI7]      = CPU_IP7,
+};
+
+/*
+ * Deliver one queued interrupt to the guest: drop its bit from
+ * irq_pending, then raise the corresponding line — software, timer
+ * and IPI interrupts go through guest ESTAT, hardware interrupts
+ * through GINTC.  Always returns 1.
+ */
+static int kvm_irq_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
+{
+       unsigned int irq = 0;
+
+       clear_bit(priority, &vcpu->arch.irq_pending);
+       /* Out-of-range priorities leave irq == 0 and hit the default case */
+       if (priority < EXCCODE_INT_NUM)
+               irq = priority_to_irq[priority];
+
+       switch (priority) {
+       case INT_TI:
+       case INT_IPI:
+       case INT_SWI0:
+       case INT_SWI1:
+               /* Injected directly into the guest ESTAT CSR */
+               set_gcsr_estat(irq);
+               break;
+
+       case INT_HWI0 ... INT_HWI7:
+               /* Hardware interrupt lines are injected via GINTC */
+               set_csr_gintc(irq);
+               break;
+
+       default:
+               break;
+       }
+
+       return 1;
+}
+
+/*
+ * Inverse of kvm_irq_deliver(): drop the bit from irq_clear and lower
+ * the matching interrupt line in guest ESTAT (software/timer/IPI) or
+ * GINTC (hardware interrupts).  Always returns 1.
+ */
+static int kvm_irq_clear(struct kvm_vcpu *vcpu, unsigned int priority)
+{
+       unsigned int irq = 0;
+
+       clear_bit(priority, &vcpu->arch.irq_clear);
+       /* Out-of-range priorities leave irq == 0 and hit the default case */
+       if (priority < EXCCODE_INT_NUM)
+               irq = priority_to_irq[priority];
+
+       switch (priority) {
+       case INT_TI:
+       case INT_IPI:
+       case INT_SWI0:
+       case INT_SWI1:
+               clear_gcsr_estat(irq);
+               break;
+
+       case INT_HWI0 ... INT_HWI7:
+               clear_csr_gintc(irq);
+               break;
+
+       default:
+               break;
+       }
+
+       return 1;
+}
+
+/*
+ * Flush all queued interrupt state into the guest.  Clear requests
+ * (irq_clear) are processed before set requests (irq_pending), so a
+ * clear+set pair on the same line nets out to the line being raised.
+ * Each scan stops at the first set bit above INT_IPI, so only
+ * priorities 0..INT_IPI are ever delivered here.
+ */
+void kvm_deliver_intr(struct kvm_vcpu *vcpu)
+{
+       unsigned int priority;
+       unsigned long *pending = &vcpu->arch.irq_pending;
+       unsigned long *pending_clr = &vcpu->arch.irq_clear;
+
+       if (!(*pending) && !(*pending_clr))
+               return;
+
+       if (*pending_clr) {
+               priority = __ffs(*pending_clr);
+               while (priority <= INT_IPI) {
+                       kvm_irq_clear(vcpu, priority);
+                       /* find_next_bit() returns the size when no bit remains */
+                       priority = find_next_bit(pending_clr,
+                                       BITS_PER_BYTE * sizeof(*pending_clr),
+                                       priority + 1);
+               }
+       }
+
+       if (*pending) {
+               priority = __ffs(*pending);
+               while (priority <= INT_IPI) {
+                       kvm_irq_deliver(vcpu, priority);
+                       priority = find_next_bit(pending,
+                                       BITS_PER_BYTE * sizeof(*pending),
+                                       priority + 1);
+               }
+       }
+}
+
+/* Report whether a timer interrupt (INT_TI) is queued for this vCPU */
+int kvm_pending_timer(struct kvm_vcpu *vcpu)
+{
+       int timer_queued;
+
+       timer_queued = test_bit(INT_TI, &vcpu->arch.irq_pending);
+
+       return timer_queued;
+}
+
+/*
+ * Only support illegal instruction or illegal Address Error exception,
+ * Other exceptions are injected by hardware in kvm mode
+ */
+/*
+ * Only support illegal instruction or illegal Address Error exception,
+ * Other exceptions are injected by hardware in kvm mode
+ */
+static void _kvm_deliver_exception(struct kvm_vcpu *vcpu,
+                               unsigned int code, unsigned int subcode)
+{
+       unsigned long val, vec_size;
+
+       /*
+        * BADV is added for EXCCODE_ADE exception
+        *  Use PC register (GVA address) if it is an instruction exception
+        *  Else use BADV from host side (GPA address) for a data exception
+        */
+       if (code == EXCCODE_ADE) {
+               if (subcode == EXSUBCODE_ADEF)
+                       val = vcpu->arch.pc;
+               else
+                       val = vcpu->arch.badv;
+               kvm_write_hw_gcsr(LOONGARCH_CSR_BADV, val);
+       }
+
+       /* Set faulting instruction word (BADI) for the guest handler */
+       kvm_write_hw_gcsr(LOONGARCH_CSR_BADI, vcpu->arch.badi);
+
+       /*
+        * Save CRMD in PRMD
+        * Set IRQ disabled and PLV0 with CRMD
+        */
+       val = kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD);
+       kvm_write_hw_gcsr(LOONGARCH_CSR_PRMD, val);
+       val = val & ~(CSR_CRMD_PLV | CSR_CRMD_IE);
+       kvm_write_hw_gcsr(LOONGARCH_CSR_CRMD, val);
+
+       /* Set exception PC address */
+       kvm_write_hw_gcsr(LOONGARCH_CSR_ERA, vcpu->arch.pc);
+
+       /*
+        * Set exception code
+        * Exception and interrupt can be injected at the same time
+        * Hardware will handle exception first and then external interrupt
+        * Exception code is Ecode in ESTAT[16:21]
+        * Interrupt code in ESTAT[0:12]
+        * NOTE(review): 'code' is OR'ed in without an explicit shift into
+        * the Ecode field — confirm EXCCODE_* encoding lines up with
+        * CSR_ESTAT_EXC.
+        */
+       val = kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT);
+       val = (val & ~CSR_ESTAT_EXC) | code;
+       kvm_write_hw_gcsr(LOONGARCH_CSR_ESTAT, val);
+
+       /*
+        * Calculate exception entry address:
+        * EENTRY + code * vector size, where a nonzero ECFG.VS gives a
+        * per-vector spacing of (1 << VS) * 4 bytes
+        */
+       val = kvm_read_hw_gcsr(LOONGARCH_CSR_ECFG);
+       vec_size = (val & CSR_ECFG_VS) >> CSR_ECFG_VS_SHIFT;
+       if (vec_size)
+               vec_size = (1 << vec_size) * 4;
+       val =  kvm_read_hw_gcsr(LOONGARCH_CSR_EENTRY);
+       vcpu->arch.pc = val + code * vec_size;
+}
+
+/*
+ * Inject the lowest-numbered pending exception into the guest, then
+ * clear the pending record and its subcode.
+ */
+void kvm_deliver_exception(struct kvm_vcpu *vcpu)
+{
+       unsigned long *pending = &vcpu->arch.exception_pending;
+       unsigned int code;
+
+       if (!*pending)
+               return;
+
+       code = __ffs(*pending);
+       _kvm_deliver_exception(vcpu, code, vcpu->arch.esubcode);
+       *pending = 0;
+       vcpu->arch.esubcode = 0;
+}
index 0f19c8b0c02869bad667d10eebf8c4cf0b68c5d1..7576f5a735ea981d41c01680852e8ac734dec3d3 100644 (file)
@@ -458,6 +458,44 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
        preempt_enable();
 }
 
+/*
+ * Queue (positive irq number) or dequeue (negative irq number) an
+ * interrupt for @vcpu, then kick it so the change is noticed.
+ * An irq of 0 is rejected with -EINVAL.
+ */
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
+{
+       int intr = (int)irq->irq;
+
+       if (intr == 0) {
+               kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
+               return -EINVAL;
+       }
+
+       if (intr > 0)
+               kvm_queue_irq(vcpu, intr);
+       else
+               kvm_dequeue_irq(vcpu, -intr);
+
+       kvm_vcpu_kick(vcpu);
+
+       return 0;
+}
+
+/*
+ * Async vcpu ioctl entry point.  Only KVM_INTERRUPT is handled here:
+ * the kvm_interrupt struct is copied from userspace and passed on to
+ * kvm_vcpu_ioctl_interrupt().  Any other ioctl returns -ENOIOCTLCMD so
+ * generic KVM code falls back to the regular ioctl path.
+ * NOTE(review): generic KVM dispatches async ioctls before taking the
+ * vcpu mutex — confirm against virt/kvm/kvm_main.c.
+ */
+long kvm_arch_vcpu_async_ioctl(struct file *filp,
+                              unsigned int ioctl, unsigned long arg)
+{
+       void __user *argp = (void __user *)arg;
+       struct kvm_vcpu *vcpu = filp->private_data;
+
+       if (ioctl == KVM_INTERRUPT) {
+               struct kvm_interrupt irq;
+
+               if (copy_from_user(&irq, argp, sizeof(irq)))
+                       return -EFAULT;
+
+               kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);
+
+               return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+       }
+
+       return -ENOIOCTLCMD;
+}
+
 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 {
        return 0;