]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
LoongArch: Support LoongArch AVEC
authorTianyang Zhang <zhangtianyang@loongson.cn>
Tue, 4 Jun 2024 12:50:26 +0000 (20:50 +0800)
committerThomas Gleixner <tglx@linutronix.de>
Sun, 23 Jun 2024 17:09:14 +0000 (19:09 +0200)
Introduce the advanced extended interrupt controllers. This feature will
allow each core to have 256 independent interrupt vectors and MSI
interrupts can be independently routed to any vector on any CPU.

[ tglx: Fixed up coding style. Made on/offline functions void ]

Co-developed-by: Jianmin Lv <lvjianmin@loongson.cn>
Signed-off-by: Jianmin Lv <lvjianmin@loongson.cn>
Co-developed-by: Liupu Wang <wangliupu@loongson.cn>
Signed-off-by: Liupu Wang <wangliupu@loongson.cn>
Signed-off-by: Tianyang Zhang <zhangtianyang@loongson.cn>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240604125026.18745-1-zhangtianyang@loongson.cn
14 files changed:
arch/loongarch/Kconfig
arch/loongarch/include/asm/cpu-features.h
arch/loongarch/include/asm/cpu.h
arch/loongarch/include/asm/hw_irq.h
arch/loongarch/include/asm/irq.h
arch/loongarch/include/asm/loongarch.h
arch/loongarch/include/asm/smp.h
arch/loongarch/kernel/cpu-probe.c
arch/loongarch/kernel/smp.c
drivers/irqchip/Makefile
drivers/irqchip/irq-loongarch-avec.c [new file with mode: 0644]
drivers/irqchip/irq-loongarch-cpu.c
drivers/irqchip/irq-loongson-eiointc.c
drivers/irqchip/irq-loongson-pch-msi.c

index e38139c576ee90aa1ee18ff034582043b53fe1dd..a66e49b5a68c5bfb6866763637206ce85da7f906 100644 (file)
@@ -83,6 +83,7 @@ config LOONGARCH
        select GENERIC_ENTRY
        select GENERIC_GETTIMEOFDAY
        select GENERIC_IOREMAP if !ARCH_IOREMAP
+       select GENERIC_IRQ_MATRIX_ALLOCATOR
        select GENERIC_IRQ_MULTI_HANDLER
        select GENERIC_IRQ_PROBE
        select GENERIC_IRQ_SHOW
index 2eafe6a6aca8189c88617c18da45c373294a6aa8..16a716f88a5ca2c2d0117184d4a6b55046fe44d7 100644 (file)
@@ -65,5 +65,6 @@
 #define cpu_has_guestid                cpu_opt(LOONGARCH_CPU_GUESTID)
 #define cpu_has_hypervisor     cpu_opt(LOONGARCH_CPU_HYPERVISOR)
 #define cpu_has_ptw            cpu_opt(LOONGARCH_CPU_PTW)
+#define cpu_has_avecint                cpu_opt(LOONGARCH_CPU_AVECINT)
 
 #endif /* __ASM_CPU_FEATURES_H */
index 48b9f7168bcca03f92a63f891a346f88055b7bfc..843f9c4ec98071b818c615272734d2cc8428b7e5 100644 (file)
@@ -99,6 +99,7 @@ enum cpu_type_enum {
 #define CPU_FEATURE_GUESTID            24      /* CPU has GuestID feature */
 #define CPU_FEATURE_HYPERVISOR         25      /* CPU has hypervisor (running in VM) */
 #define CPU_FEATURE_PTW                        26      /* CPU has hardware page table walker */
+#define CPU_FEATURE_AVECINT            27      /* CPU has avec interrupt */
 
 #define LOONGARCH_CPU_CPUCFG           BIT_ULL(CPU_FEATURE_CPUCFG)
 #define LOONGARCH_CPU_LAM              BIT_ULL(CPU_FEATURE_LAM)
@@ -127,5 +128,6 @@ enum cpu_type_enum {
 #define LOONGARCH_CPU_GUESTID          BIT_ULL(CPU_FEATURE_GUESTID)
 #define LOONGARCH_CPU_HYPERVISOR       BIT_ULL(CPU_FEATURE_HYPERVISOR)
 #define LOONGARCH_CPU_PTW              BIT_ULL(CPU_FEATURE_PTW)
+#define LOONGARCH_CPU_AVECINT          BIT_ULL(CPU_FEATURE_AVECINT)
 
 #endif /* _ASM_CPU_H */
index af4f4e8fbd858f701490f4e590e7ec18c51085e1..772692e765c019f5d0826759ffae376d793b311c 100644 (file)
@@ -9,6 +9,16 @@
 
 extern atomic_t irq_err_count;
 
+/*
+ * 256 vectors Map:
+ *
+ * 0 - 15: mapping legacy IPs, e.g. IP0-12.
+ * 16 - 255: mapping a vector for external IRQ.
+ *
+ */
+#define NR_VECTORS             256
+#define IRQ_MATRIX_BITS                NR_VECTORS
+#define NR_LEGACY_VECTORS      16
 /*
  * interrupt-retrigger: NOP for now. This may not be appropriate for all
  * machines, we'll see ...
index 480418bc5071a5ebba50192744fe0be64b719d32..cf3b635a9b86ddf30e38d9d31192ec9721532885 100644 (file)
@@ -65,7 +65,7 @@ extern struct acpi_vector_group msi_group[MAX_IO_PICS];
 #define LOONGSON_LPC_LAST_IRQ          (LOONGSON_LPC_IRQ_BASE + 15)
 
 #define LOONGSON_CPU_IRQ_BASE          16
-#define LOONGSON_CPU_LAST_IRQ          (LOONGSON_CPU_IRQ_BASE + 14)
+#define LOONGSON_CPU_LAST_IRQ          (LOONGSON_CPU_IRQ_BASE + 15)
 
 #define LOONGSON_PCH_IRQ_BASE          64
 #define LOONGSON_PCH_ACPI_IRQ          (LOONGSON_PCH_IRQ_BASE + 47)
@@ -101,6 +101,16 @@ int pch_msi_acpi_init(struct irq_domain *parent,
                                        struct acpi_madt_msi_pic *acpi_pchmsi);
 int pch_pic_acpi_init(struct irq_domain *parent,
                                        struct acpi_madt_bio_pic *acpi_pchpic);
+
+#ifdef CONFIG_ACPI
+int __init pch_msi_acpi_init_v2(struct irq_domain *parent,
+               struct acpi_madt_msi_pic *pch_msi_entry);
+int __init loongarch_avec_acpi_init(struct irq_domain *parent);
+void complete_irq_moving(void);
+void loongarch_avec_offline_cpu(unsigned int cpu);
+void loongarch_avec_online_cpu(unsigned int cpu);
+#endif
+
 int find_pch_pic(u32 gsi);
 struct fwnode_handle *get_pch_msi_handle(int pci_segment);
 
index eb09adda54b7c855eb56232b4442b79fadd09d46..16a91035997798c12ce83c1439b0373b8d367518 100644 (file)
@@ -72,7 +72,6 @@
 #define  CPUCFG1_RPLV                  BIT(23)
 #define  CPUCFG1_HUGEPG                        BIT(24)
 #define  CPUCFG1_CRC32                 BIT(25)
-#define  CPUCFG1_MSGINT                        BIT(26)
 
 #define LOONGARCH_CPUCFG2              0x2
 #define  CPUCFG2_FP                    BIT(0)
 #define  CSR_ESTAT_EXC_WIDTH           6
 #define  CSR_ESTAT_EXC                 (_ULCAST_(0x3f) << CSR_ESTAT_EXC_SHIFT)
 #define  CSR_ESTAT_IS_SHIFT            0
-#define  CSR_ESTAT_IS_WIDTH            14
-#define  CSR_ESTAT_IS                  (_ULCAST_(0x3fff) << CSR_ESTAT_IS_SHIFT)
+#define  CSR_ESTAT_IS_WIDTH            15
+#define  CSR_ESTAT_IS                  (_ULCAST_(0x7fff) << CSR_ESTAT_IS_SHIFT)
 
 #define LOONGARCH_CSR_ERA              0x6     /* ERA */
 
 #define CSR_FWPC_SKIP_SHIFT            16
 #define CSR_FWPC_SKIP                  (_ULCAST_(1) << CSR_FWPC_SKIP_SHIFT)
 
+#define LOONGARCH_CSR_IRR0             0xa0
+#define LOONGARCH_CSR_IRR1             0xa1
+#define LOONGARCH_CSR_IRR2             0xa2
+#define LOONGARCH_CSR_IRR3             0xa3
+#define LOONGARCH_CSR_IRR_BASE         LOONGARCH_CSR_IRR0
+
+#define        LOONGARCH_CSR_ILR               0xa4
+
 /*
  * CSR_ECFG IM
  */
-#define ECFG0_IM               0x00001fff
+#define ECFG0_IM               0x00005fff
 #define ECFGB_SIP0             0
 #define ECFGF_SIP0             (_ULCAST_(1) << ECFGB_SIP0)
 #define ECFGB_SIP1             1
 #define  IOCSRF_EIODECODE              BIT_ULL(9)
 #define  IOCSRF_FLATMODE               BIT_ULL(10)
 #define  IOCSRF_VM                     BIT_ULL(11)
+#define  IOCSRF_AVEC                   BIT_ULL(15)
 
 #define LOONGARCH_IOCSR_VENDOR         0x10
 
 #define LOONGARCH_IOCSR_MISC_FUNC      0x420
 #define  IOCSR_MISC_FUNC_TIMER_RESET   BIT_ULL(21)
 #define  IOCSR_MISC_FUNC_EXT_IOI_EN    BIT_ULL(48)
+#define  IOCSR_MISC_FUNC_AVEC_EN       BIT_ULL(51)
 
 #define LOONGARCH_IOCSR_CPUTEMP                0x428
 
@@ -1375,9 +1384,10 @@ __BUILD_CSR_OP(tlbidx)
 #define INT_TI         11      /* Timer */
 #define INT_IPI                12
 #define INT_NMI                13
+#define INT_AVEC       14
 
 /* ExcCodes corresponding to interrupts */
-#define EXCCODE_INT_NUM                (INT_NMI + 1)
+#define EXCCODE_INT_NUM                (INT_AVEC + 1)
 #define EXCCODE_INT_START      64
 #define EXCCODE_INT_END                (EXCCODE_INT_START + EXCCODE_INT_NUM - 1)
 
index 278700cfee887b024c230e5fc4de5c4bac822b45..2399004596a3620d20d10eaa4ba831655ae40f1a 100644 (file)
@@ -69,9 +69,11 @@ extern int __cpu_logical_map[NR_CPUS];
 #define ACTION_BOOT_CPU        0
 #define ACTION_RESCHEDULE      1
 #define ACTION_CALL_FUNCTION   2
+#define ACTION_CLEAR_VECT      3
 #define SMP_BOOT_CPU           BIT(ACTION_BOOT_CPU)
 #define SMP_RESCHEDULE         BIT(ACTION_RESCHEDULE)
 #define SMP_CALL_FUNCTION      BIT(ACTION_CALL_FUNCTION)
+#define SMP_CLEAR_VECT         BIT(ACTION_CLEAR_VECT)
 
 struct secondary_data {
        unsigned long stack;
index 55320813ee0819f0f23429361b7fc9722b710d65..3b2e72e8f9bd246c4d001dafcb66d1deeef1b48c 100644 (file)
@@ -106,7 +106,6 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
                elf_hwcap |= HWCAP_LOONGARCH_CRC32;
        }
 
-
        config = read_cpucfg(LOONGARCH_CPUCFG2);
        if (config & CPUCFG2_LAM) {
                c->options |= LOONGARCH_CPU_LAM;
@@ -176,6 +175,8 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
                c->options |= LOONGARCH_CPU_EIODECODE;
        if (config & IOCSRF_VM)
                c->options |= LOONGARCH_CPU_HYPERVISOR;
+       if (config & IOCSRF_AVEC)
+               c->options |= LOONGARCH_CPU_AVECINT;
 
        config = csr_read32(LOONGARCH_CSR_ASID);
        config = (config & CSR_ASID_BIT) >> CSR_ASID_BIT_SHIFT;
index 0dfe2388ef413b673185ab88951b06c532559fff..6dfedef306f34b6922362ce120f7dd32339d2816 100644 (file)
@@ -234,6 +234,9 @@ static irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
                per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
        }
 
+       if (action & SMP_CLEAR_VECT)
+               complete_irq_moving();
+
        return IRQ_HANDLED;
 }
 
@@ -388,6 +391,7 @@ int loongson_cpu_disable(void)
        irq_migrate_all_off_this_cpu();
        clear_csr_ecfg(ECFG0_IM);
        local_irq_restore(flags);
+       loongarch_avec_offline_cpu(cpu);
        local_flush_tlb_all();
 
        return 0;
@@ -566,6 +570,7 @@ asmlinkage void start_secondary(void)
         * early is dangerous.
         */
        WARN_ON_ONCE(!irqs_disabled());
+       loongarch_avec_online_cpu(cpu);
        loongson_smp_finish();
 
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
index 9f6f88274becc226342f3c8806a923ef87d7c5a5..1062e713cea4d4034beb6a2443c4866806f68941 100644 (file)
@@ -109,7 +109,7 @@ obj-$(CONFIG_LS1X_IRQ)                      += irq-ls1x.o
 obj-$(CONFIG_TI_SCI_INTR_IRQCHIP)      += irq-ti-sci-intr.o
 obj-$(CONFIG_TI_SCI_INTA_IRQCHIP)      += irq-ti-sci-inta.o
 obj-$(CONFIG_TI_PRUSS_INTC)            += irq-pruss-intc.o
-obj-$(CONFIG_IRQ_LOONGARCH_CPU)                += irq-loongarch-cpu.o
+obj-$(CONFIG_IRQ_LOONGARCH_CPU)                += irq-loongarch-cpu.o irq-loongarch-avec.o
 obj-$(CONFIG_LOONGSON_LIOINTC)         += irq-loongson-liointc.o
 obj-$(CONFIG_LOONGSON_EIOINTC)         += irq-loongson-eiointc.o
 obj-$(CONFIG_LOONGSON_HTPIC)           += irq-loongson-htpic.o
diff --git a/drivers/irqchip/irq-loongarch-avec.c b/drivers/irqchip/irq-loongarch-avec.c
new file mode 100644 (file)
index 0000000..4cd9079
--- /dev/null
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Loongson Technologies, Inc.
+ */
+
+#include <linux/cpuhotplug.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/radix-tree.h>
+#include <linux/spinlock.h>
+
+#include <asm/loongarch.h>
+#include <asm/setup.h>
+
+#define VECTORS_PER_REG                64
+#define ILR_INVALID_MASK       0x80000000UL
+#define ILR_VECTOR_MASK                0xffUL
+#define AVEC_MSG_OFFSET                0x100000
+
+static phys_addr_t msi_base_v2;
+static DEFINE_PER_CPU(struct irq_desc * [NR_VECTORS], irq_map);
+
+struct pending_list {
+       struct list_head        head;
+};
+
+static DEFINE_PER_CPU(struct pending_list, pending_list);
+
+struct loongarch_avec_chip {
+       struct fwnode_handle    *fwnode;
+       struct irq_domain       *domain;
+       struct irq_matrix       *vector_matrix;
+       raw_spinlock_t          lock;
+};
+
+static struct loongarch_avec_chip loongarch_avec;
+
+struct loongarch_avec_data {
+       struct list_head        entry;
+       unsigned int            cpu;
+       unsigned int            vec;
+       unsigned int            prev_cpu;
+       unsigned int            prev_vec;
+       unsigned int            moving          : 1,
+                               managed         : 1;
+};
+
+static struct cpumask intersect_mask;
+
+/*
+ * Allocate a vector for @irqd from the global matrix, constrained to @dest.
+ * Returns the vector number on success or a negative errno; *cpu is set to
+ * the CPU the vector was allocated on. Caller must hold loongarch_avec.lock.
+ */
+static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest,
+                            unsigned int *cpu)
+{
+       return irq_matrix_alloc(loongarch_avec.vector_matrix, dest, false, cpu);
+}
+
+/*
+ * AVEC provides no per-vector mask/ack operation at the CPU level; these
+ * callbacks are intentionally empty so the generic edge-irq flow can run.
+ * NOTE(review): assumes the hardware needs no explicit per-vector ack —
+ * confirm against the hardware manual.
+ */
+static inline void loongarch_avec_ack_irq(struct irq_data *d)
+{
+}
+
+static inline void loongarch_avec_unmask_irq(struct irq_data *d)
+{
+}
+
+static inline void loongarch_avec_mask_irq(struct irq_data *d)
+{
+}
+
+/*
+ * Finish switching @adata to its new CPU/vector: if the previous CPU is
+ * still online, queue the entry on that CPU's pending list, mark the move
+ * as in flight and send SMP_CLEAR_VECT so complete_irq_moving() releases
+ * the old vector there. Caller must hold loongarch_avec.lock.
+ */
+static void loongarch_avec_sync(struct loongarch_avec_data *adata)
+{
+       struct pending_list *plist;
+
+       if (cpu_online(adata->prev_cpu)) {
+               plist = per_cpu_ptr(&pending_list, adata->prev_cpu);
+               list_add_tail(&adata->entry, &plist->head);
+               adata->moving = true;
+               loongson_send_ipi_single(adata->prev_cpu, SMP_CLEAR_VECT);
+       }
+       adata->prev_cpu = adata->cpu;
+       adata->prev_vec = adata->vec;
+}
+
+/*
+ * Move the interrupt to a CPU in @dest: allocate a new vector on the
+ * target CPU, update the per-CPU irq_map and queue the old vector for
+ * cleanup on the previous CPU via loongarch_avec_sync().
+ * Returns IRQ_SET_MASK_OK, 0 when nothing needs to change, or a negative
+ * errno.
+ */
+static int loongarch_avec_set_affinity(struct irq_data *data, const struct cpumask *dest,
+                                      bool force)
+{
+       struct loongarch_avec_data *adata;
+       unsigned int cpu, vector;
+       unsigned long flags;
+       int ret;
+
+       raw_spin_lock_irqsave(&loongarch_avec.lock, flags);
+       adata = irq_data_get_irq_chip_data(data);
+
+       /* Already on an online CPU inside @dest: nothing to do */
+       if (adata->vec && cpu_online(adata->cpu) && cpumask_test_cpu(adata->cpu, dest)) {
+               raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags);
+               return 0;
+       }
+
+       /*
+        * A previous move is still in flight; caller must retry. Fix: drop
+        * the lock before bailing out (the -EBUSY path leaked the lock).
+        */
+       if (adata->moving) {
+               raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags);
+               return -EBUSY;
+       }
+
+       cpumask_and(&intersect_mask, dest, cpu_online_mask);
+
+       ret = assign_irq_vector(data, &intersect_mask, &cpu);
+       if (ret < 0) {
+               raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags);
+               return ret;
+       }
+       vector = ret;
+       adata->cpu = cpu;
+       adata->vec = vector;
+       per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(data);
+       loongarch_avec_sync(adata);
+
+       raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags);
+       irq_data_update_effective_affinity(data, cpumask_of(cpu));
+
+       return IRQ_SET_MASK_OK;
+}
+
+/*
+ * Encode the target CPU and vector into the MSI address on top of the AVEC
+ * doorbell base (msi_base_v2): bits [11:4] vector, bits [27:12] physical
+ * CPU id. NOTE(review): bit layout inferred from the shifts/masks here —
+ * confirm against the hardware manual.
+ */
+static void loongarch_avec_compose_msg(struct irq_data *d,
+               struct msi_msg *msg)
+{
+       struct loongarch_avec_data *avec_data = irq_data_get_irq_chip_data(d);
+
+       msg->address_hi = 0x0;
+       /* Fix: mask the mapped physical CPU id, not the logical index */
+       msg->address_lo = msi_base_v2 | ((avec_data->vec & 0xff) << 4) |
+                         ((cpu_logical_map(avec_data->cpu) & 0xffff) << 12);
+       msg->data = 0x0;
+}
+
+/* irq_chip for the per-CPU advanced extended (AVEC) interrupt vectors */
+static struct irq_chip loongarch_avec_controller = {
+       .name                   = "CORE_AVEC",
+       .irq_ack                = loongarch_avec_ack_irq,
+       .irq_mask               = loongarch_avec_mask_irq,
+       .irq_unmask             = loongarch_avec_unmask_irq,
+       .irq_set_affinity       = loongarch_avec_set_affinity,
+       .irq_compose_msi_msg    = loongarch_avec_compose_msg,
+};
+
+/*
+ * SMP_CLEAR_VECT IPI handler part: walk this CPU's pending-move list and
+ * release vectors whose interrupt is no longer set in the IRR. If a vector
+ * is still pending in hardware, re-kick this CPU and retry later.
+ */
+void complete_irq_moving(void)
+{
+       struct pending_list *plist = this_cpu_ptr(&pending_list);
+       struct loongarch_avec_data *adata, *tmp;
+       int cpu, vector, bias;
+       u64 irr;
+
+       raw_spin_lock(&loongarch_avec.lock);
+
+       list_for_each_entry_safe(adata, tmp, &plist->head, entry) {
+               cpu = adata->prev_cpu;
+               vector = adata->prev_vec;
+               bias = vector / VECTORS_PER_REG;
+               /*
+                * Select the 64-bit IRR register holding this vector. CSR
+                * numbers must be compile-time constants, hence the switch.
+                * Fix: each case needs a break — the fallthrough made every
+                * lookup read IRR3 regardless of the vector.
+                */
+               switch (bias) {
+               case 0:
+                       irr = csr_read64(LOONGARCH_CSR_IRR0);
+                       break;
+               case 1:
+                       irr = csr_read64(LOONGARCH_CSR_IRR1);
+                       break;
+               case 2:
+                       irr = csr_read64(LOONGARCH_CSR_IRR2);
+                       break;
+               case 3:
+                       irr = csr_read64(LOONGARCH_CSR_IRR3);
+                       break;
+               default:
+                       continue;
+               }
+
+               if (irr & (1UL << (vector % VECTORS_PER_REG))) {
+                       loongson_send_ipi_single(cpu, SMP_CLEAR_VECT);
+                       continue;
+               }
+               list_del(&adata->entry);
+               irq_matrix_free(loongarch_avec.vector_matrix, cpu, vector, adata->managed);
+               this_cpu_write(irq_map[vector], NULL);
+               adata->moving = 0;
+       }
+       raw_spin_unlock(&loongarch_avec.lock);
+}
+
+/*
+ * Chained handler on the parent INT_AVEC pin: read the latched vector from
+ * ILR and dispatch to the mapped irq_desc on this CPU.
+ */
+static void loongarch_avec_dispatch(struct irq_desc *desc)
+{
+       struct irq_chip *chip = irq_desc_get_chip(desc);
+       unsigned long vector;
+       struct irq_desc *d;
+
+       chained_irq_enter(chip, desc);
+       vector = csr_read64(LOONGARCH_CSR_ILR);
+       /*
+        * Fix: bail out via chained_irq_exit() — the early return left the
+        * parent interrupt unfinished.
+        */
+       if (vector & ILR_INVALID_MASK)
+               goto out;
+
+       vector &= ILR_VECTOR_MASK;
+
+       d = this_cpu_read(irq_map[vector]);
+       if (d) {
+               generic_handle_irq_desc(d);
+       } else {
+               pr_warn("IRQ ERROR:Unexpected irq  occur on cpu %d[vector %ld]\n",
+                       smp_processor_id(), vector);
+       }
+
+out:
+       chained_irq_exit(chip, desc);
+}
+
+/*
+ * Domain .alloc callback: allocate chip data and a matrix vector for each
+ * of the @nr_irqs interrupts and wire them to the AVEC controller.
+ */
+static int loongarch_avec_alloc(struct irq_domain *domain, unsigned int virq,
+                               unsigned int nr_irqs, void *arg)
+{
+       struct loongarch_avec_data *adata;
+       struct irq_data *irqd;
+       unsigned int cpu, vector, i;
+       unsigned long flags;
+       int ret;    /* Fix: was unsigned, so the 'ret < 0' error check never fired */
+
+       raw_spin_lock_irqsave(&loongarch_avec.lock, flags);
+       for (i = 0; i < nr_irqs; i++) {
+               irqd = irq_domain_get_irq_data(domain, virq + i);
+               /*
+                * Fix: GFP_ATOMIC — we hold a raw spinlock with interrupts
+                * disabled, so a sleeping GFP_KERNEL allocation is invalid.
+                */
+               adata = kzalloc(sizeof(*adata), GFP_ATOMIC);
+               if (!adata) {
+                       ret = -ENOMEM;
+                       goto out_unlock;
+               }
+               ret = assign_irq_vector(irqd, cpu_online_mask, &cpu);
+               if (ret < 0) {
+                       kfree(adata);   /* Fix: not yet attached to irqd, was leaked */
+                       goto out_unlock;
+               }
+               vector = ret;
+               adata->prev_cpu = adata->cpu = cpu;
+               adata->prev_vec = adata->vec = vector;
+               adata->managed = irqd_affinity_is_managed(irqd);
+               adata->moving = 0;
+               irq_domain_set_info(domain, virq + i, virq + i, &loongarch_avec_controller,
+                               adata, handle_edge_irq, NULL, NULL);
+               irqd_set_single_target(irqd);
+               irqd_set_affinity_on_activate(irqd);
+
+               per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(irqd);
+       }
+       raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags);
+
+       return 0;
+
+out_unlock:
+       raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags);
+       return ret;
+}
+
+/*
+ * Release the vector(s) held by @irqd: the current one and, if a move was
+ * still in flight, the previous one as well.
+ * Caller must hold loongarch_avec.lock.
+ */
+static void clear_free_vector(struct irq_data *irqd)
+{
+       struct loongarch_avec_data *adata = irq_data_get_irq_chip_data(irqd);
+       bool managed = irqd_affinity_is_managed(irqd);
+
+       per_cpu(irq_map, adata->cpu)[adata->vec] = NULL;
+       irq_matrix_free(loongarch_avec.vector_matrix, adata->cpu, adata->vec, managed);
+       adata->cpu = 0;
+       adata->vec = 0;
+       if (!adata->moving)
+               return;
+
+       /* Fix: use NULL for the pointer slot, not the integer 0 */
+       per_cpu(irq_map, adata->prev_cpu)[adata->prev_vec] = NULL;
+       irq_matrix_free(loongarch_avec.vector_matrix, adata->prev_cpu,
+                       adata->prev_vec, adata->managed);
+       adata->prev_vec = 0;
+       adata->prev_cpu = 0;
+       adata->moving = 0;
+       list_del_init(&adata->entry);
+}
+
+/*
+ * Domain .free callback: release the vectors and the per-irq chip data
+ * allocated in loongarch_avec_alloc() (the adata allocation was leaked,
+ * since irq_domain_reset_irq_data() only clears the pointer).
+ */
+static void loongarch_avec_free(struct irq_domain *domain, unsigned int virq,
+               unsigned int nr_irqs)
+{
+       struct irq_data *d;
+       unsigned long flags;
+       unsigned int i;
+
+       raw_spin_lock_irqsave(&loongarch_avec.lock, flags);
+       for (i = 0; i < nr_irqs; i++) {
+               d = irq_domain_get_irq_data(domain, virq + i);
+               if (d) {
+                       struct loongarch_avec_data *adata = irq_data_get_irq_chip_data(d);
+
+                       clear_free_vector(d);
+                       irq_domain_reset_irq_data(d);
+                       kfree(adata);
+               }
+       }
+       raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags);
+}
+
+/* Domain callbacks: dynamic vector allocation/free backed by the irq matrix */
+static const struct irq_domain_ops loongarch_avec_domain_ops = {
+       .alloc          = loongarch_avec_alloc,
+       .free           = loongarch_avec_free,
+};
+
+/*
+ * Create the global vector matrix (NR_VECTORS per CPU), reserve the first
+ * NR_LEGACY_VECTORS entries for the legacy interrupt pins as system
+ * vectors, then mark the boot CPU online in the matrix.
+ */
+static int __init irq_matrix_init(void)
+{
+       int i;
+
+       loongarch_avec.vector_matrix = irq_alloc_matrix(NR_VECTORS, 0, NR_VECTORS - 1);
+       if (!loongarch_avec.vector_matrix)
+               return -ENOMEM;
+       for (i = 0; i < NR_LEGACY_VECTORS; i++)
+               irq_matrix_assign_system(loongarch_avec.vector_matrix, i, false);
+
+       irq_matrix_online(loongarch_avec.vector_matrix);
+
+       return 0;
+}
+
+/*
+ * Set up the AVEC irqchip: fwnode + radix-tree irq domain, chained handler
+ * on the parent's INT_AVEC pin, the vector matrix, and finally enable AVEC
+ * mode in the IOCSR misc-function register.
+ *
+ * Error unwinding relies on the labels falling through in reverse order of
+ * setup. NOTE(review): the chained handler installed on parent_irq is not
+ * torn down on the matrix-init failure path — confirm this is acceptable.
+ */
+static int __init loongarch_avec_init(struct irq_domain *parent)
+{
+       struct pending_list *plist = per_cpu_ptr(&pending_list, 0);
+       int ret = 0, parent_irq;
+       unsigned long tmp;
+
+       raw_spin_lock_init(&loongarch_avec.lock);
+
+       loongarch_avec.fwnode = irq_domain_alloc_named_fwnode("CORE_AVEC");
+       if (!loongarch_avec.fwnode) {
+               pr_err("Unable to allocate domain handle\n");
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       loongarch_avec.domain = irq_domain_create_tree(loongarch_avec.fwnode,
+                       &loongarch_avec_domain_ops, NULL);
+       if (!loongarch_avec.domain) {
+               pr_err("core-vec: cannot create IRQ domain\n");
+               ret = -ENOMEM;
+               goto out_free_handle;
+       }
+
+       parent_irq = irq_create_mapping(parent, INT_AVEC);
+       if (!parent_irq) {
+               pr_err("Failed to mapping hwirq\n");
+               ret = -EINVAL;
+               goto out_remove_domain;
+       }
+       irq_set_chained_handler_and_data(parent_irq, loongarch_avec_dispatch, NULL);
+
+       ret = irq_matrix_init();
+       if (ret) {
+               pr_err("Failed to init irq matrix\n");
+               goto out_free_matrix;
+       }
+
+       /* Boot CPU pending list; secondary CPUs init theirs when they come online */
+       INIT_LIST_HEAD(&plist->head);
+       /* Enable advanced extended vector mode in hardware */
+       tmp = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
+       tmp |= IOCSR_MISC_FUNC_AVEC_EN;
+       iocsr_write64(tmp, LOONGARCH_IOCSR_MISC_FUNC);
+
+       return ret;
+
+out_free_matrix:
+       kfree(loongarch_avec.vector_matrix);
+out_remove_domain:
+       irq_domain_remove(loongarch_avec.domain);
+out_free_handle:
+       irq_domain_free_fwnode(loongarch_avec.fwnode);
+out:
+       return ret;
+}
+
+/*
+ * CPU hotplug (outgoing path, see loongson_cpu_disable()): take this CPU
+ * offline in the vector matrix, but only when no vector moves are pending
+ * here. NOTE(review): when the pending list is busy the matrix stays
+ * online for this CPU — confirm this is the intended fallback.
+ */
+void loongarch_avec_offline_cpu(unsigned int cpu)
+{
+       struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&loongarch_avec.lock, flags);
+       if (list_empty(&plist->head))
+               irq_matrix_offline(loongarch_avec.vector_matrix);
+       else
+               pr_warn("cpu %d advanced extioi is busy\n", cpu);
+       raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags);
+}
+
+/*
+ * CPU hotplug (incoming path, called from start_secondary() with irqs
+ * disabled): bring this CPU online in the vector matrix and reset its
+ * pending-move list.
+ */
+void loongarch_avec_online_cpu(unsigned int cpu)
+{
+       struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&loongarch_avec.lock, flags);
+
+       irq_matrix_online(loongarch_avec.vector_matrix);
+
+       INIT_LIST_HEAD(&plist->head);
+
+       raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags);
+}
+
+/*
+ * MADT MSI PIC parser: the AVEC doorbell base is the PCH MSI message
+ * address minus AVEC_MSG_OFFSET; then initialize the v2 PCH MSI domain on
+ * top of the AVEC domain.
+ */
+static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
+                                    const unsigned long end)
+{
+       struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
+
+       msi_base_v2 = pchmsi_entry->msg_address - AVEC_MSG_OFFSET;
+       return pch_msi_acpi_init_v2(loongarch_avec.domain, pchmsi_entry);
+}
+
+/* Parse the MADT MSI PIC entries (at most one) to hook up PCH MSI v2 */
+static inline int __init acpi_cascade_irqdomain_init(void)
+{
+       return acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
+}
+
+/*
+ * Entry point called from the CPU irqchip ACPI init: bring up the AVEC
+ * domain, then cascade the PCH MSI v2 domain from the MADT.
+ */
+int __init loongarch_avec_acpi_init(struct irq_domain *parent)
+{
+       int ret;
+
+       ret = loongarch_avec_init(parent);
+       if (ret) {
+               pr_err("Failed to init irq domain\n");
+               return ret;
+       }
+
+       ret = acpi_cascade_irqdomain_init();
+       if (ret)
+               pr_err("Failed to cascade IRQ domain\n");
+
+       return ret;
+}
index 9d8f2c40604310d6d92fdea667c2ea0322ece015..1ecac59925c6ca0f7d57bae12ed84ff0a6515c0c 100644 (file)
@@ -138,7 +138,9 @@ static int __init acpi_cascade_irqdomain_init(void)
        if (r < 0)
                return r;
 
-       return 0;
+       if (cpu_has_avecint)
+               r = loongarch_avec_acpi_init(irq_domain);
+       return r;
 }
 
 static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
index c7ddebf312ad2c1395a2cfaec7dbf24e93e60fb0..1f9a3048813794e11ab8c3b6c8d7f58e6aaa2e12 100644 (file)
@@ -359,6 +359,9 @@ static int __init acpi_cascade_irqdomain_init(void)
        if (r < 0)
                return r;
 
+       if (cpu_has_avecint)
+               return 0;
+
        r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
        if (r < 0)
                return r;
index dd4d699170f4ec5fe731e0e08c48acebb629d70f..1926857f9a41b8e45d0f04247c0b40098b2c4f7c 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/slab.h>
 
 static int nr_pics;
-
 struct pch_msi_data {
        struct mutex    msi_map_lock;
        phys_addr_t     doorbell;
@@ -100,6 +99,17 @@ static struct irq_chip middle_irq_chip = {
        .irq_compose_msi_msg    = pch_msi_compose_msi_msg,
 };
 
+static struct irq_chip pch_msi_irq_chip_v2 = {
+       .name                   = "MSI",
+       .irq_ack                = irq_chip_ack_parent,
+};
+
+static struct msi_domain_info pch_msi_domain_info_v2 = {
+       .flags          = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+                       MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
+       .chip   = &pch_msi_irq_chip_v2,
+};
+
 static int pch_msi_parent_domain_alloc(struct irq_domain *domain,
                                        unsigned int virq, int hwirq)
 {
@@ -268,6 +278,9 @@ struct fwnode_handle *get_pch_msi_handle(int pci_segment)
 {
        int i;
 
+       if (cpu_has_avecint)
+               return pch_msi_handle[0];
+
        for (i = 0; i < MAX_IO_PICS; i++) {
                if (msi_group[i].pci_segment == pci_segment)
                        return pch_msi_handle[i];
@@ -289,4 +302,32 @@ int __init pch_msi_acpi_init(struct irq_domain *parent,
 
        return ret;
 }
+
+/*
+ * Initialize the single v2 PCH MSI domain used with AVEC; its fwnode is
+ * shared across all PCI segments (see get_pch_msi_handle()).
+ * Fix: a fwnode from irq_domain_alloc_named_fwnode() must be released with
+ * irq_domain_free_fwnode(), not kfree(); also clear the stale handle so a
+ * later retry does not short-circuit on a dead pointer.
+ */
+int __init pch_msi_acpi_init_v2(struct irq_domain *parent,
+               struct acpi_madt_msi_pic *msi_entry)
+{
+       struct irq_domain *msi_domain;
+
+       if (pch_msi_handle[0])
+               return 0;
+
+       pch_msi_handle[0] = irq_domain_alloc_named_fwnode("msipic-v2");
+       if (!pch_msi_handle[0]) {
+               pr_err("Unable to allocate domain handle\n");
+               return -ENOMEM;
+       }
+
+       msi_domain = pci_msi_create_irq_domain(pch_msi_handle[0],
+                       &pch_msi_domain_info_v2,
+                       parent);
+       if (!msi_domain) {
+               pr_err("Failed to create PCI MSI domain\n");
+               irq_domain_free_fwnode(pch_msi_handle[0]);
+               pch_msi_handle[0] = NULL;
+               return -ENOMEM;
+       }
+
+       pr_info("IRQ domain MSIPIC-V2 init done.\n");
+       return 0;
+}
 #endif