// SPDX-License-Identifier: GPL-2.0
/*
 * Loongson Extend I/O Interrupt Controller support
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#define pr_fmt(fmt) "eiointc: " fmt

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

#define EIOINTC_REG_NODEMAP	0x14a0
#define EIOINTC_REG_IPMAP	0x14c0
#define EIOINTC_REG_ENABLE	0x1600
#define EIOINTC_REG_BOUNCE	0x1680
#define EIOINTC_REG_ISR		0x1800
#define EIOINTC_REG_ROUTE	0x1c00

#define VEC_REG_COUNT		4
#define VEC_COUNT_PER_REG	64
#define VEC_COUNT		(VEC_REG_COUNT * VEC_COUNT_PER_REG)
#define VEC_REG_IDX(irq_id)	((irq_id) / VEC_COUNT_PER_REG)
#define VEC_REG_BIT(irq_id)	((irq_id) % VEC_COUNT_PER_REG)
#define EIOINTC_ALL_ENABLE	0xffffffff

#define MAX_EIO_NODES		(NR_CPUS / CORES_PER_EIO_NODE)
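
/*
 * Per-controller state: the EIO node this controller sits on, the nodes and
 * CPUs its vectors may be routed to, and the Linux IRQ domain covering its
 * 256 hardware vectors.
 */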
struct eiointc_priv {
	u32			node;
	nodemask_t		node_map;
	cpumask_t		cpuspan_map;
	struct fwnode_handle	*domain_handle;
	struct irq_domain	*eiointc_domain;
};

static int nr_pics;
static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];
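
/* Turn on the extended I/O interrupt feature in the MISC function CSR. */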
static void eiointc_enable(void)
{
	uint64_t misc;

	misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
	misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
	iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
}
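
/* Map a logical CPU to the EIO node that owns its physical core. */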
static int cpu_to_eio_node(int cpu)
{
	return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
}
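
/*
 * Program the route byte of vector @pos for each online CPU whose EIO node
 * is in @node_map. Each 32-bit ROUTE register holds four route bytes; the
 * low nibble is a core bitmap and the high nibble is the destination node.
 */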
static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
{
	int i, node, cpu_node, route_node;
	unsigned char coremap;
	uint32_t pos_off, data, data_byte, data_mask;

	pos_off = pos & ~3;
	data_byte = pos & 3;
	data_mask = ~BIT_MASK(data_byte) & 0xf;

	/* Calculate node and coremap of target irq */
	cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
	coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE);

	for_each_online_cpu(i) {
		node = cpu_to_eio_node(i);
		if (!node_isset(node, *node_map))
			continue;

		/* EIO node 0 is in charge of inter-node interrupt dispatch */
		route_node = (node == mnode) ? cpu_node : node;
		data = ((coremap | (route_node << 4)) << (data_byte * 8));
		csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE);
	}
}

static DEFINE_RAW_SPINLOCK(affinity_lock);
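
/*
 * Move a vector to one CPU of the requested affinity mask: mask the vector,
 * reprogram its route, then unmask it, all under affinity_lock.
 */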
static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force)
{
	unsigned int cpu;
	unsigned long flags;
	uint32_t vector, regaddr;
	struct cpumask intersect_affinity;
	struct eiointc_priv *priv = d->domain->host_data;

	raw_spin_lock_irqsave(&affinity_lock, flags);

	cpumask_and(&intersect_affinity, affinity, cpu_online_mask);
	cpumask_and(&intersect_affinity, &intersect_affinity, &priv->cpuspan_map);

	if (cpumask_empty(&intersect_affinity)) {
		raw_spin_unlock_irqrestore(&affinity_lock, flags);
		return -EINVAL;
	}
	cpu = cpumask_first(&intersect_affinity);

	vector = d->hwirq;
	regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);

	/* Mask target vector */
	csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)),
		     0x0, priv->node * CORES_PER_EIO_NODE);

	/* Set route for target vector */
	eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);

	/* Unmask target vector */
	csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
		     0x0, priv->node * CORES_PER_EIO_NODE);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	raw_spin_unlock_irqrestore(&affinity_lock, flags);

	return IRQ_SET_MASK_OK;
}
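
/* Find which registered controller serves @node; returns -1 if none does. */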
static int eiointc_index(int node)
{
	int i;

	for (i = 0; i < nr_pics; i++) {
		if (node_isset(node, eiointc_priv[i]->node_map))
			return i;
	}

	return -1;
}
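
/*
 * Per-CPU bring-up, also used as the CPU hotplug "starting" callback: the
 * first core of each EIO node enables the controller and programs the
 * NODEMAP, IPMAP, ROUTE and ENABLE/BOUNCE registers.
 */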
static int eiointc_router_init(unsigned int cpu)
{
	int i, bit;
	uint32_t data;
	uint32_t node = cpu_to_eio_node(cpu);
	int index = eiointc_index(node);

	if (index < 0) {
		pr_err("Error: invalid nodemap!\n");
		return -1;
	}

	if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) {
		eiointc_enable();

		for (i = 0; i < VEC_COUNT / 32; i++) {
			data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
			iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
		}

		for (i = 0; i < VEC_COUNT / 32 / 4; i++) {
			bit = BIT(1 + index); /* Route to IP[1 + index] */
			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
		}

		for (i = 0; i < VEC_COUNT / 4; i++) {
			/* Route to Node-0 Core-0 */
			if (index == 0)
				bit = BIT(cpu_logical_map(0));
			else
				bit = (eiointc_priv[index]->node << 4) | 1;

			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
		}

		for (i = 0; i < VEC_COUNT / 32; i++) {
			data = EIOINTC_ALL_ENABLE;
			iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
			iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
		}
	}

	return 0;
}
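
/*
 * Chained handler for the cascade interrupt: scan the four 64-bit ISR
 * registers, acknowledge what is pending, and hand each set bit to the
 * EIOINTC irq domain.
 */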
static void eiointc_irq_dispatch(struct irq_desc *desc)
{
	int i;
	u64 pending;
	bool handled = false;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct eiointc_priv *priv = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);

	for (i = 0; i < VEC_REG_COUNT; i++) {
		pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
		iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
		while (pending) {
			int bit = __ffs(pending);
			int irq = bit + VEC_COUNT_PER_REG * i;

			generic_handle_domain_irq(priv->eiointc_domain, irq);
			pending &= ~BIT(bit);
			handled = true;
		}
	}

	if (!handled)
		spurious_interrupt();

	chained_irq_exit(chip, desc);
}
static void eiointc_ack_irq(struct irq_data *d)
{
}

static void eiointc_mask_irq(struct irq_data *d)
{
}

static void eiointc_unmask_irq(struct irq_data *d)
{
}

static struct irq_chip eiointc_irq_chip = {
	.irq_ack		= eiointc_ack_irq,
	.irq_mask		= eiointc_mask_irq,
	.irq_unmask		= eiointc_unmask_irq,
	.irq_set_affinity	= eiointc_set_irq_affinity,
};
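
/*
 * irq_domain alloc callback: translate the onecell firmware spec and attach
 * eiointc_irq_chip with edge handling to each allocated virq.
 */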
static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int ret;
	unsigned int i, type;
	unsigned long hwirq = 0;
	struct eiointc_priv *priv = domain->host_data;

	ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip,
				    priv, handle_edge_irq, NULL, NULL);
	}

	return 0;
}
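
/* irq_domain free callback: detach the handlers and reset the irq_data. */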
static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops eiointc_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= eiointc_domain_alloc,
	.free		= eiointc_domain_free,
};
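
/*
 * Record (and later look up) the EIOINTC domain that acts as parent for the
 * PCH PIC/MSI vector groups attached to a given node.
 */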
static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group)
{
	int i;

	if (cpu_has_flatmode)
		node = cpu_to_node(node * CORES_PER_EIO_NODE);

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node) {
			vec_group[i].parent = parent;
			return;
		}
	}
}

static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
{
	int i;

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node)
			return vec_group[i].parent;
	}

	return NULL;
}
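
/*
 * MADT walk callbacks: initialise the PCH PIC and PCH MSI controllers found
 * in the BIO_PIC/MSI_PIC subtables under their EIOINTC parent domain.
 */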
static int __init
pch_pic_parse_madt(union acpi_subtable_headers *header,
		   const unsigned long end)
{
	struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header;
	unsigned int node = (pchpic_entry->address >> 44) & 0xf;
	struct irq_domain *parent = acpi_get_vec_parent(node, pch_group);

	if (parent)
		return pch_pic_acpi_init(parent, pchpic_entry);

	return -EINVAL;
}

static int __init
pch_msi_parse_madt(union acpi_subtable_headers *header,
		   const unsigned long end)
{
	struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
	struct irq_domain *parent = acpi_get_vec_parent(eiointc_priv[nr_pics - 1]->node, msi_group);

	if (parent)
		return pch_msi_acpi_init(parent, pchmsi_entry);

	return -EINVAL;
}
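
/* Probe the child PCH PIC and PCH MSI controllers described in the MADT. */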
static int __init acpi_cascade_irqdomain_init(void)
{
	acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC,
			      pch_pic_parse_madt, 0);
	acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC,
			      pch_msi_parse_madt, 1);

	return 0;
}
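
/*
 * Probe one EIOINTC from its MADT EIO_PIC entry: build its node/CPU span,
 * create the linear irq domain, initialise routing on the boot CPU, hook up
 * the cascade interrupt, and register the CPU hotplug callback.
 */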
int __init eiointc_acpi_init(struct irq_domain *parent,
			     struct acpi_madt_eio_pic *acpi_eiointc)
{
	int i, parent_irq;
	unsigned long node_map;
	struct eiointc_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC",
							       acpi_eiointc->node);
	if (!priv->domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		goto out_free_priv;
	}

	priv->node = acpi_eiointc->node;
	node_map = acpi_eiointc->node_map ? : -1ULL;

	for_each_possible_cpu(i) {
		if (node_map & (1ULL << cpu_to_eio_node(i))) {
			node_set(cpu_to_eio_node(i), priv->node_map);
			cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map, cpumask_of(i));
		}
	}

	/* Setup IRQ domain */
	priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle, VEC_COUNT,
							&eiointc_domain_ops, priv);
	if (!priv->eiointc_domain) {
		pr_err("loongson-eiointc: cannot add IRQ domain\n");
		goto out_free_handle;
	}

	eiointc_priv[nr_pics++] = priv;

	eiointc_router_init(0);

	parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);
	irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);

	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
				  "irqchip/loongarch/intc:starting",
				  eiointc_router_init, NULL);

	acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, pch_group);
	acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, msi_group);

	acpi_cascade_irqdomain_init();

	return 0;

out_free_handle:
	irq_domain_free_fwnode(priv->domain_handle);
	priv->domain_handle = NULL;
out_free_priv:
	kfree(priv);

	return -ENOMEM;
}