1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * Copyright (C) 2017 SiFive | |
4 | * Copyright (C) 2018 Christoph Hellwig | |
5 | */ | |
6 | #define pr_fmt(fmt) "plic: " fmt | |
ccbe80ba | 7 | #include <linux/cpu.h> |
8237f8bc CH |
8 | #include <linux/interrupt.h> |
9 | #include <linux/io.h> | |
10 | #include <linux/irq.h> | |
11 | #include <linux/irqchip.h> | |
12 | #include <linux/irqdomain.h> | |
13 | #include <linux/module.h> | |
14 | #include <linux/of.h> | |
15 | #include <linux/of_address.h> | |
16 | #include <linux/of_irq.h> | |
17 | #include <linux/platform_device.h> | |
18 | #include <linux/spinlock.h> | |
f99fb607 | 19 | #include <asm/smp.h> |
8237f8bc CH |
20 | |
21 | /* | |
22 | * This driver implements a version of the RISC-V PLIC with the actual layout | |
23 | * specified in chapter 8 of the SiFive U5 Coreplex Series Manual: | |
24 | * | |
25 | * https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf | |
26 | * | |
27 | * The largest number supported by devices marked as 'sifive,plic-1.0.0', is | |
28 | * 1024, of which device 0 is defined as non-existent by the RISC-V Privileged | |
29 | * Spec. | |
30 | */ | |
31 | ||
#define MAX_DEVICES			1024
#define MAX_CONTEXTS			15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE			0
#define PRIORITY_PER_ID			4

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define ENABLE_BASE			0x2000
#define ENABLE_PER_HART			0x80

/*
 * Each hart context has a set of control registers associated with it. Right
 * now there's only two: a source priority threshold over which the hart will
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE			0x200000
#define CONTEXT_PER_HART		0x1000
#define CONTEXT_THRESHOLD		0x00
#define CONTEXT_CLAIM			0x04

/*
 * Since all sources are hardwired to priority 1, a threshold of 7 masks
 * every source on a context, while a threshold of 0 lets them all through.
 */
#define PLIC_DISABLE_THRESHOLD		0x7
#define PLIC_ENABLE_THRESHOLD		0
61 | ||
f1ad1133 AP |
/*
 * Per-PLIC-instance state.
 * @lmask:     set of CPUs whose contexts are served by this PLIC instance
 * @irqdomain: maps this PLIC's hwirqs to Linux irq numbers
 * @regs:      MMIO base of the PLIC register block
 */
struct plic_priv {
	struct cpumask lmask;
	struct irq_domain *irqdomain;
	void __iomem *regs;
};
8237f8bc CH |
67 | |
/*
 * Per-CPU view of the PLIC context that targets this hart.
 * @present is set by plic_init() once a matching context has been found;
 * the handler must not be used before then.
 */
struct plic_handler {
	bool present;
	void __iomem *hart_base;	/* this context's control registers */
	/*
	 * Protect mask operations on the registers given that we can't
	 * assume atomic memory operations work on them.
	 */
	raw_spinlock_t enable_lock;
	void __iomem *enable_base;	/* this context's enable bitmap */
	struct plic_priv *priv;		/* owning PLIC instance */
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
80 | ||
86c7cbf1 AP |
81 | static inline void plic_toggle(struct plic_handler *handler, |
82 | int hwirq, int enable) | |
8237f8bc | 83 | { |
86c7cbf1 | 84 | u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32); |
8237f8bc CH |
85 | u32 hwirq_mask = 1 << (hwirq % 32); |
86 | ||
86c7cbf1 | 87 | raw_spin_lock(&handler->enable_lock); |
8237f8bc CH |
88 | if (enable) |
89 | writel(readl(reg) | hwirq_mask, reg); | |
90 | else | |
91 | writel(readl(reg) & ~hwirq_mask, reg); | |
86c7cbf1 | 92 | raw_spin_unlock(&handler->enable_lock); |
8237f8bc CH |
93 | } |
94 | ||
cc9f04f9 | 95 | static inline void plic_irq_toggle(const struct cpumask *mask, |
f1ad1133 | 96 | struct irq_data *d, int enable) |
8237f8bc CH |
97 | { |
98 | int cpu; | |
f1ad1133 | 99 | struct plic_priv *priv = irq_get_chip_data(d->irq); |
8237f8bc | 100 | |
f1ad1133 | 101 | writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID); |
cc9f04f9 | 102 | for_each_cpu(cpu, mask) { |
8237f8bc CH |
103 | struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu); |
104 | ||
f1ad1133 AP |
105 | if (handler->present && |
106 | cpumask_test_cpu(cpu, &handler->priv->lmask)) | |
107 | plic_toggle(handler, d->hwirq, enable); | |
8237f8bc CH |
108 | } |
109 | } | |
110 | ||
bb0fed1c | 111 | static void plic_irq_unmask(struct irq_data *d) |
8237f8bc | 112 | { |
f1ad1133 AP |
113 | struct cpumask amask; |
114 | unsigned int cpu; | |
115 | struct plic_priv *priv = irq_get_chip_data(d->irq); | |
116 | ||
117 | cpumask_and(&amask, &priv->lmask, cpu_online_mask); | |
118 | cpu = cpumask_any_and(irq_data_get_affinity_mask(d), | |
119 | &amask); | |
cc9f04f9 AP |
120 | if (WARN_ON_ONCE(cpu >= nr_cpu_ids)) |
121 | return; | |
f1ad1133 | 122 | plic_irq_toggle(cpumask_of(cpu), d, 1); |
8237f8bc CH |
123 | } |
124 | ||
bb0fed1c | 125 | static void plic_irq_mask(struct irq_data *d) |
8237f8bc | 126 | { |
f1ad1133 AP |
127 | struct plic_priv *priv = irq_get_chip_data(d->irq); |
128 | ||
129 | plic_irq_toggle(&priv->lmask, d, 0); | |
8237f8bc CH |
130 | } |
131 | ||
cc9f04f9 AP |
#ifdef CONFIG_SMP
/*
 * Retarget @d to a single CPU chosen from @mask_val intersected with the
 * harts this PLIC serves: disable the source everywhere, then re-enable
 * it only on the chosen CPU, and report that CPU as the effective
 * affinity.  Returns -EINVAL when the intersection is empty.
 */
static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;
	struct cpumask amask;
	struct plic_priv *priv = irq_get_chip_data(d->irq);

	cpumask_and(&amask, &priv->lmask, mask_val);

	/* forced requests may target an offline CPU; normal ones may not */
	if (force)
		cpu = cpumask_first(&amask);
	else
		cpu = cpumask_any_and(&amask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	plic_irq_toggle(&priv->lmask, d, 0);
	plic_irq_toggle(cpumask_of(cpu), d, 1);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#endif
158 | ||
bb0fed1c MZ |
/*
 * Complete the interrupt: writing the source ID back to the claim
 * register finishes the claim/complete handshake with the PLIC.
 */
static void plic_irq_eoi(struct irq_data *d)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
}
165 | ||
8237f8bc CH |
/* irq_chip callbacks; used with the fasteoi flow set up in plic_irqdomain_map() */
static struct irq_chip plic_chip = {
	.name = "SiFive PLIC",
	.irq_mask = plic_irq_mask,
	.irq_unmask = plic_irq_unmask,
	.irq_eoi = plic_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
};
175 | ||
/*
 * Bind a newly mapped Linux irq to plic_chip with the fasteoi flow; the
 * PLIC's claim/complete handshake is performed by plic_irq_eoi().
 */
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
			    handle_fasteoi_irq, NULL, NULL);
	irq_set_noprobe(irq);
	return 0;
}
184 | ||
466008f9 YS |
185 | static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, |
186 | unsigned int nr_irqs, void *arg) | |
187 | { | |
188 | int i, ret; | |
189 | irq_hw_number_t hwirq; | |
190 | unsigned int type; | |
191 | struct irq_fwspec *fwspec = arg; | |
192 | ||
193 | ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type); | |
194 | if (ret) | |
195 | return ret; | |
196 | ||
197 | for (i = 0; i < nr_irqs; i++) { | |
198 | ret = plic_irqdomain_map(domain, virq + i, hwirq + i); | |
199 | if (ret) | |
200 | return ret; | |
201 | } | |
202 | ||
203 | return 0; | |
204 | } | |
205 | ||
8237f8bc | 206 | static const struct irq_domain_ops plic_irqdomain_ops = { |
466008f9 YS |
207 | .translate = irq_domain_translate_onecell, |
208 | .alloc = plic_irq_domain_alloc, | |
209 | .free = irq_domain_free_irqs_top, | |
8237f8bc CH |
210 | }; |
211 | ||
8237f8bc CH |
/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register. This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct pt_regs *regs)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	/* keep further external interrupts off while draining the claim reg */
	csr_clear(CSR_IE, IE_EIE);
	while ((hwirq = readl(claim))) {	/* a claim of 0 means nothing pending */
		int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);

		if (unlikely(irq <= 0))
			pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
					hwirq);
		else
			generic_handle_irq(irq);
	}
	csr_set(CSR_IE, IE_EIE);
}
238 | ||
239 | /* | |
240 | * Walk up the DT tree until we find an active RISC-V core (HART) node and | |
241 | * extract the cpuid from it. | |
242 | */ | |
243 | static int plic_find_hart_id(struct device_node *node) | |
244 | { | |
245 | for (; node; node = node->parent) { | |
246 | if (of_device_is_compatible(node, "riscv")) | |
b2f8cfa7 | 247 | return riscv_of_processor_hartid(node); |
8237f8bc CH |
248 | } |
249 | ||
250 | return -1; | |
251 | } | |
252 | ||
ccbe80ba AP |
/* Program the per-context priority threshold register. */
static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
	/* priority must be > threshold to trigger an interrupt */
	writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}
258 | ||
/*
 * CPU hotplug teardown: stop taking external interrupts on this CPU by
 * clearing the external-interrupt enable and raising the context
 * threshold above the hardwired source priority.
 */
static int plic_dying_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	csr_clear(CSR_IE, IE_EIE);
	plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);

	return 0;
}
268 | ||
/*
 * CPU hotplug bring-up: enable external interrupts on this CPU and lower
 * the context threshold so the hardwired priority-1 sources get through.
 */
static int plic_starting_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	csr_set(CSR_IE, IE_EIE);
	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);

	return 0;
}
278 | ||
8237f8bc CH |
279 | static int __init plic_init(struct device_node *node, |
280 | struct device_node *parent) | |
281 | { | |
6adfe8d2 | 282 | int error = 0, nr_contexts, nr_handlers = 0, i; |
8237f8bc | 283 | u32 nr_irqs; |
f1ad1133 | 284 | struct plic_priv *priv; |
8237f8bc | 285 | |
f1ad1133 AP |
286 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
287 | if (!priv) | |
288 | return -ENOMEM; | |
8237f8bc | 289 | |
f1ad1133 AP |
290 | priv->regs = of_iomap(node, 0); |
291 | if (WARN_ON(!priv->regs)) { | |
292 | error = -EIO; | |
293 | goto out_free_priv; | |
294 | } | |
8237f8bc CH |
295 | |
296 | error = -EINVAL; | |
297 | of_property_read_u32(node, "riscv,ndev", &nr_irqs); | |
298 | if (WARN_ON(!nr_irqs)) | |
299 | goto out_iounmap; | |
300 | ||
6adfe8d2 AP |
301 | nr_contexts = of_irq_count(node); |
302 | if (WARN_ON(!nr_contexts)) | |
8237f8bc | 303 | goto out_iounmap; |
6adfe8d2 | 304 | if (WARN_ON(nr_contexts < num_possible_cpus())) |
8237f8bc CH |
305 | goto out_iounmap; |
306 | ||
307 | error = -ENOMEM; | |
f1ad1133 AP |
308 | priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1, |
309 | &plic_irqdomain_ops, priv); | |
310 | if (WARN_ON(!priv->irqdomain)) | |
8237f8bc CH |
311 | goto out_iounmap; |
312 | ||
6adfe8d2 | 313 | for (i = 0; i < nr_contexts; i++) { |
8237f8bc CH |
314 | struct of_phandle_args parent; |
315 | struct plic_handler *handler; | |
316 | irq_hw_number_t hwirq; | |
f99fb607 | 317 | int cpu, hartid; |
8237f8bc CH |
318 | |
319 | if (of_irq_parse_one(node, i, &parent)) { | |
320 | pr_err("failed to parse parent for context %d.\n", i); | |
321 | continue; | |
322 | } | |
323 | ||
a4c3733d CH |
324 | /* |
325 | * Skip contexts other than external interrupts for our | |
326 | * privilege level. | |
327 | */ | |
2f3035da | 328 | if (parent.args[0] != RV_IRQ_EXT) |
8237f8bc CH |
329 | continue; |
330 | ||
f99fb607 AP |
331 | hartid = plic_find_hart_id(parent.np); |
332 | if (hartid < 0) { | |
8237f8bc CH |
333 | pr_warn("failed to parse hart ID for context %d.\n", i); |
334 | continue; | |
335 | } | |
336 | ||
f99fb607 | 337 | cpu = riscv_hartid_to_cpuid(hartid); |
fc03acae AP |
338 | if (cpu < 0) { |
339 | pr_warn("Invalid cpuid for context %d\n", i); | |
340 | continue; | |
341 | } | |
342 | ||
9ce06497 CH |
343 | /* |
344 | * When running in M-mode we need to ignore the S-mode handler. | |
345 | * Here we assume it always comes later, but that might be a | |
346 | * little fragile. | |
347 | */ | |
8237f8bc | 348 | handler = per_cpu_ptr(&plic_handlers, cpu); |
3fecb5aa AP |
349 | if (handler->present) { |
350 | pr_warn("handler already present for context %d.\n", i); | |
ccbe80ba | 351 | plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD); |
9ce06497 | 352 | goto done; |
3fecb5aa AP |
353 | } |
354 | ||
f1ad1133 | 355 | cpumask_set_cpu(cpu, &priv->lmask); |
8237f8bc | 356 | handler->present = true; |
86c7cbf1 | 357 | handler->hart_base = |
f1ad1133 | 358 | priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART; |
86c7cbf1 AP |
359 | raw_spin_lock_init(&handler->enable_lock); |
360 | handler->enable_base = | |
f1ad1133 AP |
361 | priv->regs + ENABLE_BASE + i * ENABLE_PER_HART; |
362 | handler->priv = priv; | |
9ce06497 | 363 | done: |
8237f8bc | 364 | for (hwirq = 1; hwirq <= nr_irqs; hwirq++) |
86c7cbf1 | 365 | plic_toggle(handler, hwirq, 0); |
6adfe8d2 | 366 | nr_handlers++; |
8237f8bc CH |
367 | } |
368 | ||
ccbe80ba AP |
369 | cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, |
370 | "irqchip/sifive/plic:starting", | |
371 | plic_starting_cpu, plic_dying_cpu); | |
6adfe8d2 AP |
372 | pr_info("mapped %d interrupts with %d handlers for %d contexts.\n", |
373 | nr_irqs, nr_handlers, nr_contexts); | |
8237f8bc CH |
374 | set_handle_irq(plic_handle_irq); |
375 | return 0; | |
376 | ||
377 | out_iounmap: | |
f1ad1133 AP |
378 | iounmap(priv->regs); |
379 | out_free_priv: | |
380 | kfree(priv); | |
8237f8bc CH |
381 | return error; |
382 | } | |
383 | ||
/* Match both the SiFive binding and the legacy generic compatible string. */
IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */