/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt)	"GICv3: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/irq-partition-percpu.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

struct redist_region {
	void __iomem		*redist_base;
	phys_addr_t		phys_base;
	bool			single_redist;
};

struct gic_chip_data {
	struct fwnode_handle	*fwnode;
	void __iomem		*dist_base;
	struct redist_region	*redist_regions;
	struct rdists		rdists;
	struct irq_domain	*domain;
	u64			redist_stride;
	u32			nr_redist_regions;
	unsigned int		irq_nr;
	struct partition_desc	*ppi_descs[16];
};

static struct gic_chip_data gic_data __read_mostly;
static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;

static struct gic_kvm_info gic_v3_kvm_info;

#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0

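/*
 * Editorial note, not part of the original source: in the GIC
 * architecture, *lower* numeric priority values are *more* urgent,
 * and ICC_PMR_EL1 only lets through interrupts whose priority is
 * numerically lower than the mask. Since Linux runs everything at a
 * single priority, programming 0xf0 keeps every configured interrupt
 * deliverable (priorities 0x00..0xef pass, anything at or above the
 * mask would be held back).
 */
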
static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

static inline int gic_irq_in_rdist(struct irq_data *d)
{
	return gic_irq(d) < 32;
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	if (gic_irq_in_rdist(d))	/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();

	if (d->hwirq <= 1023)		/* SPI -> dist_base */
		return gic_data.dist_base;

	return NULL;
}

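/*
 * Editorial summary of the INTID ranges this driver deals with (the
 * checks are spread across gic_dist_base(), gic_handle_irq() and
 * gic_irq_domain_map() below):
 *
 *	0    - 15	SGIs    (per CPU, via the redistributor)
 *	16   - 31	PPIs    (per CPU, via the redistributor)
 *	32   - 1019	SPIs    (via the distributor)
 *	1020 - 1023	special values (e.g. 1023 = spurious)
 *	8192 and up	LPIs    (owned by the ITS code)
 */
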
static void gic_do_wait_for_rwp(void __iomem *base)
{
	u32 count = 1000000;	/* 1s! */

	while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
		count--;
		if (!count) {
			pr_err_ratelimited("RWP timeout, gone fishing\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}

#ifdef CONFIG_ARM64

static u64 __maybe_unused gic_read_iar(void)
{
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
		return gic_read_iar_cavium_thunderx();
	else
		return gic_read_iar_common();
}
#endif

static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 count = 1000000;	/* 1s! */
	u32 val;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) {		/* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
	}

	while (--count) {
		val = readl_relaxed(rbase + GICR_WAKER);
		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	}
	if (!count)
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
}

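/*
 * Editorial sketch of the GICR_WAKER handshake implemented above
 * (architectural behaviour, not extra driver logic): software flips
 * ProcessorSleep and then polls ChildrenAsleep until the
 * redistributor acknowledges the transition:
 *
 *	wake-up:  ProcessorSleep := 0, wait for ChildrenAsleep == 0
 *	sleep:    ProcessorSleep := 1, wait for ChildrenAsleep == 1
 *
 * The "enable ^ ChildrenAsleep" test covers both directions with a
 * single loop.
 */
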
/*
 * Routines to disable, enable, EOI and route interrupts
 */
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void __iomem *base;

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
}

static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void (*rwp_wait)(void);
	void __iomem *base;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
	rwp_wait();
}

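/*
 * Editorial worked example for the register index arithmetic above:
 * the GICD_I{S,C}ENABLER-style banks pack one bit per interrupt into
 * consecutive 32-bit registers. For hwirq 42:
 *
 *	word offset = (42 / 32) * 4 = 4    (second register of the bank)
 *	bit mask    = 1 << (42 % 32)       (bit 10 of that register)
 *
 * so gic_poke_irq(d, GICD_ISENABLER) writes 1 << 10 to
 * dist_base + GICD_ISENABLER + 4.
 */
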
static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GICD_ICACTIVER);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
		break;

	case IRQCHIP_STATE_MASKED:
		reg = val ? GICD_ICENABLER : GICD_ISENABLER;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GICD_ISPENDR);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GICD_ISACTIVER);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GICD_ISENABLER);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static void gic_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(gic_irq(d));
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/*
	 * No need to deactivate an LPI, or an interrupt that
	 * is getting forwarded to a vcpu.
	 */
	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;
	gic_write_dir(gic_irq(d));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = gic_irq(d);
	void (*rwp_wait)(void);
	void __iomem *base;

	/* Interrupt configuration for SGIs can't be changed */
	if (irq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	return gic_configure_irq(irq, type, base, rwp_wait);
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}

static u64 gic_mpidr_to_affinity(unsigned long mpidr)
{
	u64 aff;

	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	return aff;
}

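/*
 * Editorial worked example for gic_mpidr_to_affinity(): a CPU with
 * MPIDR affinity fields Aff3=1, Aff2=2, Aff1=3, Aff0=4 yields
 *
 *	aff = (1ULL << 32) | (2 << 16) | (3 << 8) | 4
 *	    = 0x0000000100020304
 *
 * which is the layout GICD_IROUTER expects (Aff3 lives in bits
 * [39:32], hence the u64 cast on the first term).
 */
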
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqnr;

	do {
		irqnr = gic_read_iar();

		if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
			int err;

			if (static_key_true(&supports_deactivate))
				gic_write_eoir(irqnr);
			else
				isb();

			err = handle_domain_irq(gic_data.domain, irqnr, regs);
			if (err) {
				WARN_ONCE(true, "Unexpected interrupt received!\n");
				if (static_key_true(&supports_deactivate)) {
					if (irqnr < 8192)
						gic_write_dir(irqnr);
				} else {
					gic_write_eoir(irqnr);
				}
			}
			continue;
		}
		if (irqnr < 16) {
			gic_write_eoir(irqnr);
			if (static_key_true(&supports_deactivate))
				gic_write_dir(irqnr);
#ifdef CONFIG_SMP
			/*
			 * Unlike GICv2, we don't need an smp_rmb() here.
			 * The control dependency from gic_read_iar to
			 * the ISB in gic_write_eoir is enough to ensure
			 * that any shared data read by handle_IPI will
			 * be read after the ACK.
			 */
			handle_IPI(irqnr, regs);
#else
			WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
			continue;
		}
	} while (irqnr != ICC_IAR1_EL1_SPURIOUS);
}

static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/*
	 * Configure SPIs as non-secure Group-1. This will only matter
	 * if the GIC only has a single security state. This will not
	 * do the right thing if the kernel is running in secure mode,
	 * but that's not the intended use case anyway.
	 */
	for (i = 32; i < gic_data.irq_nr; i += 32)
		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);

	/* Enable distributor with ARE, Group1 */
	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
		       base + GICD_CTLR);

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
	for (i = 32; i < gic_data.irq_nr; i++)
		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
}

static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
	int ret = -ENODEV;
	int i;

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u64 typer;
		u32 reg;

		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		do {
			typer = gic_read_typer(ptr + GICR_TYPER);
			ret = fn(gic_data.redist_regions + i, ptr);
			if (!ret)
				return 0;

			if (gic_data.redist_regions[i].single_redist)
				break;

			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	return ret ? -ENODEV : 0;
}

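/*
 * Editorial note on the stride logic above: without an explicit
 * "redistributor-stride", each redistributor frame is assumed to be
 *
 *	2 x 64K (RD_base + SGI_base)              on plain GICv3
 *	4 x 64K (+ VLPI_base + reserved page)     when GICR_TYPER.VLPIS
 *
 * so for a plain GICv3 the n-th redistributor in a region sits at
 * region_base + n * 0x20000. GICR_TYPER.Last terminates the walk.
 */
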
static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
	unsigned long mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	typer = gic_read_typer(ptr + GICR_TYPER);
	if ((typer >> 32) == aff) {
		u64 offset = ptr - region->redist_base;
		gic_data_rdist_rd_base() = ptr;
		gic_data_rdist()->phys_base = region->phys_base + offset;

		pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
			smp_processor_id(), mpidr,
			(int)(region - gic_data.redist_regions),
			&gic_data_rdist()->phys_base);
		return 0;
	}

	/* Try next one */
	return 1;
}

static int gic_populate_rdist(void)
{
	if (gic_iterate_rdists(__gic_populate_rdist) == 0)
		return 0;

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
	     smp_processor_id(),
	     (unsigned long)cpu_logical_map(smp_processor_id()));
	return -ENODEV;
}

static int __gic_update_vlpi_properties(struct redist_region *region,
					void __iomem *ptr)
{
	u64 typer = gic_read_typer(ptr + GICR_TYPER);
	gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
	gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS);

	return 1;
}

static void gic_update_vlpi_properties(void)
{
	gic_iterate_rdists(__gic_update_vlpi_properties);
	pr_info("%sVLPI support, %sdirect LPI support\n",
		!gic_data.rdists.has_vlpis ? "no " : "",
		!gic_data.rdists.has_direct_lpi ? "no " : "");
}

static void gic_cpu_sys_reg_init(void)
{
	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	if (!gic_enable_sre())
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

	/* Set priority mask register */
	gic_write_pmr(DEFAULT_PMR_VALUE);

	/*
	 * Some firmwares hand over to the kernel with the BPR changed from
	 * its reset value (and with a value large enough to prevent
	 * any pre-emptive interrupts from working at all). Writing a zero
	 * to BPR restores its reset value.
	 */
	gic_write_bpr1(0);

	if (static_key_true(&supports_deactivate)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	}

	/* ... and let's hit the road... */
	gic_write_grpen1(1);
}

static int gic_dist_supports_lpis(void)
{
	return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS);
}

static void gic_cpu_init(void)
{
	void __iomem *rbase;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;

	gic_enable_redist(true);

	rbase = gic_data_rdist_sgi_base();

	/* Configure SGIs/PPIs as non-secure Group-1 */
	writel_relaxed(~0, rbase + GICR_IGROUPR0);

	gic_cpu_config(rbase, gic_redist_wait_for_rwp);

	/* Give LPIs a spin */
	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
		its_cpu_init();

	/* initialise system registers */
	gic_cpu_sys_reg_init();
}

#ifdef CONFIG_SMP

static int gic_starting_cpu(unsigned int cpu)
{
	gic_cpu_init();
	return 0;
}

static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   unsigned long cluster_id)
{
	int next_cpu, cpu = *base_cpu;
	unsigned long mpidr = cpu_logical_map(cpu);
	u16 tlist = 0;

	while (cpu < nr_cpu_ids) {
		/*
		 * If we ever get a cluster of more than 16 CPUs, just
		 * scream and skip that CPU.
		 */
		if (WARN_ON((mpidr & 0xff) >= 16))
			goto out;

		tlist |= 1 << (mpidr & 0xf);

		next_cpu = cpumask_next(cpu, mask);
		if (next_cpu >= nr_cpu_ids)
			goto out;
		cpu = next_cpu;

		mpidr = cpu_logical_map(cpu);

		if (cluster_id != (mpidr & ~0xffUL)) {
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}

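/*
 * Editorial worked example for gic_compute_target_list(): with four
 * online CPUs whose MPIDRs are 0x000..0x003 (one cluster, Aff0 values
 * 0-3) and a mask covering all of them, a single call starting at
 * CPU0 returns
 *
 *	tlist = 0x000f	(bits 0-3, one per Aff0 value)
 *
 * and leaves *base_cpu on the last CPU it consumed, so the caller's
 * for_each_cpu() loop sends exactly one SGI per cluster.
 */
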
#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)

static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
	       irq << ICC_SGI1R_SGI_ID_SHIFT		|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

	pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}

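/*
 * Editorial worked example for gic_send_sgi(): sending SGI 5 to
 * target list 0x3 (Aff0 targets 0 and 1) in cluster Aff3.Aff2.Aff1 =
 * 0.0.1 packs into ICC_SGI1R_EL1 as
 *
 *	val = (5 << 24) | (1 << 16) | 0x3 = 0x05010003
 *
 * given the architectural field positions: TargetList at bit 0,
 * Aff1 at 16, INTID at 24, Aff2 at 32 and Aff3 at 48.
 */
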
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;

	if (WARN_ON(irq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	smp_wmb();

	for_each_cpu(cpu, mask) {
		unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL;
		u16 tlist;

		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
		gic_send_sgi(cluster_id, tlist, irq);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}

static void gic_smp_init(void)
{
	set_smp_cross_call(gic_raise_softirq);
	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
				  "irqchip/arm/gicv3:starting",
				  gic_starting_cpu, NULL);
}

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	void __iomem *reg;
	int enabled;
	u64 val;

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

	gic_write_irouter(val, reg);

	/*
	 * If the interrupt was enabled, enable it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);
	else
		gic_dist_wait_for_rwp();

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#else
#define gic_set_affinity	NULL
#define gic_smp_init()		do { } while (0)
#endif

#ifdef CONFIG_CPU_PM
/* Check whether it's single security state view */
static bool gic_dist_security_disabled(void)
{
	return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
}

static int gic_cpu_pm_notifier(struct notifier_block *self,
			       unsigned long cmd, void *v)
{
	if (cmd == CPU_PM_EXIT) {
		if (gic_dist_security_disabled())
			gic_enable_redist(true);
		gic_cpu_sys_reg_init();
	} else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
		gic_write_grpen1(0);
		gic_enable_redist(false);
	}
	return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
	.notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

static struct irq_chip gic_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.flags			= IRQCHIP_SET_TYPE_MASKED,
};

static struct irq_chip gic_eoimode1_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.flags			= IRQCHIP_SET_TYPE_MASKED,
};

#define GIC_ID_NR		(1U << gic_data.rdists.id_bits)

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct irq_chip *chip = &gic_chip;

	if (static_key_true(&supports_deactivate))
		chip = &gic_eoimode1_chip;

	/* SGIs are private to the core kernel */
	if (hw < 16)
		return -EPERM;
	/* Nothing here */
	if (hw >= gic_data.irq_nr && hw < 8192)
		return -EPERM;
	/* Off limits */
	if (hw >= GIC_ID_NR)
		return -EPERM;

	/* PPIs */
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		irq_set_status_flags(irq, IRQ_NOAUTOEN);
	}
	/* SPIs */
	if (hw >= 32 && hw < gic_data.irq_nr) {
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	}
	/* LPIs */
	if (hw >= 8192 && hw < GIC_ID_NR) {
		if (!gic_dist_supports_lpis())
			return -EPERM;
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
	}

	return 0;
}

static int gic_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;

		switch (fwspec->param[0]) {
		case 0:			/* SPI */
			*hwirq = fwspec->param[1] + 32;
			break;
		case 1:			/* PPI */
			*hwirq = fwspec->param[1] + 16;
			break;
		case GIC_IRQ_TYPE_LPI:	/* LPI */
			*hwirq = fwspec->param[1];
			break;
		default:
			return -EINVAL;
		}

		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
		return 0;
	}

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		if (fwspec->param_count != 2)
			return -EINVAL;

		*hwirq = fwspec->param[0];
		*type = fwspec->param[1];
		return 0;
	}

	return -EINVAL;
}

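/*
 * Editorial example of the DT translation above: a device node with
 *
 *	interrupts = <0 23 4>;	(i.e. <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>)
 *
 * yields hwirq = 23 + 32 = 55 and type = IRQ_TYPE_LEVEL_HIGH, while a
 * PPI specifier <1 9 4> maps to hwirq 9 + 16 = 25.
 */
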
static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;

	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static int gic_irq_domain_select(struct irq_domain *d,
				 struct irq_fwspec *fwspec,
				 enum irq_domain_bus_token bus_token)
{
	/* Not for us */
	if (fwspec->fwnode != d->fwnode)
		return 0;

	/* If this is not DT, then we have a single domain */
	if (!is_of_node(fwspec->fwnode))
		return 1;

	/*
	 * If this is a PPI and we have a 4th (non-null) parameter,
	 * then we need to match the partition domain.
	 */
	if (fwspec->param_count >= 4 &&
	    fwspec->param[0] == 1 && fwspec->param[3] != 0)
		return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);

	return d == gic_data.domain;
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.translate = gic_irq_domain_translate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.select = gic_irq_domain_select,
};

static int partition_domain_translate(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      unsigned long *hwirq,
				      unsigned int *type)
{
	struct device_node *np;
	int ret;

	np = of_find_node_by_phandle(fwspec->param[3]);
	if (WARN_ON(!np))
		return -EINVAL;

	ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
				     of_node_to_fwnode(np));
	if (ret < 0)
		return ret;

	*hwirq = ret;
	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static const struct irq_domain_ops partition_domain_ops = {
	.translate = partition_domain_translate,
	.select = gic_irq_domain_select,
};

static int __init gic_init_bases(void __iomem *dist_base,
				 struct redist_region *rdist_regs,
				 u32 nr_redist_regions,
				 u64 redist_stride,
				 struct fwnode_handle *handle)
{
	u32 typer;
	int gic_irqs;
	int err;

	if (!is_hyp_mode_available())
		static_key_slow_dec(&supports_deactivate);

	if (static_key_true(&supports_deactivate))
		pr_info("GIC: Using split EOI/Deactivate mode\n");

	gic_data.fwnode = handle;
	gic_data.dist_base = dist_base;
	gic_data.redist_regions = rdist_regs;
	gic_data.nr_redist_regions = nr_redist_regions;
	gic_data.redist_stride = redist_stride;

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
	 */
	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
	gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
	gic_irqs = GICD_TYPER_IRQS(typer);
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic_data.irq_nr = gic_irqs;

	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
						 &gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
	gic_data.rdists.has_vlpis = true;
	gic_data.rdists.has_direct_lpi = true;

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	set_handle_irq(gic_handle_irq);

	gic_update_vlpi_properties();

	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
		its_init(handle, &gic_data.rdists, gic_data.domain);

	gic_smp_init();
	gic_dist_init();
	gic_cpu_init();
	gic_cpu_pm_init();

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdists.rdist);
	return err;
}

static int __init gic_validate_dist_version(void __iomem *dist_base)
{
	u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;

	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
		return -ENODEV;

	return 0;
}

static int get_cpu_number(struct device_node *dn)
{
	const __be32 *cell;
	u64 hwid;
	int cpu;

	cell = of_get_property(dn, "reg", NULL);
	if (!cell)
		return -1;

	hwid = of_read_number(cell, of_n_addr_cells(dn));

	/*
	 * Non affinity bits must be set to 0 in the DT
	 */
	if (hwid & ~MPIDR_HWID_BITMASK)
		return -1;

	for_each_possible_cpu(cpu)
		if (cpu_logical_map(cpu) == hwid)
			return cpu;

	return -1;
}

/* Create all possible partitions at boot time */
static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
{
	struct device_node *parts_node, *child_part;
	int part_idx = 0, i;
	int nr_parts;
	struct partition_affinity *parts;

	parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
	if (!parts_node)
		return;

	nr_parts = of_get_child_count(parts_node);

	if (!nr_parts)
		return;

	parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
	if (WARN_ON(!parts))
		return;

	for_each_child_of_node(parts_node, child_part) {
		struct partition_affinity *part;
		int n;

		part = &parts[part_idx];

		part->partition_id = of_node_to_fwnode(child_part);

		pr_info("GIC: PPI partition %s[%d] { ",
			child_part->name, part_idx);

		n = of_property_count_elems_of_size(child_part, "affinity",
						    sizeof(u32));
		WARN_ON(n <= 0);

		for (i = 0; i < n; i++) {
			int err, cpu;
			u32 cpu_phandle;
			struct device_node *cpu_node;

			err = of_property_read_u32_index(child_part, "affinity",
							 i, &cpu_phandle);
			if (WARN_ON(err))
				continue;

			cpu_node = of_find_node_by_phandle(cpu_phandle);
			if (WARN_ON(!cpu_node))
				continue;

			cpu = get_cpu_number(cpu_node);
			if (WARN_ON(cpu == -1))
				continue;

			pr_cont("%pOF[%d] ", cpu_node, cpu);

			cpumask_set_cpu(cpu, &part->mask);
		}

		pr_cont("}\n");
		part_idx++;
	}

	for (i = 0; i < 16; i++) {
		unsigned int irq;
		struct partition_desc *desc;
		struct irq_fwspec ppi_fwspec = {
			.fwnode		= gic_data.fwnode,
			.param_count	= 3,
			.param		= {
				[0]	= 1,
				[1]	= i,
				[2]	= IRQ_TYPE_NONE,
			},
		};

		irq = irq_create_fwspec_mapping(&ppi_fwspec);
		if (WARN_ON(!irq))
			continue;
		desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
					     irq, &partition_domain_ops);
		if (WARN_ON(!desc))
			continue;

		gic_data.ppi_descs[i] = desc;
	}
}

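/*
 * Editorial DT sketch for gic_populate_ppi_partitions(), following
 * the arm,gic-v3 binding (node and label names are illustrative):
 *
 *	gic: interrupt-controller@2f000000 {
 *		...
 *		ppi-partitions {
 *			part0: interrupt-partition-0 {
 *				affinity = <&cpu0 &cpu2>;
 *			};
 *		};
 *	};
 *
 * A PPI specifier may then carry part0's phandle as a 4th cell, and
 * gic_irq_domain_select()/partition_domain_translate() route it to
 * the matching partition domain.
 */
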
static void __init gic_of_setup_kvm_info(struct device_node *node)
{
	int ret;
	struct resource r;
	u32 gicv_idx;

	gic_v3_kvm_info.type = GIC_V3;

	gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v3_kvm_info.maint_irq)
		return;

	if (of_property_read_u32(node, "#redistributor-regions",
				 &gicv_idx))
		gicv_idx = 1;

	gicv_idx += 3;	/* Also skip GICD, GICC, GICH */
	ret = of_address_to_resource(node, gicv_idx, &r);
	if (!ret)
		gic_v3_kvm_info.vcpu = r;

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_set_kvm_info(&gic_v3_kvm_info);
}

static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *dist_base;
	struct redist_region *rdist_regs;
	u64 redist_stride;
	u32 nr_redist_regions;
	int err, i;

	dist_base = of_iomap(node, 0);
	if (!dist_base) {
		pr_err("%pOF: unable to map gic dist registers\n", node);
		return -ENXIO;
	}

	err = gic_validate_dist_version(dist_base);
	if (err) {
		pr_err("%pOF: no distributor detected, giving up\n", node);
		goto out_unmap_dist;
	}

	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
		nr_redist_regions = 1;

	rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	for (i = 0; i < nr_redist_regions; i++) {
		struct resource res;
		int ret;

		ret = of_address_to_resource(node, 1 + i, &res);
		rdist_regs[i].redist_base = of_iomap(node, 1 + i);
		if (ret || !rdist_regs[i].redist_base) {
			pr_err("%pOF: couldn't map region %d\n", node, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
		rdist_regs[i].phys_base = res.start;
	}

	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
			     redist_stride, &node->fwnode);
	if (err)
		goto out_unmap_rdist;

	gic_populate_ppi_partitions(node);
	gic_of_setup_kvm_info(node);
	return 0;

out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base)
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);

#ifdef CONFIG_ACPI
static struct
{
	void __iomem *dist_base;
	struct redist_region *redist_regs;
	u32 nr_redist_regions;
	bool single_redist;
	u32 maint_irq;
	int maint_irq_mode;
	phys_addr_t vcpu_base;
} acpi_data __initdata;

static void __init
gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
{
	static int count = 0;

	acpi_data.redist_regs[count].phys_base = phys_base;
	acpi_data.redist_regs[count].redist_base = redist_base;
	acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
	count++;
}

static int __init
gic_acpi_parse_madt_redist(struct acpi_subtable_header *header,
			   const unsigned long end)
{
	struct acpi_madt_generic_redistributor *redist =
			(struct acpi_madt_generic_redistributor *)header;
	void __iomem *redist_base;

	redist_base = ioremap(redist->base_address, redist->length);
	if (!redist_base) {
		pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
		return -ENOMEM;
	}

	gic_acpi_register_redist(redist->base_address, redist_base);
	return 0;
}

static int __init
gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
			 const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
				(struct acpi_madt_generic_interrupt *)header;
	u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
	void __iomem *redist_base;

	redist_base = ioremap(gicc->gicr_base_address, size);
	if (!redist_base)
		return -ENOMEM;

	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
	return 0;
}

static int __init gic_acpi_collect_gicr_base(void)
{
	acpi_tbl_entry_handler redist_parser;
	enum acpi_madt_type type;

	if (acpi_data.single_redist) {
		type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
		redist_parser = gic_acpi_parse_madt_gicc;
	} else {
		type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
		redist_parser = gic_acpi_parse_madt_redist;
	}

	/* Collect redistributor base addresses in GICR entries */
	if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
		return 0;

	pr_info("No valid GICR entries exist\n");
	return -ENODEV;
}

static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header,
				      const unsigned long end)
{
	/* Subtable presence means that redist exists, that's it */
	return 0;
}

static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
				      const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
				(struct acpi_madt_generic_interrupt *)header;

	/*
	 * If GICC is enabled and has a valid gicr base address, then it
	 * means the GICR base is presented via GICC.
	 */
	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
		return 0;

	return -ENODEV;
}

static int __init gic_acpi_count_gicr_regions(void)
{
	int count;

	/*
	 * Count how many redistributor regions we have. It is not allowed
	 * to mix redistributor descriptions: GICR and GICC subtables have
	 * to be mutually exclusive.
	 */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				      gic_acpi_match_gicr, 0);
	if (count > 0) {
		acpi_data.single_redist = false;
		return count;
	}

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_match_gicc, 0);
	if (count > 0)
		acpi_data.single_redist = true;

	return count;
}

static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
					   struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	int count;

	dist = (struct acpi_madt_generic_distributor *)header;
	if (dist->version != ape->driver_data)
		return false;

	/* We need to do that exercise anyway, the sooner the better */
	count = gic_acpi_count_gicr_regions();
	if (count <= 0)
		return false;

	acpi_data.nr_redist_regions = count;
	return true;
}

static int __init gic_acpi_parse_virt_madt_gicc(struct acpi_subtable_header *header,
						const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;
	int maint_irq_mode;
	static int first_madt = true;

	/* Skip unusable CPUs */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
		ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;

	if (first_madt) {
		first_madt = false;

		acpi_data.maint_irq = gicc->vgic_interrupt;
		acpi_data.maint_irq_mode = maint_irq_mode;
		acpi_data.vcpu_base = gicc->gicv_base_address;

		return 0;
	}

	/*
	 * The maintenance interrupt and GICV should be the same for every CPU
	 */
	if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
	    (acpi_data.maint_irq_mode != maint_irq_mode) ||
	    (acpi_data.vcpu_base != gicc->gicv_base_address))
		return -EINVAL;

	return 0;
}

static bool __init gic_acpi_collect_virt_info(void)
{
	int count;

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_virt_madt_gicc, 0);

	return (count > 0);
}

#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)

static void __init gic_acpi_setup_kvm_info(void)
{
	int irq;

	if (!gic_acpi_collect_virt_info()) {
		pr_warn("Unable to get hardware information used for virtualization\n");
		return;
	}

	gic_v3_kvm_info.type = GIC_V3;

	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v3_kvm_info.maint_irq = irq;

	if (acpi_data.vcpu_base) {
		struct resource *vcpu = &gic_v3_kvm_info.vcpu;

		vcpu->flags = IORESOURCE_MEM;
		vcpu->start = acpi_data.vcpu_base;
		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
	}

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_set_kvm_info(&gic_v3_kvm_info);
}

static int __init
gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	struct fwnode_handle *domain_handle;
	size_t size;
	int i, err;

	/* Get distributor base address */
	dist = (struct acpi_madt_generic_distributor *)header;
	acpi_data.dist_base = ioremap(dist->base_address,
				      ACPI_GICV3_DIST_MEM_SIZE);
	if (!acpi_data.dist_base) {
		pr_err("Unable to map GICD registers\n");
		return -ENOMEM;
	}

	err = gic_validate_dist_version(acpi_data.dist_base);
	if (err) {
		pr_err("No distributor detected at @%p, giving up\n",
		       acpi_data.dist_base);
		goto out_dist_unmap;
	}

	size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
	acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
	if (!acpi_data.redist_regs) {
		err = -ENOMEM;
		goto out_dist_unmap;
	}

	err = gic_acpi_collect_gicr_base();
	if (err)
		goto out_redist_unmap;

	domain_handle = irq_domain_alloc_fwnode(acpi_data.dist_base);
	if (!domain_handle) {
		err = -ENOMEM;
		goto out_redist_unmap;
	}

	err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
			     acpi_data.nr_redist_regions, 0, domain_handle);
	if (err)
		goto out_fwhandle_free;

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
	gic_acpi_setup_kvm_info();

	return 0;

out_fwhandle_free:
	irq_domain_free_fwnode(domain_handle);
out_redist_unmap:
	for (i = 0; i < acpi_data.nr_redist_regions; i++)
		if (acpi_data.redist_regs[i].redist_base)
			iounmap(acpi_data.redist_regs[i].redist_base);
	kfree(acpi_data.redist_regs);
out_dist_unmap:
	iounmap(acpi_data.dist_base);
	return err;
}
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
		     gic_acpi_init);
#endif