// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/irqchip/irq-crossbar.c
 *
 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
 * Author: Sricharan R <r.sricharan@ti.com>
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>

#define IRQ_FREE	-1
#define IRQ_RESERVED	-2
#define IRQ_SKIP	-3
#define GIC_IRQ_START	32

/**
 * struct crossbar_device - crossbar device description
 * @lock: spinlock serializing access to @irq_map
 * @int_max: maximum number of supported interrupts
 * @safe_map: safe default value to initialize the crossbar
 * @max_crossbar_sources: Maximum number of crossbar sources
 * @irq_map: array of interrupts to crossbar number mapping
 * @crossbar_base: crossbar base address
 * @register_offsets: offsets for each irq number
 * @write: register write function pointer
 */
struct crossbar_device {
	raw_spinlock_t lock;
	uint int_max;
	uint safe_map;
	uint max_crossbar_sources;
	uint *irq_map;
	void __iomem *crossbar_base;
	int *register_offsets;
	void (*write)(int, int);
};

static struct crossbar_device *cb;

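/*
 * Each crossbar output line has its own register; writing a crossbar
 * input (event) number into it routes that event to the corresponding
 * GIC input. The register width is chosen at probe time from the
 * "ti,reg-size" property, hence the three helpers below.
 */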
static void crossbar_writel(int irq_no, int cb_no)
{
	writel(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
}

static void crossbar_writew(int irq_no, int cb_no)
{
	writew(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
}

static void crossbar_writeb(int irq_no, int cb_no)
{
	writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
}

static struct irq_chip crossbar_chip = {
	.name			= "CBAR",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= irq_chip_set_type_parent,
	.flags			= IRQCHIP_MASK_ON_SUSPEND |
				  IRQCHIP_SKIP_SET_WAKE,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};

static int allocate_gic_irq(struct irq_domain *domain, unsigned virq,
			    irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	int i;
	int err;

	if (!irq_domain_get_of_node(domain->parent))
		return -EINVAL;

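	/*
	 * Grab a free crossbar output line for this event, scanning
	 * from the highest line downwards.
	 */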
	raw_spin_lock(&cb->lock);
	for (i = cb->int_max - 1; i >= 0; i--) {
		if (cb->irq_map[i] == IRQ_FREE) {
			cb->irq_map[i] = hwirq;
			break;
		}
	}
	raw_spin_unlock(&cb->lock);

	if (i < 0)
		return -ENODEV;

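	/*
	 * Allocate the matching SPI in the parent GIC domain and, on
	 * success, program crossbar line i to route hwirq to it.
	 */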
	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 3;
	fwspec.param[0] = 0;	/* SPI */
	fwspec.param[1] = i;
	fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;

	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (err)
		cb->irq_map[i] = IRQ_FREE;
	else
		cb->write(i, hwirq);

	return err;
}

static int crossbar_domain_alloc(struct irq_domain *d, unsigned int virq,
				 unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	irq_hw_number_t hwirq;
	int i;

	if (fwspec->param_count != 3)
		return -EINVAL;	/* Not GIC compliant */
	if (fwspec->param[0] != 0)
		return -EINVAL;	/* No PPI should point to this domain */

	hwirq = fwspec->param[1];
	if ((hwirq + nr_irqs) > cb->max_crossbar_sources)
		return -EINVAL;	/* Can't deal with this */

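	/*
	 * For each requested interrupt, back the crossbar hwirq with a
	 * parent GIC SPI and attach crossbar_chip so mask/unmask/eoi
	 * are forwarded up the hierarchy.
	 */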
	for (i = 0; i < nr_irqs; i++) {
		int err = allocate_gic_irq(d, virq + i, hwirq + i);

		if (err)
			return err;

		irq_domain_set_hwirq_and_chip(d, virq + i, hwirq + i,
					      &crossbar_chip, NULL);
	}

	return 0;
}

/**
 * crossbar_domain_free - unmap/free a crossbar<->irq connection
 * @domain: domain of irq to unmap
 * @virq: virq number
 * @nr_irqs: number of irqs to free
 *
 * We do not maintain a use count of the total number of map/unmap
 * calls for a particular irq to find out if the irq can really be
 * unmapped. This is because unmap is called during irq_dispose_mapping(irq),
 * after which the irq is unusable anyway. So an explicit map has to be
 * called after that.
 */
static void crossbar_domain_free(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs)
{
	int i;

	raw_spin_lock(&cb->lock);
	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_domain_reset_irq_data(d);
		cb->irq_map[d->hwirq] = IRQ_FREE;
		cb->write(d->hwirq, cb->safe_map);
	}
	raw_spin_unlock(&cb->lock);
}

static int crossbar_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		*hwirq = fwspec->param[1];
		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
		return 0;
	}

	return -EINVAL;
}

static const struct irq_domain_ops crossbar_domain_ops = {
	.alloc		= crossbar_domain_alloc,
	.free		= crossbar_domain_free,
	.translate	= crossbar_domain_translate,
};

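/*
 * Illustrative device tree fragment showing the properties parsed below.
 * The node name, addresses and values here are examples only, not taken
 * from any particular board file:
 *
 *	crossbar_mpu: crossbar@4a002a48 {
 *		compatible = "ti,irq-crossbar";
 *		reg = <0x4a002a48 0x130>;
 *		interrupt-controller;
 *		interrupt-parent = <&wakeupgen>;
 *		#interrupt-cells = <3>;
 *		ti,max-irqs = <160>;
 *		ti,max-crossbar-sources = <400>;
 *		ti,reg-size = <2>;
 *		ti,irqs-reserved = <0 1 2 3 5 6 131 132>;
 *		ti,irqs-skip = <10 133 139 140>;
 *		ti,irqs-safe-map = <0>;
 *	};
 */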
static int __init crossbar_of_init(struct device_node *node)
{
	u32 max = 0, entry, reg_size;
	int i, size, reserved = 0;
	const __be32 *irqsr;
	int ret = -ENOMEM;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return ret;

	cb->crossbar_base = of_iomap(node, 0);
	if (!cb->crossbar_base)
		goto err_cb;

	of_property_read_u32(node, "ti,max-crossbar-sources",
			     &cb->max_crossbar_sources);
	if (!cb->max_crossbar_sources) {
		pr_err("missing 'ti,max-crossbar-sources' property\n");
		ret = -EINVAL;
		goto err_base;
	}

	of_property_read_u32(node, "ti,max-irqs", &max);
	if (!max) {
		pr_err("missing 'ti,max-irqs' property\n");
		ret = -EINVAL;
		goto err_base;
	}
	cb->irq_map = kcalloc(max, sizeof(int), GFP_KERNEL);
	if (!cb->irq_map)
		goto err_base;

	cb->int_max = max;

	for (i = 0; i < max; i++)
		cb->irq_map[i] = IRQ_FREE;

	/* Get and mark reserved irqs */
	irqsr = of_get_property(node, "ti,irqs-reserved", &size);
	if (irqsr) {
		size /= sizeof(__be32);

		for (i = 0; i < size; i++) {
			of_property_read_u32_index(node,
						   "ti,irqs-reserved",
						   i, &entry);
			if (entry >= max) {
				pr_err("Invalid reserved entry\n");
				ret = -EINVAL;
				goto err_irq_map;
			}
			cb->irq_map[entry] = IRQ_RESERVED;
		}
	}

	/* Skip irqs hardwired to bypass the crossbar */
	irqsr = of_get_property(node, "ti,irqs-skip", &size);
	if (irqsr) {
		size /= sizeof(__be32);

		for (i = 0; i < size; i++) {
			of_property_read_u32_index(node,
						   "ti,irqs-skip",
						   i, &entry);
			if (entry >= max) {
				pr_err("Invalid skip entry\n");
				ret = -EINVAL;
				goto err_irq_map;
			}
			cb->irq_map[entry] = IRQ_SKIP;
		}
	}

	cb->register_offsets = kcalloc(max, sizeof(int), GFP_KERNEL);
	if (!cb->register_offsets)
		goto err_irq_map;

	of_property_read_u32(node, "ti,reg-size", &reg_size);

	switch (reg_size) {
	case 1:
		cb->write = crossbar_writeb;
		break;
	case 2:
		cb->write = crossbar_writew;
		break;
	case 4:
		cb->write = crossbar_writel;
		break;
	default:
		pr_err("Invalid reg-size property\n");
		ret = -EINVAL;
		goto err_reg_offset;
		break;
	}

	/*
	 * Register offsets are not linear because of the
	 * reserved irqs, so find and store the offsets once.
	 */
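	/*
	 * For example (illustrative numbers only): with reg_size = 2 and
	 * irq 1 reserved, irqs 0, 2 and 3 land at offsets 0, 2 and 4;
	 * the reserved slot consumes no register space.
	 */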
	for (i = 0; i < max; i++) {
		if (cb->irq_map[i] == IRQ_RESERVED)
			continue;

		cb->register_offsets[i] = reserved;
		reserved += reg_size;
	}

	of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
	/* Initialize the crossbar with safe map to start with */
	for (i = 0; i < max; i++) {
		if (cb->irq_map[i] == IRQ_RESERVED ||
		    cb->irq_map[i] == IRQ_SKIP)
			continue;

		cb->write(i, cb->safe_map);
	}

	raw_spin_lock_init(&cb->lock);

	return 0;

err_reg_offset:
	kfree(cb->register_offsets);
err_irq_map:
	kfree(cb->irq_map);
err_base:
	iounmap(cb->crossbar_base);
err_cb:
	kfree(cb);

	cb = NULL;
	return ret;
}

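/*
 * Probe entry point: require a parent interrupt controller (the GIC),
 * parse the crossbar description, then stack a hierarchical irq domain
 * covering max_crossbar_sources hwirqs on top of the parent domain.
 */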
static int __init irqcrossbar_init(struct device_node *node,
				   struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	int err;

	if (!parent) {
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%pOF: unable to obtain parent domain\n", node);
		return -ENXIO;
	}

	err = crossbar_of_init(node);
	if (err)
		return err;

	domain = irq_domain_add_hierarchy(parent_domain, 0,
					  cb->max_crossbar_sources,
					  node, &crossbar_domain_ops,
					  NULL);
	if (!domain) {
		pr_err("%pOF: failed to allocate domain\n", node);
		return -ENOMEM;
	}

	return 0;
}

IRQCHIP_DECLARE(ti_irqcrossbar, "ti,irq-crossbar", irqcrossbar_init);