]>
Commit | Line | Data |
---|---|---|
9f1463b8 LV |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * Texas Instruments' K3 Interrupt Aggregator irqchip driver | |
4 | * | |
5 | * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/ | |
6 | * Lokesh Vutla <lokeshvutla@ti.com> | |
7 | */ | |
8 | ||
9 | #include <linux/err.h> | |
10 | #include <linux/io.h> | |
11 | #include <linux/irqchip.h> | |
12 | #include <linux/irqdomain.h> | |
13 | #include <linux/interrupt.h> | |
14 | #include <linux/msi.h> | |
15 | #include <linux/module.h> | |
16 | #include <linux/moduleparam.h> | |
17 | #include <linux/of_address.h> | |
18 | #include <linux/of_irq.h> | |
19 | #include <linux/of_platform.h> | |
20 | #include <linux/irqchip/chained_irq.h> | |
f011df61 | 21 | #include <linux/soc/ti/ti_sci_inta_msi.h> |
9f1463b8 LV |
22 | #include <linux/soc/ti/ti_sci_protocol.h> |
23 | #include <asm-generic/msi.h> | |
24 | ||
/*
 * A hwirq in this driver is a 32-bit composite: the TISCI device ID in the
 * upper 16 bits and the per-device IRQ (event) index in the lower 16 bits.
 */
#define TI_SCI_DEV_ID_MASK	0xffff
#define TI_SCI_DEV_ID_SHIFT	16
#define TI_SCI_IRQ_ID_MASK	0xffff
#define TI_SCI_IRQ_ID_SHIFT	0
/* Extract the TISCI device ID (upper 16 bits) from a composite hwirq */
#define HWIRQ_TO_DEVID(hwirq)	(((hwirq) >> (TI_SCI_DEV_ID_SHIFT)) & \
				 (TI_SCI_DEV_ID_MASK))
/* Extract the per-device IRQ index (lower 16 bits) from a composite hwirq */
#define HWIRQ_TO_IRQID(hwirq)	((hwirq) & (TI_SCI_IRQ_ID_MASK))
/* Pack a device ID and IRQ index into a composite hwirq */
#define TO_HWIRQ(dev, index)	((((dev) & TI_SCI_DEV_ID_MASK) << \
				  TI_SCI_DEV_ID_SHIFT) | \
				 ((index) & TI_SCI_IRQ_ID_MASK))

/* Each vint aggregates up to 64 events (one 64-bit status register) */
#define MAX_EVENTS_PER_VINT	64
/*
 * Register offsets within a vint's register window; the code below addresses
 * each vint at base + vint_id * 0x1000.
 */
#define VINT_ENABLE_SET_OFFSET	0x0
#define VINT_ENABLE_CLR_OFFSET	0x8
#define VINT_STATUS_OFFSET	0x18
#define VINT_STATUS_MASKED_OFFSET	0x20
9f1463b8 LV |
41 | |
/**
 * struct ti_sci_inta_event_desc - Description of an event coming to
 *				   Interrupt Aggregator. This serves
 *				   as a mapping table for global event,
 *				   hwirq and vint bit.
 * @global_event:	Global event number corresponding to this event
 * @hwirq:		Hwirq of the incoming interrupt (composite
 *			device-ID/IRQ-index value, see TO_HWIRQ())
 * @vint_bit:		Corresponding vint bit to which this event is attached.
 */
struct ti_sci_inta_event_desc {
	u16 global_event;
	u32 hwirq;
	u8 vint_bit;
};
56 | ||
/**
 * struct ti_sci_inta_vint_desc - Description of a virtual interrupt coming out
 *				  of Interrupt Aggregator.
 * @domain:		Pointer to IRQ domain to which this vint belongs.
 * @list:		List entry for the vint list
 * @event_map:		Bitmap to manage the allocation of events to vint.
 * @events:		Array of event descriptors assigned to this vint.
 * @parent_virq:	Linux IRQ number that gets attached to parent
 * @vint_id:		TISCI vint ID
 */
struct ti_sci_inta_vint_desc {
	struct irq_domain *domain;
	struct list_head list;
	DECLARE_BITMAP(event_map, MAX_EVENTS_PER_VINT);
	struct ti_sci_inta_event_desc events[MAX_EVENTS_PER_VINT];
	unsigned int parent_virq;
	u16 vint_id;
};
75 | ||
/**
 * struct ti_sci_inta_irq_domain - Structure representing a TISCI based
 *				   Interrupt Aggregator IRQ domain.
 * @sci:		Pointer to TISCI handle
 * @vint:		TISCI resource pointer representing IA interrupts.
 * @global_event:	TISCI resource pointer representing global events.
 * @vint_list:		List of the vints active in the system
 * @vint_mutex:		Mutex to protect vint_list
 * @base:		Base address of the memory mapped IO registers
 * @pdev:		Pointer to platform device.
 */
struct ti_sci_inta_irq_domain {
	const struct ti_sci_handle *sci;
	struct ti_sci_resource *vint;
	struct ti_sci_resource *global_event;
	struct list_head vint_list;
	/* Mutex to protect vint list */
	struct mutex vint_mutex;
	void __iomem *base;
	struct platform_device *pdev;
};
97 | ||
/*
 * Map an event descriptor back to the vint descriptor that embeds it:
 * @e is &vint_desc->events[i], so container_of() recovers the vint_desc.
 */
#define to_vint_desc(e, i)	container_of(e, struct ti_sci_inta_vint_desc, \
					     events[i])
100 | ||
/**
 * ti_sci_inta_irq_handler() - Chained IRQ handler for the vint irqs
 * @desc:	Pointer to irq_desc corresponding to the irq
 *
 * Demultiplexes the parent vint interrupt: reads the masked status register
 * of the vint and calls the handler of every mapped event that is pending.
 */
static void ti_sci_inta_irq_handler(struct irq_desc *desc)
{
	struct ti_sci_inta_vint_desc *vint_desc;
	struct ti_sci_inta_irq_domain *inta;
	struct irq_domain *domain;
	unsigned int virq, bit;
	unsigned long val;

	vint_desc = irq_desc_get_handler_data(desc);
	domain = vint_desc->domain;
	inta = domain->host_data;

	chained_irq_enter(irq_desc_get_chip(desc), desc);

	/* One bit per event that is both pending and unmasked */
	val = readq_relaxed(inta->base + vint_desc->vint_id * 0x1000 +
			    VINT_STATUS_MASKED_OFFSET);

	for_each_set_bit(bit, &val, MAX_EVENTS_PER_VINT) {
		virq = irq_find_mapping(domain, vint_desc->events[bit].hwirq);
		if (virq)
			generic_handle_irq(virq);
	}

	chained_irq_exit(irq_desc_get_chip(desc), desc);
}
130 | ||
/**
 * ti_sci_inta_alloc_parent_irq() - Allocate parent irq to Interrupt aggregator
 * @domain:	IRQ domain corresponding to Interrupt Aggregator
 *
 * Grabs a free vint from the TISCI resource pool, maps the corresponding
 * parent interrupt and installs the chained demux handler on it.
 *
 * Return: pointer to the new vint descriptor if all went well, else a
 * corresponding ERR_PTR() value.
 */
static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_domain *domain)
{
	struct ti_sci_inta_irq_domain *inta = domain->host_data;
	struct ti_sci_inta_vint_desc *vint_desc;
	struct irq_fwspec parent_fwspec;
	unsigned int parent_virq;
	u16 vint_id;

	vint_id = ti_sci_get_free_resource(inta->vint);
	if (vint_id == TI_SCI_RESOURCE_NULL)
		return ERR_PTR(-EINVAL);

	vint_desc = kzalloc(sizeof(*vint_desc), GFP_KERNEL);
	if (!vint_desc)
		return ERR_PTR(-ENOMEM);

	vint_desc->domain = domain;
	vint_desc->vint_id = vint_id;
	INIT_LIST_HEAD(&vint_desc->list);

	/* Parent is identified by (INTA device id, vint id) - 2 cells */
	parent_fwspec.fwnode = of_node_to_fwnode(of_irq_find_parent(dev_of_node(&inta->pdev->dev)));
	parent_fwspec.param_count = 2;
	parent_fwspec.param[0] = inta->pdev->id;
	parent_fwspec.param[1] = vint_desc->vint_id;

	parent_virq = irq_create_fwspec_mapping(&parent_fwspec);
	if (parent_virq == 0) {
		kfree(vint_desc);
		return ERR_PTR(-EINVAL);
	}
	vint_desc->parent_virq = parent_virq;

	/* Caller holds vint_mutex, which protects vint_list */
	list_add_tail(&vint_desc->list, &inta->vint_list);
	irq_set_chained_handler_and_data(vint_desc->parent_virq,
					 ti_sci_inta_irq_handler, vint_desc);

	return vint_desc;
}
175 | ||
/**
 * ti_sci_inta_alloc_event() - Attach an event to a IA vint.
 * @vint_desc:	Pointer to vint_desc to which the event gets attached
 * @free_bit:	Bit inside vint to which event gets attached
 * @hwirq:	hwirq of the input event
 *
 * Allocates a global event from the TISCI pool and asks system firmware to
 * map the device event onto the given vint bit.
 *
 * Return: event_desc pointer if all went ok else appropriate error value.
 */
static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_event(struct ti_sci_inta_vint_desc *vint_desc,
							      u16 free_bit,
							      u32 hwirq)
{
	struct ti_sci_inta_irq_domain *inta = vint_desc->domain->host_data;
	struct ti_sci_inta_event_desc *event_desc;
	u16 dev_id, dev_index;
	int err;

	/* Decompose the composite hwirq into TISCI device id + irq index */
	dev_id = HWIRQ_TO_DEVID(hwirq);
	dev_index = HWIRQ_TO_IRQID(hwirq);

	event_desc = &vint_desc->events[free_bit];
	event_desc->hwirq = hwirq;
	event_desc->vint_bit = free_bit;
	event_desc->global_event = ti_sci_get_free_resource(inta->global_event);
	if (event_desc->global_event == TI_SCI_RESOURCE_NULL)
		return ERR_PTR(-EINVAL);

	/* Program the firmware route: device event -> global event -> vint bit */
	err = inta->sci->ops.rm_irq_ops.set_event_map(inta->sci,
						      dev_id, dev_index,
						      inta->pdev->id,
						      vint_desc->vint_id,
						      event_desc->global_event,
						      free_bit);
	if (err)
		goto free_global_event;

	return event_desc;
free_global_event:
	ti_sci_release_resource(inta->global_event, event_desc->global_event);
	return ERR_PTR(err);
}
217 | ||
/**
 * ti_sci_inta_alloc_irq() -  Allocate an irq within INTA domain
 * @domain:	irq_domain pointer corresponding to INTA
 * @hwirq:	hwirq of the input event
 *
 * Note: Allocation happens in the following manner:
 *	- Find a free bit available in any of the vints available in the list.
 *	- If not found, allocate a vint from the vint pool
 *	- Attach the free bit to input hwirq.
 * Return event_desc if all went ok else appropriate error value.
 */
static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_irq(struct irq_domain *domain,
							    u32 hwirq)
{
	struct ti_sci_inta_irq_domain *inta = domain->host_data;
	struct ti_sci_inta_vint_desc *vint_desc = NULL;
	struct ti_sci_inta_event_desc *event_desc;
	u16 free_bit;

	/* vint_mutex serializes vint_list walks and event_map updates */
	mutex_lock(&inta->vint_mutex);
	list_for_each_entry(vint_desc, &inta->vint_list, list) {
		free_bit = find_first_zero_bit(vint_desc->event_map,
					       MAX_EVENTS_PER_VINT);
		if (free_bit != MAX_EVENTS_PER_VINT) {
			set_bit(free_bit, vint_desc->event_map);
			goto alloc_event;
		}
	}

	/* No free bits available. Allocate a new vint */
	vint_desc = ti_sci_inta_alloc_parent_irq(domain);
	if (IS_ERR(vint_desc)) {
		event_desc = ERR_CAST(vint_desc);
		goto unlock;
	}

	/* Fresh vint: event_map is all-zero, so bit 0 is returned */
	free_bit = find_first_zero_bit(vint_desc->event_map,
				       MAX_EVENTS_PER_VINT);
	set_bit(free_bit, vint_desc->event_map);

alloc_event:
	event_desc = ti_sci_inta_alloc_event(vint_desc, free_bit, hwirq);
	if (IS_ERR(event_desc))
		clear_bit(free_bit, vint_desc->event_map);

unlock:
	mutex_unlock(&inta->vint_mutex);
	return event_desc;
}
267 | ||
268 | /** | |
269 | * ti_sci_inta_free_parent_irq() - Free a parent irq to INTA | |
270 | * @inta: Pointer to inta domain. | |
271 | * @vint_desc: Pointer to vint_desc that needs to be freed. | |
272 | */ | |
273 | static void ti_sci_inta_free_parent_irq(struct ti_sci_inta_irq_domain *inta, | |
274 | struct ti_sci_inta_vint_desc *vint_desc) | |
275 | { | |
276 | if (find_first_bit(vint_desc->event_map, MAX_EVENTS_PER_VINT) == MAX_EVENTS_PER_VINT) { | |
277 | list_del(&vint_desc->list); | |
278 | ti_sci_release_resource(inta->vint, vint_desc->vint_id); | |
279 | irq_dispose_mapping(vint_desc->parent_virq); | |
280 | kfree(vint_desc); | |
281 | } | |
282 | } | |
283 | ||
/**
 * ti_sci_inta_free_irq() - Free an IRQ within INTA domain
 * @event_desc:	Pointer to event_desc that needs to be freed.
 * @hwirq:	Hwirq number within INTA domain that needs to be freed
 *
 * Undoes ti_sci_inta_alloc_irq(): removes the firmware event route,
 * releases the global event and, if this was the vint's last event,
 * frees the parent vint as well.
 */
static void ti_sci_inta_free_irq(struct ti_sci_inta_event_desc *event_desc,
				 u32 hwirq)
{
	struct ti_sci_inta_vint_desc *vint_desc;
	struct ti_sci_inta_irq_domain *inta;

	vint_desc = to_vint_desc(event_desc, event_desc->vint_bit);
	inta = vint_desc->domain->host_data;
	/* free event irq */
	mutex_lock(&inta->vint_mutex);
	inta->sci->ops.rm_irq_ops.free_event_map(inta->sci,
						 HWIRQ_TO_DEVID(hwirq),
						 HWIRQ_TO_IRQID(hwirq),
						 inta->pdev->id,
						 vint_desc->vint_id,
						 event_desc->global_event,
						 event_desc->vint_bit);

	clear_bit(event_desc->vint_bit, vint_desc->event_map);
	ti_sci_release_resource(inta->global_event, event_desc->global_event);
	/* Mark the slot unused so a stale descriptor is recognizable */
	event_desc->global_event = TI_SCI_RESOURCE_NULL;
	event_desc->hwirq = 0;

	/* Frees the vint only when its event_map just became empty */
	ti_sci_inta_free_parent_irq(inta, vint_desc);
	mutex_unlock(&inta->vint_mutex);
}
315 | ||
316 | /** | |
317 | * ti_sci_inta_request_resources() - Allocate resources for input irq | |
318 | * @data: Pointer to corresponding irq_data | |
319 | * | |
320 | * Note: This is the core api where the actual allocation happens for input | |
321 | * hwirq. This allocation involves creating a parent irq for vint. | |
322 | * If this is done in irq_domain_ops.alloc() then a deadlock is reached | |
323 | * for allocation. So this allocation is being done in request_resources() | |
324 | * | |
325 | * Return: 0 if all went well else corresponding error. | |
326 | */ | |
327 | static int ti_sci_inta_request_resources(struct irq_data *data) | |
328 | { | |
329 | struct ti_sci_inta_event_desc *event_desc; | |
330 | ||
331 | event_desc = ti_sci_inta_alloc_irq(data->domain, data->hwirq); | |
332 | if (IS_ERR(event_desc)) | |
333 | return PTR_ERR(event_desc); | |
334 | ||
335 | data->chip_data = event_desc; | |
336 | ||
337 | return 0; | |
338 | } | |
339 | ||
340 | /** | |
341 | * ti_sci_inta_release_resources - Release resources for input irq | |
342 | * @data: Pointer to corresponding irq_data | |
343 | * | |
344 | * Note: Corresponding to request_resources(), all the unmapping and deletion | |
345 | * of parent vint irqs happens in this api. | |
346 | */ | |
347 | static void ti_sci_inta_release_resources(struct irq_data *data) | |
348 | { | |
349 | struct ti_sci_inta_event_desc *event_desc; | |
350 | ||
351 | event_desc = irq_data_get_irq_chip_data(data); | |
352 | ti_sci_inta_free_irq(event_desc, data->hwirq); | |
353 | } | |
354 | ||
/**
 * ti_sci_inta_manage_event() - Control the event based on the offset
 * @data:	Pointer to corresponding irq_data
 * @offset:	register offset using which event is controlled.
 *
 * Writes the event's bit to the selected per-vint register
 * (enable-set, enable-clear or status), implementing unmask/mask/ack.
 */
static void ti_sci_inta_manage_event(struct irq_data *data, u32 offset)
{
	struct ti_sci_inta_event_desc *event_desc;
	struct ti_sci_inta_vint_desc *vint_desc;
	struct ti_sci_inta_irq_domain *inta;

	event_desc = irq_data_get_irq_chip_data(data);
	vint_desc = to_vint_desc(event_desc, event_desc->vint_bit);
	inta = data->domain->host_data;

	/* Each vint has a 0x1000-sized register window */
	writeq_relaxed(BIT(event_desc->vint_bit),
		       inta->base + vint_desc->vint_id * 0x1000 + offset);
}
373 | ||
/**
 * ti_sci_inta_mask_irq() - Mask an event
 * @data:	Pointer to corresponding irq_data
 *
 * Writes the event bit to the vint's enable-clear register.
 */
static void ti_sci_inta_mask_irq(struct irq_data *data)
{
	ti_sci_inta_manage_event(data, VINT_ENABLE_CLR_OFFSET);
}
382 | ||
/**
 * ti_sci_inta_unmask_irq() - Unmask an event
 * @data:	Pointer to corresponding irq_data
 *
 * Writes the event bit to the vint's enable-set register.
 */
static void ti_sci_inta_unmask_irq(struct irq_data *data)
{
	ti_sci_inta_manage_event(data, VINT_ENABLE_SET_OFFSET);
}
391 | ||
/**
 * ti_sci_inta_ack_irq() - Ack an event
 * @data:	Pointer to corresponding irq_data
 *
 * Clears the event's status bit, except for level (TRIGGER_HIGH) irqs.
 */
static void ti_sci_inta_ack_irq(struct irq_data *data)
{
	/*
	 * Do not clear the event if hardware is capable of sending
	 * a down event.
	 */
	if (irqd_get_trigger_type(data) != IRQF_TRIGGER_HIGH)
		ti_sci_inta_manage_event(data, VINT_STATUS_OFFSET);
}
405 | ||
/**
 * ti_sci_inta_set_affinity() - irq_set_affinity callback
 * @d:		Pointer to corresponding irq_data
 * @mask_val:	Requested CPU affinity mask
 * @force:	Whether the affinity change is forced
 *
 * Affinity is not configurable per event here; always fails with -EINVAL.
 */
static int ti_sci_inta_set_affinity(struct irq_data *d,
				    const struct cpumask *mask_val, bool force)
{
	return -EINVAL;
}
411 | ||
412 | /** | |
413 | * ti_sci_inta_set_type() - Update the trigger type of the irq. | |
414 | * @data: Pointer to corresponding irq_data | |
415 | * @type: Trigger type as specified by user | |
416 | * | |
417 | * Note: This updates the handle_irq callback for level msi. | |
418 | * | |
419 | * Return 0 if all went well else appropriate error. | |
420 | */ | |
421 | static int ti_sci_inta_set_type(struct irq_data *data, unsigned int type) | |
422 | { | |
423 | /* | |
424 | * .alloc default sets handle_edge_irq. But if the user specifies | |
425 | * that IRQ is level MSI, then update the handle to handle_level_irq | |
426 | */ | |
427 | switch (type & IRQ_TYPE_SENSE_MASK) { | |
428 | case IRQF_TRIGGER_HIGH: | |
429 | irq_set_handler_locked(data, handle_level_irq); | |
430 | return 0; | |
431 | case IRQF_TRIGGER_RISING: | |
432 | return 0; | |
433 | default: | |
434 | return -EINVAL; | |
435 | } | |
436 | ||
437 | return -EINVAL; | |
438 | } | |
439 | ||
/* irq_chip backing the per-event irqs inside the INTA domain */
static struct irq_chip ti_sci_inta_irq_chip = {
	.name			= "INTA",
	.irq_ack		= ti_sci_inta_ack_irq,
	.irq_mask		= ti_sci_inta_mask_irq,
	.irq_set_type		= ti_sci_inta_set_type,
	.irq_unmask		= ti_sci_inta_unmask_irq,
	.irq_set_affinity	= ti_sci_inta_set_affinity,
	.irq_request_resources	= ti_sci_inta_request_resources,
	.irq_release_resources	= ti_sci_inta_release_resources,
};
450 | ||
/**
 * ti_sci_inta_irq_domain_free() - Free an IRQ from the IRQ domain
 * @domain:	Domain to which the irqs belong
 * @virq:	base linux virtual IRQ to be freed.
 * @nr_irqs:	Number of continuous irqs to be freed
 *
 * Only the irq_data is reset; the real teardown is done in
 * irq_release_resources (ti_sci_inta_release_resources).
 */
static void ti_sci_inta_irq_domain_free(struct irq_domain *domain,
					unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);

	irq_domain_reset_irq_data(data);
}
464 | ||
/**
 * ti_sci_inta_irq_domain_alloc() - Allocate Interrupt aggregator IRQs
 * @domain:	Point to the interrupt aggregator IRQ domain
 * @virq:	Corresponding Linux virtual IRQ number
 * @nr_irqs:	Continuous irqs to be allocated
 * @data:	Pointer to firmware specifier
 *
 * No actual allocation happens here. The virq is just associated with the
 * INTA chip and handle_edge_irq (ti_sci_inta_set_type switches to
 * handle_level_irq for level MSIs); real resource allocation is deferred
 * to irq_request_resources.
 *
 * Return 0 if all went well else appropriate error value.
 */
static int ti_sci_inta_irq_domain_alloc(struct irq_domain *domain,
					unsigned int virq, unsigned int nr_irqs,
					void *data)
{
	msi_alloc_info_t *arg = data;

	irq_domain_set_info(domain, virq, arg->hwirq, &ti_sci_inta_irq_chip,
			    NULL, handle_edge_irq, NULL, NULL);

	return 0;
}
487 | ||
/* Domain ops for the inner (non-MSI) INTA irq domain */
static const struct irq_domain_ops ti_sci_inta_irq_domain_ops = {
	.free		= ti_sci_inta_irq_domain_free,
	.alloc		= ti_sci_inta_irq_domain_alloc,
};
492 | ||
f011df61 LV |
/* irq_chip for the MSI layer stacked on top of the INTA domain */
static struct irq_chip ti_sci_inta_msi_irq_chip = {
	.name			= "MSI-INTA",
	.flags			= IRQCHIP_SUPPORTS_LEVEL_MSI,
};
497 | ||
/**
 * ti_sci_inta_msi_set_desc() - Set the hwirq for an MSI allocation
 * @arg:	MSI allocation info to fill in
 * @desc:	MSI descriptor for the requesting device
 *
 * Builds the composite hwirq from the device's TISCI id and the
 * INTA-specific event index carried in the descriptor.
 */
static void ti_sci_inta_msi_set_desc(msi_alloc_info_t *arg,
				     struct msi_desc *desc)
{
	struct platform_device *pdev = to_platform_device(desc->dev);

	arg->desc = desc;
	arg->hwirq = TO_HWIRQ(pdev->id, desc->inta.dev_index);
}
506 | ||
static struct msi_domain_ops ti_sci_inta_msi_ops = {
	.set_desc	= ti_sci_inta_msi_set_desc,
};

/* MSI domain info: default ops/chip callbacks plus level-MSI capability */
static struct msi_domain_info ti_sci_inta_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_LEVEL_CAPABLE),
	.ops	= &ti_sci_inta_msi_ops,
	.chip	= &ti_sci_inta_msi_irq_chip,
};
517 | ||
9f1463b8 LV |
518 | static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev) |
519 | { | |
f011df61 | 520 | struct irq_domain *parent_domain, *domain, *msi_domain; |
9f1463b8 LV |
521 | struct device_node *parent_node, *node; |
522 | struct ti_sci_inta_irq_domain *inta; | |
523 | struct device *dev = &pdev->dev; | |
524 | struct resource *res; | |
525 | int ret; | |
526 | ||
527 | node = dev_of_node(dev); | |
528 | parent_node = of_irq_find_parent(node); | |
529 | if (!parent_node) { | |
530 | dev_err(dev, "Failed to get IRQ parent node\n"); | |
531 | return -ENODEV; | |
532 | } | |
533 | ||
534 | parent_domain = irq_find_host(parent_node); | |
535 | if (!parent_domain) | |
536 | return -EPROBE_DEFER; | |
537 | ||
538 | inta = devm_kzalloc(dev, sizeof(*inta), GFP_KERNEL); | |
539 | if (!inta) | |
540 | return -ENOMEM; | |
541 | ||
542 | inta->pdev = pdev; | |
543 | inta->sci = devm_ti_sci_get_by_phandle(dev, "ti,sci"); | |
544 | if (IS_ERR(inta->sci)) { | |
545 | ret = PTR_ERR(inta->sci); | |
546 | if (ret != -EPROBE_DEFER) | |
547 | dev_err(dev, "ti,sci read fail %d\n", ret); | |
548 | inta->sci = NULL; | |
549 | return ret; | |
550 | } | |
551 | ||
552 | ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &pdev->id); | |
553 | if (ret) { | |
554 | dev_err(dev, "missing 'ti,sci-dev-id' property\n"); | |
555 | return -EINVAL; | |
556 | } | |
557 | ||
558 | inta->vint = devm_ti_sci_get_of_resource(inta->sci, dev, pdev->id, | |
559 | "ti,sci-rm-range-vint"); | |
560 | if (IS_ERR(inta->vint)) { | |
561 | dev_err(dev, "VINT resource allocation failed\n"); | |
562 | return PTR_ERR(inta->vint); | |
563 | } | |
564 | ||
565 | inta->global_event = devm_ti_sci_get_of_resource(inta->sci, dev, pdev->id, | |
566 | "ti,sci-rm-range-global-event"); | |
567 | if (IS_ERR(inta->global_event)) { | |
568 | dev_err(dev, "Global event resource allocation failed\n"); | |
569 | return PTR_ERR(inta->global_event); | |
570 | } | |
571 | ||
572 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
573 | inta->base = devm_ioremap_resource(dev, res); | |
574 | if (IS_ERR(inta->base)) | |
575 | return -ENODEV; | |
576 | ||
577 | domain = irq_domain_add_linear(dev_of_node(dev), | |
578 | ti_sci_get_num_resources(inta->vint), | |
579 | &ti_sci_inta_irq_domain_ops, inta); | |
580 | if (!domain) { | |
581 | dev_err(dev, "Failed to allocate IRQ domain\n"); | |
582 | return -ENOMEM; | |
583 | } | |
584 | ||
f011df61 LV |
585 | msi_domain = ti_sci_inta_msi_create_irq_domain(of_node_to_fwnode(node), |
586 | &ti_sci_inta_msi_domain_info, | |
587 | domain); | |
588 | if (!msi_domain) { | |
589 | irq_domain_remove(domain); | |
590 | dev_err(dev, "Failed to allocate msi domain\n"); | |
591 | return -ENOMEM; | |
592 | } | |
593 | ||
9f1463b8 LV |
594 | INIT_LIST_HEAD(&inta->vint_list); |
595 | mutex_init(&inta->vint_mutex); | |
596 | ||
597 | return 0; | |
598 | } | |
599 | ||
600 | static const struct of_device_id ti_sci_inta_irq_domain_of_match[] = { | |
601 | { .compatible = "ti,sci-inta", }, | |
602 | { /* sentinel */ }, | |
603 | }; | |
604 | MODULE_DEVICE_TABLE(of, ti_sci_inta_irq_domain_of_match); | |
605 | ||
606 | static struct platform_driver ti_sci_inta_irq_domain_driver = { | |
607 | .probe = ti_sci_inta_irq_domain_probe, | |
608 | .driver = { | |
609 | .name = "ti-sci-inta", | |
610 | .of_match_table = ti_sci_inta_irq_domain_of_match, | |
611 | }, | |
612 | }; | |
613 | module_platform_driver(ti_sci_inta_irq_domain_driver); | |
614 | ||
615 | MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ticom>"); | |
616 | MODULE_DESCRIPTION("K3 Interrupt Aggregator driver over TI SCI protocol"); | |
617 | MODULE_LICENSE("GPL v2"); |