1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4 * Copyright (C) 2005-2006 Thomas Gleixner
5 *
6 * This file contains driver APIs to the irq subsystem.
7 */
8
9 #define pr_fmt(fmt) "genirq: " fmt
10
11 #include <linux/irq.h>
12 #include <linux/kthread.h>
13 #include <linux/module.h>
14 #include <linux/random.h>
15 #include <linux/interrupt.h>
16 #include <linux/irqdomain.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/sched/rt.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/isolation.h>
22 #include <uapi/linux/sched/types.h>
23 #include <linux/task_work.h>
24
25 #include "internals.h"
26
27 #if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
28 DEFINE_STATIC_KEY_FALSE(force_irqthreads_key);
29
30 static int __init setup_forced_irqthreads(char *arg)
31 {
32 static_branch_enable(&force_irqthreads_key);
33 return 0;
34 }
35 early_param("threadirqs", setup_forced_irqthreads);
36 #endif
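/*
 * When the static key above is enabled (kernel booted with the "threadirqs"
 * command line option), interrupts which do not opt out via IRQF_NO_THREAD
 * and friends are force-threaded by irq_setup_forced_threading() below.
 */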
37
38 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
39 {
40 struct irq_data *irqd = irq_desc_get_irq_data(desc);
41 bool inprogress;
42
43 do {
44 unsigned long flags;
45
46 /*
47 * Wait until we're out of the critical section. This might
48 * give the wrong answer due to the lack of memory barriers.
49 */
50 while (irqd_irq_inprogress(&desc->irq_data))
51 cpu_relax();
52
53 /* Ok, that indicated we're done: double-check carefully. */
54 raw_spin_lock_irqsave(&desc->lock, flags);
55 inprogress = irqd_irq_inprogress(&desc->irq_data);
56
57 /*
58 * If requested and supported, check at the chip whether it
59 * is in flight at the hardware level, i.e. already pending
60 * in a CPU and waiting for service and acknowledge.
61 */
62 if (!inprogress && sync_chip) {
63 /*
64 * Ignore the return code. inprogress is only updated
65 * when the chip supports it.
66 */
67 __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
68 &inprogress);
69 }
70 raw_spin_unlock_irqrestore(&desc->lock, flags);
71
72 /* Oops, that failed? */
73 } while (inprogress);
74 }
75
76 /**
77 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
78 * @irq: interrupt number to wait for
79 *
80 * This function waits for any pending hard IRQ handlers for this
81 * interrupt to complete before returning. If you use this
82 * function while holding a resource the IRQ handler may need you
83 * will deadlock. It does not take associated threaded handlers
84 * into account.
85 *
86 * Do not use this for shutdown scenarios where you must be sure
87 * that all parts (hardirq and threaded handler) have completed.
88 *
89 * Returns: false if a threaded handler is active.
90 *
91 * This function may be called - with care - from IRQ context.
92 *
93 * It does not check whether there is an interrupt in flight at the
94 * hardware level, but not serviced yet, as this might deadlock when
95 * called with interrupts disabled and the target CPU of the interrupt
96 * is the current CPU.
97 */
98 bool synchronize_hardirq(unsigned int irq)
99 {
100 struct irq_desc *desc = irq_to_desc(irq);
101
102 if (desc) {
103 __synchronize_hardirq(desc, false);
104 return !atomic_read(&desc->threads_active);
105 }
106
107 return true;
108 }
109 EXPORT_SYMBOL(synchronize_hardirq);
110
111 static void __synchronize_irq(struct irq_desc *desc)
112 {
113 __synchronize_hardirq(desc, true);
114 /*
115 * We made sure that no hardirq handler is running. Now verify that no
116 * threaded handlers are active.
117 */
118 wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
119 }
120
121 /**
122 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
123 * @irq: interrupt number to wait for
124 *
125 * This function waits for any pending IRQ handlers for this interrupt
126 * to complete before returning. If you use this function while
127  * holding a resource the IRQ handler may need, you will deadlock.
128 *
129 * Can only be called from preemptible code as it might sleep when
130 * an interrupt thread is associated to @irq.
131 *
132 * It optionally makes sure (when the irq chip supports that method)
133 * that the interrupt is not pending in any CPU and waiting for
134 * service.
135 */
136 void synchronize_irq(unsigned int irq)
137 {
138 struct irq_desc *desc = irq_to_desc(irq);
139
140 if (desc)
141 __synchronize_irq(desc);
142 }
143 EXPORT_SYMBOL(synchronize_irq);
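/*
 * Illustrative usage sketch, not part of the generic irq code: a driver
 * typically quiesces the device first and then synchronizes before tearing
 * down data its handler touches. The foo_*() names and fields below are
 * hypothetical.
 *
 *	static void foo_teardown(struct foo_dev *foo)
 *	{
 *		foo_mask_device_irqs(foo);	// stop new interrupts at the source
 *		synchronize_irq(foo->irq);	// wait for handlers already running
 *		kfree(foo->rx_ring);		// now safe: no handler can reach it
 *	}
 *
 * This must not be called while holding a lock the handler itself takes;
 * free_irq() performs an equivalent synchronization on its own.
 */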
144
145 #ifdef CONFIG_SMP
146 cpumask_var_t irq_default_affinity;
147
148 static bool __irq_can_set_affinity(struct irq_desc *desc)
149 {
150 if (!desc || !irqd_can_balance(&desc->irq_data) ||
151 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
152 return false;
153 return true;
154 }
155
156 /**
157 * irq_can_set_affinity - Check if the affinity of a given irq can be set
158 * @irq: Interrupt to check
159 *
160 */
161 int irq_can_set_affinity(unsigned int irq)
162 {
163 return __irq_can_set_affinity(irq_to_desc(irq));
164 }
165
166 /**
167 * irq_can_set_affinity_usr - Check if affinity of a irq can be set from user space
168 * @irq: Interrupt to check
169 *
170 * Like irq_can_set_affinity() above, but additionally checks for the
171 * AFFINITY_MANAGED flag.
172 */
173 bool irq_can_set_affinity_usr(unsigned int irq)
174 {
175 struct irq_desc *desc = irq_to_desc(irq);
176
177 return __irq_can_set_affinity(desc) &&
178 !irqd_affinity_is_managed(&desc->irq_data);
179 }
180
181 /**
182 * irq_set_thread_affinity - Notify irq threads to adjust affinity
183 * @desc: irq descriptor which has affinity changed
184 *
185 * We just set IRQTF_AFFINITY and delegate the affinity setting
186  * to the interrupt thread itself. We cannot call
187 * set_cpus_allowed_ptr() here as we hold desc->lock and this
188 * code can be called from hard interrupt context.
189 */
190 void irq_set_thread_affinity(struct irq_desc *desc)
191 {
192 struct irqaction *action;
193
194 for_each_action_of_desc(desc, action) {
195 if (action->thread) {
196 set_bit(IRQTF_AFFINITY, &action->thread_flags);
197 wake_up_process(action->thread);
198 }
199 if (action->secondary && action->secondary->thread) {
200 set_bit(IRQTF_AFFINITY, &action->secondary->thread_flags);
201 wake_up_process(action->secondary->thread);
202 }
203 }
204 }
205
206 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
207 static void irq_validate_effective_affinity(struct irq_data *data)
208 {
209 const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
210 struct irq_chip *chip = irq_data_get_irq_chip(data);
211
212 if (!cpumask_empty(m))
213 return;
214 pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
215 chip->name, data->irq);
216 }
217 #else
218 static inline void irq_validate_effective_affinity(struct irq_data *data) { }
219 #endif
220
221 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
222 bool force)
223 {
224 struct irq_desc *desc = irq_data_to_desc(data);
225 struct irq_chip *chip = irq_data_get_irq_chip(data);
226 const struct cpumask *prog_mask;
227 int ret;
228
229 static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
230 static struct cpumask tmp_mask;
231
232 if (!chip || !chip->irq_set_affinity)
233 return -EINVAL;
234
235 raw_spin_lock(&tmp_mask_lock);
236 /*
237 * If this is a managed interrupt and housekeeping is enabled on
238 * it check whether the requested affinity mask intersects with
239 * a housekeeping CPU. If so, then remove the isolated CPUs from
240 * the mask and just keep the housekeeping CPU(s). This prevents
241 * the affinity setter from routing the interrupt to an isolated
242 * CPU to avoid that I/O submitted from a housekeeping CPU causes
243 * interrupts on an isolated one.
244 *
245 * If the masks do not intersect or include online CPU(s) then
246 * keep the requested mask. The isolated target CPUs are only
247 * receiving interrupts when the I/O operation was submitted
248 * directly from them.
249 *
250 * If all housekeeping CPUs in the affinity mask are offline, the
251 * interrupt will be migrated by the CPU hotplug code once a
252 * housekeeping CPU which belongs to the affinity mask comes
253 * online.
254 */
255 if (irqd_affinity_is_managed(data) &&
256 housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) {
257 const struct cpumask *hk_mask;
258
259 hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
260
261 cpumask_and(&tmp_mask, mask, hk_mask);
262 if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
263 prog_mask = mask;
264 else
265 prog_mask = &tmp_mask;
266 } else {
267 prog_mask = mask;
268 }
269
270 /*
271 * Make sure we only provide online CPUs to the irqchip,
272 * unless we are being asked to force the affinity (in which
273 * case we do as we are told).
274 */
275 cpumask_and(&tmp_mask, prog_mask, cpu_online_mask);
276 if (!force && !cpumask_empty(&tmp_mask))
277 ret = chip->irq_set_affinity(data, &tmp_mask, force);
278 else if (force)
279 ret = chip->irq_set_affinity(data, mask, force);
280 else
281 ret = -EINVAL;
282
283 raw_spin_unlock(&tmp_mask_lock);
284
285 switch (ret) {
286 case IRQ_SET_MASK_OK:
287 case IRQ_SET_MASK_OK_DONE:
288 cpumask_copy(desc->irq_common_data.affinity, mask);
289 fallthrough;
290 case IRQ_SET_MASK_OK_NOCOPY:
291 irq_validate_effective_affinity(data);
292 irq_set_thread_affinity(desc);
293 ret = 0;
294 }
295
296 return ret;
297 }
298
299 #ifdef CONFIG_GENERIC_PENDING_IRQ
300 static inline int irq_set_affinity_pending(struct irq_data *data,
301 const struct cpumask *dest)
302 {
303 struct irq_desc *desc = irq_data_to_desc(data);
304
305 irqd_set_move_pending(data);
306 irq_copy_pending(desc, dest);
307 return 0;
308 }
309 #else
310 static inline int irq_set_affinity_pending(struct irq_data *data,
311 const struct cpumask *dest)
312 {
313 return -EBUSY;
314 }
315 #endif
316
317 static int irq_try_set_affinity(struct irq_data *data,
318 const struct cpumask *dest, bool force)
319 {
320 int ret = irq_do_set_affinity(data, dest, force);
321
322 /*
323 * In case that the underlying vector management is busy and the
324 * architecture supports the generic pending mechanism then utilize
325 * this to avoid returning an error to user space.
326 */
327 if (ret == -EBUSY && !force)
328 ret = irq_set_affinity_pending(data, dest);
329 return ret;
330 }
331
332 static bool irq_set_affinity_deactivated(struct irq_data *data,
333 const struct cpumask *mask)
334 {
335 struct irq_desc *desc = irq_data_to_desc(data);
336
337 /*
338 * Handle irq chips which can handle affinity only in activated
339 * state correctly
340 *
341 * If the interrupt is not yet activated, just store the affinity
342 * mask and do not call the chip driver at all. On activation the
343 * driver has to make sure anyway that the interrupt is in a
344 * usable state so startup works.
345 */
346 if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
347 irqd_is_activated(data) || !irqd_affinity_on_activate(data))
348 return false;
349
350 cpumask_copy(desc->irq_common_data.affinity, mask);
351 irq_data_update_effective_affinity(data, mask);
352 irqd_set(data, IRQD_AFFINITY_SET);
353 return true;
354 }
355
356 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
357 bool force)
358 {
359 struct irq_chip *chip = irq_data_get_irq_chip(data);
360 struct irq_desc *desc = irq_data_to_desc(data);
361 int ret = 0;
362
363 if (!chip || !chip->irq_set_affinity)
364 return -EINVAL;
365
366 if (irq_set_affinity_deactivated(data, mask))
367 return 0;
368
369 if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
370 ret = irq_try_set_affinity(data, mask, force);
371 } else {
372 irqd_set_move_pending(data);
373 irq_copy_pending(desc, mask);
374 }
375
376 if (desc->affinity_notify) {
377 kref_get(&desc->affinity_notify->kref);
378 if (!schedule_work(&desc->affinity_notify->work)) {
379 /* Work was already scheduled, drop our extra ref */
380 kref_put(&desc->affinity_notify->kref,
381 desc->affinity_notify->release);
382 }
383 }
384 irqd_set(data, IRQD_AFFINITY_SET);
385
386 return ret;
387 }
388
389 /**
390 * irq_update_affinity_desc - Update affinity management for an interrupt
391 * @irq: The interrupt number to update
392 * @affinity: Pointer to the affinity descriptor
393 *
394 * This interface can be used to configure the affinity management of
395 * interrupts which have been allocated already.
396 *
397 * There are certain limitations on when it may be used - attempts to use it
398 * for when the kernel is configured for generic IRQ reservation mode (in
399 * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with
400 * managed/non-managed interrupt accounting. In addition, attempts to use it on
401 * an interrupt which is already started or which has already been configured
402 * as managed will also fail, as these mean invalid init state or double init.
403 */
404 int irq_update_affinity_desc(unsigned int irq,
405 struct irq_affinity_desc *affinity)
406 {
407 struct irq_desc *desc;
408 unsigned long flags;
409 bool activated;
410 int ret = 0;
411
412 /*
413 * Supporting this with the reservation scheme used by x86 needs
414 * some more thought. Fail it for now.
415 */
416 if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
417 return -EOPNOTSUPP;
418
419 desc = irq_get_desc_buslock(irq, &flags, 0);
420 if (!desc)
421 return -EINVAL;
422
423 /* Requires the interrupt to be shut down */
424 if (irqd_is_started(&desc->irq_data)) {
425 ret = -EBUSY;
426 goto out_unlock;
427 }
428
429 /* Interrupts which are already managed cannot be modified */
430 if (irqd_affinity_is_managed(&desc->irq_data)) {
431 ret = -EBUSY;
432 goto out_unlock;
433 }
434
435 /*
436 * Deactivate the interrupt. That's required to undo
437 * anything an earlier activation has established.
438 */
439 activated = irqd_is_activated(&desc->irq_data);
440 if (activated)
441 irq_domain_deactivate_irq(&desc->irq_data);
442
443 if (affinity->is_managed) {
444 irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
445 irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
446 }
447
448 cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);
449
450 /* Restore the activation state */
451 if (activated)
452 irq_domain_activate_irq(&desc->irq_data, false);
453
454 out_unlock:
455 irq_put_desc_busunlock(desc, flags);
456 return ret;
457 }
458
459 static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
460 bool force)
461 {
462 struct irq_desc *desc = irq_to_desc(irq);
463 unsigned long flags;
464 int ret;
465
466 if (!desc)
467 return -EINVAL;
468
469 raw_spin_lock_irqsave(&desc->lock, flags);
470 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
471 raw_spin_unlock_irqrestore(&desc->lock, flags);
472 return ret;
473 }
474
475 /**
476 * irq_set_affinity - Set the irq affinity of a given irq
477 * @irq: Interrupt to set affinity
478 * @cpumask: cpumask
479 *
480 * Fails if cpumask does not contain an online CPU
481 */
482 int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
483 {
484 return __irq_set_affinity(irq, cpumask, false);
485 }
486 EXPORT_SYMBOL_GPL(irq_set_affinity);
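/*
 * Illustrative sketch with hypothetical names: a driver pinning an interrupt
 * to a particular CPU, e.g. to keep RX processing on one core:
 *
 *	static int foo_pin_irq(unsigned int irq, unsigned int cpu)
 *	{
 *		return irq_set_affinity(irq, cpumask_of(cpu));
 *	}
 *
 * The call fails if the mask contains no online CPU; irq_force_affinity()
 * below skips that check for the early CPU hotplug case.
 */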
487
488 /**
489 * irq_force_affinity - Force the irq affinity of a given irq
490 * @irq: Interrupt to set affinity
491 * @cpumask: cpumask
492 *
493 * Same as irq_set_affinity, but without checking the mask against
494 * online cpus.
495 *
496 * Solely for low level cpu hotplug code, where we need to make per
497 * cpu interrupts affine before the cpu becomes online.
498 */
499 int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
500 {
501 return __irq_set_affinity(irq, cpumask, true);
502 }
503 EXPORT_SYMBOL_GPL(irq_force_affinity);
504
505 int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
506 bool setaffinity)
507 {
508 unsigned long flags;
509 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
510
511 if (!desc)
512 return -EINVAL;
513 desc->affinity_hint = m;
514 irq_put_desc_unlock(desc, flags);
515 if (m && setaffinity)
516 __irq_set_affinity(irq, m, false);
517 return 0;
518 }
519 EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint);
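/*
 * Drivers normally reach this through the irq_set_affinity_hint() and
 * irq_update_affinity_hint() wrappers in <linux/interrupt.h>. Illustrative
 * sketch with hypothetical names:
 *
 *	// Suggest (and with irq_set_affinity_hint() also apply) a placement:
 *	irq_set_affinity_hint(foo->queue_irq[i], cpumask_of(i));
 *	...
 *	// The hint must be cleared again before the interrupt is freed:
 *	irq_set_affinity_hint(foo->queue_irq[i], NULL);
 *
 * The hint is exported via /proc/irq/<n>/affinity_hint for user space
 * balancers such as irqbalance.
 */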
520
521 static void irq_affinity_notify(struct work_struct *work)
522 {
523 struct irq_affinity_notify *notify =
524 container_of(work, struct irq_affinity_notify, work);
525 struct irq_desc *desc = irq_to_desc(notify->irq);
526 cpumask_var_t cpumask;
527 unsigned long flags;
528
529 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
530 goto out;
531
532 raw_spin_lock_irqsave(&desc->lock, flags);
533 if (irq_move_pending(&desc->irq_data))
534 irq_get_pending(cpumask, desc);
535 else
536 cpumask_copy(cpumask, desc->irq_common_data.affinity);
537 raw_spin_unlock_irqrestore(&desc->lock, flags);
538
539 notify->notify(notify, cpumask);
540
541 free_cpumask_var(cpumask);
542 out:
543 kref_put(&notify->kref, notify->release);
544 }
545
546 /**
547 * irq_set_affinity_notifier - control notification of IRQ affinity changes
548 * @irq: Interrupt for which to enable/disable notification
549 * @notify: Context for notification, or %NULL to disable
550 * notification. Function pointers must be initialised;
551 * the other fields will be initialised by this function.
552 *
553 * Must be called in process context. Notification may only be enabled
554 * after the IRQ is allocated and must be disabled before the IRQ is
555 * freed using free_irq().
556 */
557 int
558 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
559 {
560 struct irq_desc *desc = irq_to_desc(irq);
561 struct irq_affinity_notify *old_notify;
562 unsigned long flags;
563
564 /* The release function is promised process context */
565 might_sleep();
566
567 if (!desc || desc->istate & IRQS_NMI)
568 return -EINVAL;
569
570 /* Complete initialisation of *notify */
571 if (notify) {
572 notify->irq = irq;
573 kref_init(&notify->kref);
574 INIT_WORK(&notify->work, irq_affinity_notify);
575 }
576
577 raw_spin_lock_irqsave(&desc->lock, flags);
578 old_notify = desc->affinity_notify;
579 desc->affinity_notify = notify;
580 raw_spin_unlock_irqrestore(&desc->lock, flags);
581
582 if (old_notify) {
583 if (cancel_work_sync(&old_notify->work)) {
584 /* Pending work had a ref, put that one too */
585 kref_put(&old_notify->kref, old_notify->release);
586 }
587 kref_put(&old_notify->kref, old_notify->release);
588 }
589
590 return 0;
591 }
592 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
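/*
 * Illustrative sketch with hypothetical foo_*() names: a driver interested in
 * affinity changes embeds a struct irq_affinity_notify, fills in both
 * callbacks and registers it after requesting the interrupt:
 *
 *	static void foo_notify(struct irq_affinity_notify *notify,
 *			       const cpumask_t *mask)
 *	{
 *		struct foo_dev *foo = container_of(notify, struct foo_dev,
 *						   affinity_notify);
 *
 *		foo_retarget_queues(foo, mask);	// runs in process context
 *	}
 *
 *	static void foo_release(struct kref *ref)
 *	{
 *		// called when the last reference to the notifier is dropped
 *	}
 *
 *	foo->affinity_notify.notify  = foo_notify;
 *	foo->affinity_notify.release = foo_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->affinity_notify);
 *
 * Passing NULL unregisters the notifier again, which must happen before
 * free_irq().
 */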
593
594 #ifndef CONFIG_AUTO_IRQ_AFFINITY
595 /*
596 * Generic version of the affinity autoselector.
597 */
598 int irq_setup_affinity(struct irq_desc *desc)
599 {
600 struct cpumask *set = irq_default_affinity;
601 int ret, node = irq_desc_get_node(desc);
602 static DEFINE_RAW_SPINLOCK(mask_lock);
603 static struct cpumask mask;
604
605 /* Excludes PER_CPU and NO_BALANCE interrupts */
606 if (!__irq_can_set_affinity(desc))
607 return 0;
608
609 raw_spin_lock(&mask_lock);
610 /*
611 * Preserve the managed affinity setting and a userspace affinity
612 * setup, but make sure that one of the targets is online.
613 */
614 if (irqd_affinity_is_managed(&desc->irq_data) ||
615 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
616 if (cpumask_intersects(desc->irq_common_data.affinity,
617 cpu_online_mask))
618 set = desc->irq_common_data.affinity;
619 else
620 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
621 }
622
623 cpumask_and(&mask, cpu_online_mask, set);
624 if (cpumask_empty(&mask))
625 cpumask_copy(&mask, cpu_online_mask);
626
627 if (node != NUMA_NO_NODE) {
628 const struct cpumask *nodemask = cpumask_of_node(node);
629
630 /* make sure at least one of the cpus in nodemask is online */
631 if (cpumask_intersects(&mask, nodemask))
632 cpumask_and(&mask, &mask, nodemask);
633 }
634 ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
635 raw_spin_unlock(&mask_lock);
636 return ret;
637 }
638 #else
639 /* Wrapper for ALPHA specific affinity selector magic */
640 int irq_setup_affinity(struct irq_desc *desc)
641 {
642 return irq_select_affinity(irq_desc_get_irq(desc));
643 }
644 #endif /* CONFIG_AUTO_IRQ_AFFINITY */
645 #endif /* CONFIG_SMP */
646
647
648 /**
649 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
650 * @irq: interrupt number to set affinity
651 * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
652 * specific data for percpu_devid interrupts
653 *
654 * This function uses the vCPU specific data to set the vCPU
655 * affinity for an irq. The vCPU specific data is passed from
656 * outside, such as KVM. One example code path is as below:
657 * KVM -> IOMMU -> irq_set_vcpu_affinity().
658 */
659 int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
660 {
661 unsigned long flags;
662 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
663 struct irq_data *data;
664 struct irq_chip *chip;
665 int ret = -ENOSYS;
666
667 if (!desc)
668 return -EINVAL;
669
670 data = irq_desc_get_irq_data(desc);
671 do {
672 chip = irq_data_get_irq_chip(data);
673 if (chip && chip->irq_set_vcpu_affinity)
674 break;
675 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
676 data = data->parent_data;
677 #else
678 data = NULL;
679 #endif
680 } while (data);
681
682 if (data)
683 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
684 irq_put_desc_unlock(desc, flags);
685
686 return ret;
687 }
688 EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
689
690 void __disable_irq(struct irq_desc *desc)
691 {
692 if (!desc->depth++)
693 irq_disable(desc);
694 }
695
696 static int __disable_irq_nosync(unsigned int irq)
697 {
698 unsigned long flags;
699 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
700
701 if (!desc)
702 return -EINVAL;
703 __disable_irq(desc);
704 irq_put_desc_busunlock(desc, flags);
705 return 0;
706 }
707
708 /**
709 * disable_irq_nosync - disable an irq without waiting
710 * @irq: Interrupt to disable
711 *
712 * Disable the selected interrupt line. Disables and Enables are
713 * nested.
714 * Unlike disable_irq(), this function does not ensure existing
715 * instances of the IRQ handler have completed before returning.
716 *
717 * This function may be called from IRQ context.
718 */
719 void disable_irq_nosync(unsigned int irq)
720 {
721 __disable_irq_nosync(irq);
722 }
723 EXPORT_SYMBOL(disable_irq_nosync);
724
725 /**
726 * disable_irq - disable an irq and wait for completion
727 * @irq: Interrupt to disable
728 *
729 * Disable the selected interrupt line. Enables and Disables are
730 * nested.
731 * This function waits for any pending IRQ handlers for this interrupt
732 * to complete before returning. If you use this function while
733  * holding a resource the IRQ handler may need, you will deadlock.
734 *
735 * Can only be called from preemptible code as it might sleep when
736 * an interrupt thread is associated to @irq.
737 *
738 */
739 void disable_irq(unsigned int irq)
740 {
741 might_sleep();
742 if (!__disable_irq_nosync(irq))
743 synchronize_irq(irq);
744 }
745 EXPORT_SYMBOL(disable_irq);
746
747 /**
748 * disable_hardirq - disables an irq and waits for hardirq completion
749 * @irq: Interrupt to disable
750 *
751 * Disable the selected interrupt line. Enables and Disables are
752 * nested.
753 * This function waits for any pending hard IRQ handlers for this
754 * interrupt to complete before returning. If you use this function while
755  * holding a resource the hard IRQ handler may need, you will deadlock.
756 *
757 * When used to optimistically disable an interrupt from atomic context
758 * the return value must be checked.
759 *
760 * Returns: false if a threaded handler is active.
761 *
762 * This function may be called - with care - from IRQ context.
763 */
764 bool disable_hardirq(unsigned int irq)
765 {
766 if (!__disable_irq_nosync(irq))
767 return synchronize_hardirq(irq);
768
769 return false;
770 }
771 EXPORT_SYMBOL_GPL(disable_hardirq);
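/*
 * Illustrative sketch with hypothetical names: a typical use is polling a
 * device from atomic context (e.g. a netpoll style ->poll_controller() path),
 * where only the hard irq handler needs to be quiesced:
 *
 *	if (disable_hardirq(foo->irq))
 *		foo_poll_controller(foo);	// no hard irq handler is running
 *	enable_irq(foo->irq);
 *
 * A false return means a threaded handler is still active and the caller has
 * to cope with it running concurrently.
 */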
772
773 /**
774 * disable_nmi_nosync - disable an nmi without waiting
775 * @irq: Interrupt to disable
776 *
777 * Disable the selected interrupt line. Disables and enables are
778 * nested.
779 * The interrupt to disable must have been requested through request_nmi.
780 * Unlike disable_nmi(), this function does not ensure existing
781 * instances of the IRQ handler have completed before returning.
782 */
783 void disable_nmi_nosync(unsigned int irq)
784 {
785 disable_irq_nosync(irq);
786 }
787
788 void __enable_irq(struct irq_desc *desc)
789 {
790 switch (desc->depth) {
791 case 0:
792 err_out:
793 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
794 irq_desc_get_irq(desc));
795 break;
796 case 1: {
797 if (desc->istate & IRQS_SUSPENDED)
798 goto err_out;
799 /* Prevent probing on this irq: */
800 irq_settings_set_noprobe(desc);
801 /*
802 * Call irq_startup() not irq_enable() here because the
803 * interrupt might be marked NOAUTOEN. So irq_startup()
804 * needs to be invoked when it gets enabled the first
805 * time. If it was already started up, then irq_startup()
806 * will invoke irq_enable() under the hood.
807 */
808 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
809 break;
810 }
811 default:
812 desc->depth--;
813 }
814 }
815
816 /**
817 * enable_irq - enable handling of an irq
818 * @irq: Interrupt to enable
819 *
820 * Undoes the effect of one call to disable_irq(). If this
821 * matches the last disable, processing of interrupts on this
822 * IRQ line is re-enabled.
823 *
824 * This function may be called from IRQ context only when
825 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
826 */
827 void enable_irq(unsigned int irq)
828 {
829 unsigned long flags;
830 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
831
832 if (!desc)
833 return;
834 if (WARN(!desc->irq_data.chip,
835 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
836 goto out;
837
838 __enable_irq(desc);
839 out:
840 irq_put_desc_busunlock(desc, flags);
841 }
842 EXPORT_SYMBOL(enable_irq);
843
844 /**
845 * enable_nmi - enable handling of an nmi
846 * @irq: Interrupt to enable
847 *
848 * The interrupt to enable must have been requested through request_nmi.
849 * Undoes the effect of one call to disable_nmi(). If this
850 * matches the last disable, processing of interrupts on this
851 * IRQ line is re-enabled.
852 */
853 void enable_nmi(unsigned int irq)
854 {
855 enable_irq(irq);
856 }
857
858 static int set_irq_wake_real(unsigned int irq, unsigned int on)
859 {
860 struct irq_desc *desc = irq_to_desc(irq);
861 int ret = -ENXIO;
862
863 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
864 return 0;
865
866 if (desc->irq_data.chip->irq_set_wake)
867 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
868
869 return ret;
870 }
871
872 /**
873 * irq_set_irq_wake - control irq power management wakeup
874 * @irq: interrupt to control
875 * @on: enable/disable power management wakeup
876 *
877 * Enable/disable power management wakeup mode, which is
878 * disabled by default. Enables and disables must match,
879 * just as they match for non-wakeup mode support.
880 *
881 * Wakeup mode lets this IRQ wake the system from sleep
882 * states like "suspend to RAM".
883 *
884 * Note: irq enable/disable state is completely orthogonal
885 * to the enable/disable state of irq wake. An irq can be
886 * disabled with disable_irq() and still wake the system as
887 * long as the irq has wake enabled. If this does not hold,
888 * then the underlying irq chip and the related driver need
889 * to be investigated.
890 */
891 int irq_set_irq_wake(unsigned int irq, unsigned int on)
892 {
893 unsigned long flags;
894 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
895 int ret = 0;
896
897 if (!desc)
898 return -EINVAL;
899
900 /* Don't use NMIs as wake up interrupts please */
901 if (desc->istate & IRQS_NMI) {
902 ret = -EINVAL;
903 goto out_unlock;
904 }
905
906 /* wakeup-capable irqs can be shared between drivers that
907 * don't need to have the same sleep mode behaviors.
908 */
909 if (on) {
910 if (desc->wake_depth++ == 0) {
911 ret = set_irq_wake_real(irq, on);
912 if (ret)
913 desc->wake_depth = 0;
914 else
915 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
916 }
917 } else {
918 if (desc->wake_depth == 0) {
919 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
920 } else if (--desc->wake_depth == 0) {
921 ret = set_irq_wake_real(irq, on);
922 if (ret)
923 desc->wake_depth = 1;
924 else
925 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
926 }
927 }
928
929 out_unlock:
930 irq_put_desc_busunlock(desc, flags);
931 return ret;
932 }
933 EXPORT_SYMBOL(irq_set_irq_wake);
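/*
 * Illustrative sketch with hypothetical names: a driver whose interrupt should
 * wake the system typically toggles wake mode in its PM callbacks via the
 * enable_irq_wake()/disable_irq_wake() wrappers:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 * Enables and disables must stay balanced, as the wake_depth handling above
 * enforces.
 */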
934
935 /*
936 * Internal function that tells the architecture code whether a
937 * particular irq has been exclusively allocated or is available
938 * for driver use.
939 */
940 int can_request_irq(unsigned int irq, unsigned long irqflags)
941 {
942 unsigned long flags;
943 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
944 int canrequest = 0;
945
946 if (!desc)
947 return 0;
948
949 if (irq_settings_can_request(desc)) {
950 if (!desc->action ||
951 irqflags & desc->action->flags & IRQF_SHARED)
952 canrequest = 1;
953 }
954 irq_put_desc_unlock(desc, flags);
955 return canrequest;
956 }
957
958 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
959 {
960 struct irq_chip *chip = desc->irq_data.chip;
961 int ret, unmask = 0;
962
963 if (!chip || !chip->irq_set_type) {
964 /*
965 * IRQF_TRIGGER_* but the PIC does not support multiple
966 * flow-types?
967 */
968 pr_debug("No set_type function for IRQ %d (%s)\n",
969 irq_desc_get_irq(desc),
970 chip ? (chip->name ? : "unknown") : "unknown");
971 return 0;
972 }
973
974 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
975 if (!irqd_irq_masked(&desc->irq_data))
976 mask_irq(desc);
977 if (!irqd_irq_disabled(&desc->irq_data))
978 unmask = 1;
979 }
980
981 /* Mask all flags except trigger mode */
982 flags &= IRQ_TYPE_SENSE_MASK;
983 ret = chip->irq_set_type(&desc->irq_data, flags);
984
985 switch (ret) {
986 case IRQ_SET_MASK_OK:
987 case IRQ_SET_MASK_OK_DONE:
988 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
989 irqd_set(&desc->irq_data, flags);
990 fallthrough;
991
992 case IRQ_SET_MASK_OK_NOCOPY:
993 flags = irqd_get_trigger_type(&desc->irq_data);
994 irq_settings_set_trigger_mask(desc, flags);
995 irqd_clear(&desc->irq_data, IRQD_LEVEL);
996 irq_settings_clr_level(desc);
997 if (flags & IRQ_TYPE_LEVEL_MASK) {
998 irq_settings_set_level(desc);
999 irqd_set(&desc->irq_data, IRQD_LEVEL);
1000 }
1001
1002 ret = 0;
1003 break;
1004 default:
1005 pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
1006 flags, irq_desc_get_irq(desc), chip->irq_set_type);
1007 }
1008 if (unmask)
1009 unmask_irq(desc);
1010 return ret;
1011 }
1012
1013 #ifdef CONFIG_HARDIRQS_SW_RESEND
1014 int irq_set_parent(int irq, int parent_irq)
1015 {
1016 unsigned long flags;
1017 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
1018
1019 if (!desc)
1020 return -EINVAL;
1021
1022 desc->parent_irq = parent_irq;
1023
1024 irq_put_desc_unlock(desc, flags);
1025 return 0;
1026 }
1027 EXPORT_SYMBOL_GPL(irq_set_parent);
1028 #endif
1029
1030 /*
1031  * Default primary interrupt handler for threaded interrupts. It is
1032  * assigned as the primary handler when request_threaded_irq() is called
1033 * with handler == NULL. Useful for oneshot interrupts.
1034 */
1035 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
1036 {
1037 return IRQ_WAKE_THREAD;
1038 }
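/*
 * Illustrative sketch with hypothetical names: a purely threaded request ends
 * up with the dummy handler above as its primary handler, which is why such
 * requests have to pass IRQF_ONESHOT (see the check in __setup_irq()):
 *
 *	ret = request_threaded_irq(foo->irq, NULL, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo);
 */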
1039
1040 /*
1041 * Primary handler for nested threaded interrupts. Should never be
1042 * called.
1043 */
1044 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
1045 {
1046 WARN(1, "Primary handler called for nested irq %d\n", irq);
1047 return IRQ_NONE;
1048 }
1049
1050 static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
1051 {
1052 WARN(1, "Secondary action handler called for irq %d\n", irq);
1053 return IRQ_NONE;
1054 }
1055
1056 #ifdef CONFIG_SMP
1057 /*
1058 * Check whether we need to change the affinity of the interrupt thread.
1059 */
1060 static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
1061 {
1062 cpumask_var_t mask;
1063 bool valid = false;
1064
1065 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
1066 return;
1067
1068 __set_current_state(TASK_RUNNING);
1069
1070 /*
1071  * In case we are out of memory, we set IRQTF_AFFINITY again and
1072  * retry next time.
1073 */
1074 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1075 set_bit(IRQTF_AFFINITY, &action->thread_flags);
1076 return;
1077 }
1078
1079 raw_spin_lock_irq(&desc->lock);
1080 /*
1081 * This code is triggered unconditionally. Check the affinity
1082 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
1083 */
1084 if (cpumask_available(desc->irq_common_data.affinity)) {
1085 const struct cpumask *m;
1086
1087 m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1088 cpumask_copy(mask, m);
1089 valid = true;
1090 }
1091 raw_spin_unlock_irq(&desc->lock);
1092
1093 if (valid)
1094 set_cpus_allowed_ptr(current, mask);
1095 free_cpumask_var(mask);
1096 }
1097 #else
1098 static inline void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
1099 #endif
1100
1101 static int irq_wait_for_interrupt(struct irq_desc *desc,
1102 struct irqaction *action)
1103 {
1104 for (;;) {
1105 set_current_state(TASK_INTERRUPTIBLE);
1106 irq_thread_check_affinity(desc, action);
1107
1108 if (kthread_should_stop()) {
1109 /* may need to run one last time */
1110 if (test_and_clear_bit(IRQTF_RUNTHREAD,
1111 &action->thread_flags)) {
1112 __set_current_state(TASK_RUNNING);
1113 return 0;
1114 }
1115 __set_current_state(TASK_RUNNING);
1116 return -1;
1117 }
1118
1119 if (test_and_clear_bit(IRQTF_RUNTHREAD,
1120 &action->thread_flags)) {
1121 __set_current_state(TASK_RUNNING);
1122 return 0;
1123 }
1124 schedule();
1125 }
1126 }
1127
1128 /*
1129 * Oneshot interrupts keep the irq line masked until the threaded
1130  * handler has finished. Unmask if the interrupt has not been disabled and
1131 * is marked MASKED.
1132 */
1133 static void irq_finalize_oneshot(struct irq_desc *desc,
1134 struct irqaction *action)
1135 {
1136 if (!(desc->istate & IRQS_ONESHOT) ||
1137 action->handler == irq_forced_secondary_handler)
1138 return;
1139 again:
1140 chip_bus_lock(desc);
1141 raw_spin_lock_irq(&desc->lock);
1142
1143 /*
1144	 * Implausible though it may be, we need to protect ourselves against
1145	 * the following scenario:
1146	 *
1147	 * The thread might finish before the hard interrupt handler
1148	 * on the other CPU. If we unmask the irq line now, the
1149	 * interrupt can come in again, mask the line and leave due
1150	 * to IRQS_INPROGRESS, and the irq line stays masked forever.
1151 *
1152 * This also serializes the state of shared oneshot handlers
1153 * versus "desc->threads_oneshot |= action->thread_mask;" in
1154 * irq_wake_thread(). See the comment there which explains the
1155 * serialization.
1156 */
1157 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
1158 raw_spin_unlock_irq(&desc->lock);
1159 chip_bus_sync_unlock(desc);
1160 cpu_relax();
1161 goto again;
1162 }
1163
1164 /*
1165	 * Now check again whether the thread should run. Otherwise
1166 * we would clear the threads_oneshot bit of this thread which
1167 * was just set.
1168 */
1169 if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1170 goto out_unlock;
1171
1172 desc->threads_oneshot &= ~action->thread_mask;
1173
1174 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
1175 irqd_irq_masked(&desc->irq_data))
1176 unmask_threaded_irq(desc);
1177
1178 out_unlock:
1179 raw_spin_unlock_irq(&desc->lock);
1180 chip_bus_sync_unlock(desc);
1181 }
1182
1183 /*
1184 * Interrupts which are not explicitly requested as threaded
1185 * interrupts rely on the implicit bh/preempt disable of the hard irq
1186 * context. So we need to disable bh here to avoid deadlocks and other
1187 * side effects.
1188 */
1189 static irqreturn_t
1190 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
1191 {
1192 irqreturn_t ret;
1193
1194 local_bh_disable();
1195 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1196 local_irq_disable();
1197 ret = action->thread_fn(action->irq, action->dev_id);
1198 if (ret == IRQ_HANDLED)
1199 atomic_inc(&desc->threads_handled);
1200
1201 irq_finalize_oneshot(desc, action);
1202 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1203 local_irq_enable();
1204 local_bh_enable();
1205 return ret;
1206 }
1207
1208 /*
1209 * Interrupts explicitly requested as threaded interrupts want to be
1210 * preemptible - many of them need to sleep and wait for slow busses to
1211 * complete.
1212 */
1213 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
1214 struct irqaction *action)
1215 {
1216 irqreturn_t ret;
1217
1218 ret = action->thread_fn(action->irq, action->dev_id);
1219 if (ret == IRQ_HANDLED)
1220 atomic_inc(&desc->threads_handled);
1221
1222 irq_finalize_oneshot(desc, action);
1223 return ret;
1224 }
1225
1226 void wake_threads_waitq(struct irq_desc *desc)
1227 {
1228 if (atomic_dec_and_test(&desc->threads_active))
1229 wake_up(&desc->wait_for_threads);
1230 }
1231
1232 static void irq_thread_dtor(struct callback_head *unused)
1233 {
1234 struct task_struct *tsk = current;
1235 struct irq_desc *desc;
1236 struct irqaction *action;
1237
1238 if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
1239 return;
1240
1241 action = kthread_data(tsk);
1242
1243 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
1244 tsk->comm, tsk->pid, action->irq);
1245
1246
1247 desc = irq_to_desc(action->irq);
1248 /*
1249 * If IRQTF_RUNTHREAD is set, we need to decrement
1250 * desc->threads_active and wake possible waiters.
1251 */
1252 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1253 wake_threads_waitq(desc);
1254
1255 /* Prevent a stale desc->threads_oneshot */
1256 irq_finalize_oneshot(desc, action);
1257 }
1258
1259 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
1260 {
1261 struct irqaction *secondary = action->secondary;
1262
1263 if (WARN_ON_ONCE(!secondary))
1264 return;
1265
1266 raw_spin_lock_irq(&desc->lock);
1267 __irq_wake_thread(desc, secondary);
1268 raw_spin_unlock_irq(&desc->lock);
1269 }
1270
1271 /*
1272  * Internal function to notify that an interrupt thread is ready.
1273 */
1274 static void irq_thread_set_ready(struct irq_desc *desc,
1275 struct irqaction *action)
1276 {
1277 set_bit(IRQTF_READY, &action->thread_flags);
1278 wake_up(&desc->wait_for_threads);
1279 }
1280
1281 /*
1282  * Internal function to wake up an interrupt thread and wait until it is
1283 * ready.
1284 */
1285 static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
1286 struct irqaction *action)
1287 {
1288 if (!action || !action->thread)
1289 return;
1290
1291 wake_up_process(action->thread);
1292 wait_event(desc->wait_for_threads,
1293 test_bit(IRQTF_READY, &action->thread_flags));
1294 }
1295
1296 /*
1297 * Interrupt handler thread
1298 */
1299 static int irq_thread(void *data)
1300 {
1301 struct callback_head on_exit_work;
1302 struct irqaction *action = data;
1303 struct irq_desc *desc = irq_to_desc(action->irq);
1304 irqreturn_t (*handler_fn)(struct irq_desc *desc,
1305 struct irqaction *action);
1306
1307 irq_thread_set_ready(desc, action);
1308
1309 sched_set_fifo(current);
1310
1311 if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
1312 &action->thread_flags))
1313 handler_fn = irq_forced_thread_fn;
1314 else
1315 handler_fn = irq_thread_fn;
1316
1317 init_task_work(&on_exit_work, irq_thread_dtor);
1318 task_work_add(current, &on_exit_work, TWA_NONE);
1319
1320 while (!irq_wait_for_interrupt(desc, action)) {
1321 irqreturn_t action_ret;
1322
1323 action_ret = handler_fn(desc, action);
1324 if (action_ret == IRQ_WAKE_THREAD)
1325 irq_wake_secondary(desc, action);
1326
1327 wake_threads_waitq(desc);
1328 }
1329
1330 /*
1331 * This is the regular exit path. __free_irq() is stopping the
1332 * thread via kthread_stop() after calling
1333 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
1334 * oneshot mask bit can be set.
1335 */
1336 task_work_cancel(current, irq_thread_dtor);
1337 return 0;
1338 }
1339
1340 /**
1341 * irq_wake_thread - wake the irq thread for the action identified by dev_id
1342 * @irq: Interrupt line
1343 * @dev_id: Device identity for which the thread should be woken
1344 *
1345 */
1346 void irq_wake_thread(unsigned int irq, void *dev_id)
1347 {
1348 struct irq_desc *desc = irq_to_desc(irq);
1349 struct irqaction *action;
1350 unsigned long flags;
1351
1352 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1353 return;
1354
1355 raw_spin_lock_irqsave(&desc->lock, flags);
1356 for_each_action_of_desc(desc, action) {
1357 if (action->dev_id == dev_id) {
1358 if (action->thread)
1359 __irq_wake_thread(desc, action);
1360 break;
1361 }
1362 }
1363 raw_spin_unlock_irqrestore(&desc->lock, flags);
1364 }
1365 EXPORT_SYMBOL_GPL(irq_wake_thread);
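/*
 * Illustrative sketch with hypothetical names: besides returning
 * IRQ_WAKE_THREAD from the primary handler, a driver can kick its thread from
 * another code path, e.g. a watchdog that notices pending work:
 *
 *	// dev_id must match the one passed to request_threaded_irq():
 *	irq_wake_thread(foo->irq, foo);
 */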
1366
1367 static int irq_setup_forced_threading(struct irqaction *new)
1368 {
1369 if (!force_irqthreads())
1370 return 0;
1371 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1372 return 0;
1373
1374 /*
1375 * No further action required for interrupts which are requested as
1376 * threaded interrupts already
1377 */
1378 if (new->handler == irq_default_primary_handler)
1379 return 0;
1380
1381 new->flags |= IRQF_ONESHOT;
1382
1383 /*
1384 * Handle the case where we have a real primary handler and a
1385	 * thread handler. We force-thread them as well by creating a
1386 * secondary action.
1387 */
1388 if (new->handler && new->thread_fn) {
1389 /* Allocate the secondary action */
1390 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1391 if (!new->secondary)
1392 return -ENOMEM;
1393 new->secondary->handler = irq_forced_secondary_handler;
1394 new->secondary->thread_fn = new->thread_fn;
1395 new->secondary->dev_id = new->dev_id;
1396 new->secondary->irq = new->irq;
1397 new->secondary->name = new->name;
1398 }
1399 /* Deal with the primary handler */
1400 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1401 new->thread_fn = new->handler;
1402 new->handler = irq_default_primary_handler;
1403 return 0;
1404 }
1405
1406 static int irq_request_resources(struct irq_desc *desc)
1407 {
1408 struct irq_data *d = &desc->irq_data;
1409 struct irq_chip *c = d->chip;
1410
1411 return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1412 }
1413
1414 static void irq_release_resources(struct irq_desc *desc)
1415 {
1416 struct irq_data *d = &desc->irq_data;
1417 struct irq_chip *c = d->chip;
1418
1419 if (c->irq_release_resources)
1420 c->irq_release_resources(d);
1421 }
1422
1423 static bool irq_supports_nmi(struct irq_desc *desc)
1424 {
1425 struct irq_data *d = irq_desc_get_irq_data(desc);
1426
1427 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1428 /* Only IRQs directly managed by the root irqchip can be set as NMI */
1429 if (d->parent_data)
1430 return false;
1431 #endif
1432 /* Don't support NMIs for chips behind a slow bus */
1433 if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1434 return false;
1435
1436 return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1437 }
1438
1439 static int irq_nmi_setup(struct irq_desc *desc)
1440 {
1441 struct irq_data *d = irq_desc_get_irq_data(desc);
1442 struct irq_chip *c = d->chip;
1443
1444 return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1445 }
1446
1447 static void irq_nmi_teardown(struct irq_desc *desc)
1448 {
1449 struct irq_data *d = irq_desc_get_irq_data(desc);
1450 struct irq_chip *c = d->chip;
1451
1452 if (c->irq_nmi_teardown)
1453 c->irq_nmi_teardown(d);
1454 }
1455
1456 static int
1457 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1458 {
1459 struct task_struct *t;
1460
1461 if (!secondary) {
1462 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1463 new->name);
1464 } else {
1465 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1466 new->name);
1467 }
1468
1469 if (IS_ERR(t))
1470 return PTR_ERR(t);
1471
1472 /*
1473 * We keep the reference to the task struct even if
1474 * the thread dies to avoid that the interrupt code
1475 * references an already freed task_struct.
1476 */
1477 new->thread = get_task_struct(t);
1478 /*
1479 * Tell the thread to set its affinity. This is
1480 * important for shared interrupt handlers as we do
1481 * not invoke setup_affinity() for the secondary
1482 * handlers as everything is already set up. Even for
1483	 * interrupts marked with IRQF_NOBALANCING this is
1484 * correct as we want the thread to move to the cpu(s)
1485 * on which the requesting code placed the interrupt.
1486 */
1487 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1488 return 0;
1489 }
1490
1491 /*
1492 * Internal function to register an irqaction - typically used to
1493 * allocate special interrupts that are part of the architecture.
1494 *
1495 * Locking rules:
1496 *
1497 * desc->request_mutex Provides serialization against a concurrent free_irq()
1498 * chip_bus_lock Provides serialization for slow bus operations
1499 * desc->lock Provides serialization against hard interrupts
1500 *
1501 * chip_bus_lock and desc->lock are sufficient for all other management and
1502 * interrupt related functions. desc->request_mutex solely serializes
1503 * request/free_irq().
1504 */
1505 static int
1506 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1507 {
1508 struct irqaction *old, **old_ptr;
1509 unsigned long flags, thread_mask = 0;
1510 int ret, nested, shared = 0;
1511
1512 if (!desc)
1513 return -EINVAL;
1514
1515 if (desc->irq_data.chip == &no_irq_chip)
1516 return -ENOSYS;
1517 if (!try_module_get(desc->owner))
1518 return -ENODEV;
1519
1520 new->irq = irq;
1521
1522 /*
1523 * If the trigger type is not specified by the caller,
1524 * then use the default for this interrupt.
1525 */
1526 if (!(new->flags & IRQF_TRIGGER_MASK))
1527 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1528
1529 /*
1530 * Check whether the interrupt nests into another interrupt
1531 * thread.
1532 */
1533 nested = irq_settings_is_nested_thread(desc);
1534 if (nested) {
1535 if (!new->thread_fn) {
1536 ret = -EINVAL;
1537 goto out_mput;
1538 }
1539 /*
1540		 * Replace the primary handler which was provided by
1541		 * the driver for non-nested interrupt handling with the
1542		 * dummy function which warns when called.
1543 */
1544 new->handler = irq_nested_primary_handler;
1545 } else {
1546 if (irq_settings_can_thread(desc)) {
1547 ret = irq_setup_forced_threading(new);
1548 if (ret)
1549 goto out_mput;
1550 }
1551 }
1552
1553 /*
1554 * Create a handler thread when a thread function is supplied
1555 * and the interrupt does not nest into another interrupt
1556 * thread.
1557 */
1558 if (new->thread_fn && !nested) {
1559 ret = setup_irq_thread(new, irq, false);
1560 if (ret)
1561 goto out_mput;
1562 if (new->secondary) {
1563 ret = setup_irq_thread(new->secondary, irq, true);
1564 if (ret)
1565 goto out_thread;
1566 }
1567 }
1568
1569 /*
1570 * Drivers are often written to work w/o knowledge about the
1571 * underlying irq chip implementation, so a request for a
1572 * threaded irq without a primary hard irq context handler
1573 * requires the ONESHOT flag to be set. Some irq chips like
1574 * MSI based interrupts are per se one shot safe. Check the
1575 * chip flags, so we can avoid the unmask dance at the end of
1576 * the threaded handler for those.
1577 */
1578 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1579 new->flags &= ~IRQF_ONESHOT;
1580
1581 /*
1582 * Protects against a concurrent __free_irq() call which might wait
1583 * for synchronize_hardirq() to complete without holding the optional
1584 * chip bus lock and desc->lock. Also protects against handing out
1585 * a recycled oneshot thread_mask bit while it's still in use by
1586 * its previous owner.
1587 */
1588 mutex_lock(&desc->request_mutex);
1589
1590 /*
1591 * Acquire bus lock as the irq_request_resources() callback below
1592 * might rely on the serialization or the magic power management
1593	 * functions which are abusing the irq_bus_lock() callback.
1594 */
1595 chip_bus_lock(desc);
1596
1597 /* First installed action requests resources. */
1598 if (!desc->action) {
1599 ret = irq_request_resources(desc);
1600 if (ret) {
1601 pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1602 new->name, irq, desc->irq_data.chip->name);
1603 goto out_bus_unlock;
1604 }
1605 }
1606
1607 /*
1608 * The following block of code has to be executed atomically
1609 * protected against a concurrent interrupt and any of the other
1610 * management calls which are not serialized via
1611 * desc->request_mutex or the optional bus lock.
1612 */
1613 raw_spin_lock_irqsave(&desc->lock, flags);
1614 old_ptr = &desc->action;
1615 old = *old_ptr;
1616 if (old) {
1617 /*
1618 * Can't share interrupts unless both agree to and are
1619 * the same type (level, edge, polarity). So both flag
1620 * fields must have IRQF_SHARED set and the bits which
1621 * set the trigger type must match. Also all must
1622 * agree on ONESHOT.
1623 * Interrupt lines used for NMIs cannot be shared.
1624 */
1625 unsigned int oldtype;
1626
1627 if (desc->istate & IRQS_NMI) {
1628 pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1629 new->name, irq, desc->irq_data.chip->name);
1630 ret = -EINVAL;
1631 goto out_unlock;
1632 }
1633
1634 /*
1635 * If nobody did set the configuration before, inherit
1636 * the one provided by the requester.
1637 */
1638 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1639 oldtype = irqd_get_trigger_type(&desc->irq_data);
1640 } else {
1641 oldtype = new->flags & IRQF_TRIGGER_MASK;
1642 irqd_set_trigger_type(&desc->irq_data, oldtype);
1643 }
1644
1645 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1646 (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1647 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1648 goto mismatch;
1649
1650 /* All handlers must agree on per-cpuness */
1651 if ((old->flags & IRQF_PERCPU) !=
1652 (new->flags & IRQF_PERCPU))
1653 goto mismatch;
1654
1655 /* add new interrupt at end of irq queue */
1656 do {
1657 /*
1658 * Or all existing action->thread_mask bits,
1659 * so we can find the next zero bit for this
1660 * new action.
1661 */
1662 thread_mask |= old->thread_mask;
1663 old_ptr = &old->next;
1664 old = *old_ptr;
1665 } while (old);
1666 shared = 1;
1667 }
1668
1669 /*
1670 * Setup the thread mask for this irqaction for ONESHOT. For
1671 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1672 * conditional in irq_wake_thread().
1673 */
1674 if (new->flags & IRQF_ONESHOT) {
1675 /*
1676 * Unlikely to have 32 resp 64 irqs sharing one line,
1677 * but who knows.
1678 */
1679 if (thread_mask == ~0UL) {
1680 ret = -EBUSY;
1681 goto out_unlock;
1682 }
1683 /*
1684 * The thread_mask for the action is or'ed to
1685		 * desc->threads_active to indicate that the
1686 * IRQF_ONESHOT thread handler has been woken, but not
1687 * yet finished. The bit is cleared when a thread
1688 * completes. When all threads of a shared interrupt
1689 * line have completed desc->threads_active becomes
1690 * zero and the interrupt line is unmasked. See
1691 * handle.c:irq_wake_thread() for further information.
1692 *
1693 * If no thread is woken by primary (hard irq context)
1694 * interrupt handlers, then desc->threads_active is
1695 * also checked for zero to unmask the irq line in the
1696 * affected hard irq flow handlers
1697 * (handle_[fasteoi|level]_irq).
1698 *
1699 * The new action gets the first zero bit of
1700 * thread_mask assigned. See the loop above which or's
1701 * all existing action->thread_mask bits.
1702 */
1703 new->thread_mask = 1UL << ffz(thread_mask);
1704
1705 } else if (new->handler == irq_default_primary_handler &&
1706 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1707 /*
1708 * The interrupt was requested with handler = NULL, so
1709 * we use the default primary handler for it. But it
1710 * does not have the oneshot flag set. In combination
1711 * with level interrupts this is deadly, because the
1712 * default primary handler just wakes the thread, then
1713		 * the irq line is reenabled, but the device still
1714 * has the level irq asserted. Rinse and repeat....
1715 *
1716 * While this works for edge type interrupts, we play
1717 * it safe and reject unconditionally because we can't
1718 * say for sure which type this interrupt really
1719 * has. The type flags are unreliable as the
1720 * underlying chip implementation can override them.
1721 */
1722 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
1723 new->name, irq);
1724 ret = -EINVAL;
1725 goto out_unlock;
1726 }
1727
1728 if (!shared) {
1729 /* Setup the type (level, edge polarity) if configured: */
1730 if (new->flags & IRQF_TRIGGER_MASK) {
1731 ret = __irq_set_trigger(desc,
1732 new->flags & IRQF_TRIGGER_MASK);
1733
1734 if (ret)
1735 goto out_unlock;
1736 }
1737
1738 /*
1739 * Activate the interrupt. That activation must happen
1740 * independently of IRQ_NOAUTOEN. request_irq() can fail
1741 * and the callers are supposed to handle
1742 * that. enable_irq() of an interrupt requested with
1743 * IRQ_NOAUTOEN is not supposed to fail. The activation
1744		 * keeps it in shutdown mode, it merely associates
1745 * resources if necessary and if that's not possible it
1746 * fails. Interrupts which are in managed shutdown mode
1747 * will simply ignore that activation request.
1748 */
1749 ret = irq_activate(desc);
1750 if (ret)
1751 goto out_unlock;
1752
1753 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1754 IRQS_ONESHOT | IRQS_WAITING);
1755 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1756
1757 if (new->flags & IRQF_PERCPU) {
1758 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1759 irq_settings_set_per_cpu(desc);
1760 if (new->flags & IRQF_NO_DEBUG)
1761 irq_settings_set_no_debug(desc);
1762 }
1763
1764 if (noirqdebug)
1765 irq_settings_set_no_debug(desc);
1766
1767 if (new->flags & IRQF_ONESHOT)
1768 desc->istate |= IRQS_ONESHOT;
1769
1770 /* Exclude IRQ from balancing if requested */
1771 if (new->flags & IRQF_NOBALANCING) {
1772 irq_settings_set_no_balancing(desc);
1773 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1774 }
1775
1776 if (!(new->flags & IRQF_NO_AUTOEN) &&
1777 irq_settings_can_autoenable(desc)) {
1778 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1779 } else {
1780 /*
1781 * Shared interrupts do not go well with disabling
1782 * auto enable. The sharing interrupt might request
1783 * it while it's still disabled and then wait for
1784 * interrupts forever.
1785 */
1786 WARN_ON_ONCE(new->flags & IRQF_SHARED);
1787 /* Undo nested disables: */
1788 desc->depth = 1;
1789 }
1790
1791 } else if (new->flags & IRQF_TRIGGER_MASK) {
1792 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1793 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1794
1795 if (nmsk != omsk)
1796 /* hope the handler works with current trigger mode */
1797 pr_warn("irq %d uses trigger mode %u; requested %u\n",
1798 irq, omsk, nmsk);
1799 }
1800
1801 *old_ptr = new;
1802
1803 irq_pm_install_action(desc, new);
1804
1805 /* Reset broken irq detection when installing new handler */
1806 desc->irq_count = 0;
1807 desc->irqs_unhandled = 0;
1808
1809 /*
1810 * Check whether we disabled the irq via the spurious handler
1811 * before. Reenable it and give it another chance.
1812 */
1813 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1814 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1815 __enable_irq(desc);
1816 }
1817
1818 raw_spin_unlock_irqrestore(&desc->lock, flags);
1819 chip_bus_sync_unlock(desc);
1820 mutex_unlock(&desc->request_mutex);
1821
1822 irq_setup_timings(desc, new);
1823
1824 wake_up_and_wait_for_irq_thread_ready(desc, new);
1825 wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);
1826
1827 register_irq_proc(irq, desc);
1828 new->dir = NULL;
1829 register_handler_proc(irq, new);
1830 return 0;
1831
1832 mismatch:
1833 if (!(new->flags & IRQF_PROBE_SHARED)) {
1834 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1835 irq, new->flags, new->name, old->flags, old->name);
1836 #ifdef CONFIG_DEBUG_SHIRQ
1837 dump_stack();
1838 #endif
1839 }
1840 ret = -EBUSY;
1841
1842 out_unlock:
1843 raw_spin_unlock_irqrestore(&desc->lock, flags);
1844
1845 if (!desc->action)
1846 irq_release_resources(desc);
1847 out_bus_unlock:
1848 chip_bus_sync_unlock(desc);
1849 mutex_unlock(&desc->request_mutex);
1850
1851 out_thread:
1852 if (new->thread) {
1853 struct task_struct *t = new->thread;
1854
1855 new->thread = NULL;
1856 kthread_stop_put(t);
1857 }
1858 if (new->secondary && new->secondary->thread) {
1859 struct task_struct *t = new->secondary->thread;
1860
1861 new->secondary->thread = NULL;
1862 kthread_stop_put(t);
1863 }
1864 out_mput:
1865 module_put(desc->owner);
1866 return ret;
1867 }
1868
1869 /*
1870 * Internal function to unregister an irqaction - used to free
1871 * regular and special interrupts that are part of the architecture.
1872 */
1873 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1874 {
1875 unsigned irq = desc->irq_data.irq;
1876 struct irqaction *action, **action_ptr;
1877 unsigned long flags;
1878
1879 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1880
1881 mutex_lock(&desc->request_mutex);
1882 chip_bus_lock(desc);
1883 raw_spin_lock_irqsave(&desc->lock, flags);
1884
1885 /*
1886 * There can be multiple actions per IRQ descriptor, find the right
1887 * one based on the dev_id:
1888 */
1889 action_ptr = &desc->action;
1890 for (;;) {
1891 action = *action_ptr;
1892
1893 if (!action) {
1894 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1895 raw_spin_unlock_irqrestore(&desc->lock, flags);
1896 chip_bus_sync_unlock(desc);
1897 mutex_unlock(&desc->request_mutex);
1898 return NULL;
1899 }
1900
1901 if (action->dev_id == dev_id)
1902 break;
1903 action_ptr = &action->next;
1904 }
1905
1906 /* Found it - now remove it from the list of entries: */
1907 *action_ptr = action->next;
1908
1909 irq_pm_remove_action(desc, action);
1910
1911 /* If this was the last handler, shut down the IRQ line: */
1912 if (!desc->action) {
1913 irq_settings_clr_disable_unlazy(desc);
1914 /* Only shutdown. Deactivate after synchronize_hardirq() */
1915 irq_shutdown(desc);
1916 }
1917
1918 #ifdef CONFIG_SMP
1919 /* make sure affinity_hint is cleaned up */
1920 if (WARN_ON_ONCE(desc->affinity_hint))
1921 desc->affinity_hint = NULL;
1922 #endif
1923
1924 raw_spin_unlock_irqrestore(&desc->lock, flags);
1925 /*
1926 * Drop bus_lock here so the changes which were done in the chip
1927 * callbacks above are synced out to the irq chips which hang
1928 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
1929 *
1930 * Aside from that, the bus_lock can also be taken from the threaded
1931 * handler in irq_finalize_oneshot() which results in a deadlock
1932 * because kthread_stop() would wait forever for the thread to
1933 * complete, which is blocked on the bus lock.
1934 *
1935 * The still held desc->request_mutex protects against a
1936 * concurrent request_irq() of this irq so the release of resources
1937 * and timing data is properly serialized.
1938 */
1939 chip_bus_sync_unlock(desc);
1940
1941 unregister_handler_proc(irq, action);
1942
1943 /*
1944 * Make sure it's not being used on another CPU and if the chip
1945 * supports it also make sure that there is no (not yet serviced)
1946 * interrupt in flight at the hardware level.
1947 */
1948 __synchronize_irq(desc);
1949
1950 #ifdef CONFIG_DEBUG_SHIRQ
1951 /*
1952 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1953 * event to happen even while it's being freed, so let's make sure that
1954 * is so by doing an extra call to the handler ....
1955 *
1956 * ( We do this after actually deregistering it, to make sure that a
1957 * 'real' IRQ doesn't run in parallel with our fake. )
1958 */
1959 if (action->flags & IRQF_SHARED) {
1960 local_irq_save(flags);
1961 action->handler(irq, dev_id);
1962 local_irq_restore(flags);
1963 }
1964 #endif
1965
1966 /*
1967 * The action has already been removed above, but the thread writes
1968 * its oneshot mask bit when it completes. However, request_mutex is
1969 * held across this, which prevents __setup_irq() from handing out
1970 * the same bit to a newly requested action.
1971 */
1972 if (action->thread) {
1973 kthread_stop_put(action->thread);
1974 if (action->secondary && action->secondary->thread)
1975 kthread_stop_put(action->secondary->thread);
1976 }
1977
1978 /* Last action releases resources */
1979 if (!desc->action) {
1980 /*
1981 * Reacquire bus lock as irq_release_resources() might
1982 * require it to deallocate resources over the slow bus.
1983 */
1984 chip_bus_lock(desc);
1985 /*
1986 * There is no interrupt on the fly anymore. Deactivate it
1987 * completely.
1988 */
1989 raw_spin_lock_irqsave(&desc->lock, flags);
1990 irq_domain_deactivate_irq(&desc->irq_data);
1991 raw_spin_unlock_irqrestore(&desc->lock, flags);
1992
1993 irq_release_resources(desc);
1994 chip_bus_sync_unlock(desc);
1995 irq_remove_timings(desc);
1996 }
1997
1998 mutex_unlock(&desc->request_mutex);
1999
2000 irq_chip_pm_put(&desc->irq_data);
2001 module_put(desc->owner);
2002 kfree(action->secondary);
2003 return action;
2004 }
2005
2006 /**
2007 * free_irq - free an interrupt allocated with request_irq
2008 * @irq: Interrupt line to free
2009 * @dev_id: Device identity to free
2010 *
2011 * Remove an interrupt handler. The handler is removed and if the
2012 * interrupt line is no longer in use by any driver it is disabled.
2013 * On a shared IRQ the caller must ensure the interrupt is disabled
2014 * on the card it drives before calling this function. The function
2015 * does not return until any executing interrupts for this IRQ
2016 * have completed.
2017 *
2018 * This function must not be called from interrupt context.
2019 *
2020 * Returns the devname argument passed to request_irq.
2021 */
2022 const void *free_irq(unsigned int irq, void *dev_id)
2023 {
2024 struct irq_desc *desc = irq_to_desc(irq);
2025 struct irqaction *action;
2026 const char *devname;
2027
2028 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2029 return NULL;
2030
2031 #ifdef CONFIG_SMP
2032 if (WARN_ON(desc->affinity_notify))
2033 desc->affinity_notify = NULL;
2034 #endif
2035
2036 action = __free_irq(desc, dev_id);
2037
2038 if (!action)
2039 return NULL;
2040
2041 devname = action->name;
2042 kfree(action);
2043 return devname;
2044 }
2045 EXPORT_SYMBOL(free_irq);
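
/*
 * Illustrative sketch, not part of this file: freeing an interrupt that
 * was requested with the driver's device structure as the dev_id cookie.
 * "struct my_device", "my_device_mask_interrupts()" and the field names
 * are hypothetical.
 */
#if 0	/* example only, not built */
static void my_device_teardown(struct my_device *my_dev)
{
	const void *devname;

	/* Quiesce the device first; on a shared line it must not fire anymore. */
	my_device_mask_interrupts(my_dev);

	/* dev_id must match the cookie passed to request_threaded_irq(). */
	devname = free_irq(my_dev->irq, my_dev);
	pr_debug("released IRQ %d (%s)\n", my_dev->irq, (const char *)devname);
}
#endif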
2046
2047 /* This function must be called with desc->lock held */
2048 static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
2049 {
2050 const char *devname = NULL;
2051
2052 desc->istate &= ~IRQS_NMI;
2053
2054 if (!WARN_ON(desc->action == NULL)) {
2055 irq_pm_remove_action(desc, desc->action);
2056 devname = desc->action->name;
2057 unregister_handler_proc(irq, desc->action);
2058
2059 kfree(desc->action);
2060 desc->action = NULL;
2061 }
2062
2063 irq_settings_clr_disable_unlazy(desc);
2064 irq_shutdown_and_deactivate(desc);
2065
2066 irq_release_resources(desc);
2067
2068 irq_chip_pm_put(&desc->irq_data);
2069 module_put(desc->owner);
2070
2071 return devname;
2072 }
2073
2074 const void *free_nmi(unsigned int irq, void *dev_id)
2075 {
2076 struct irq_desc *desc = irq_to_desc(irq);
2077 unsigned long flags;
2078 const void *devname;
2079
2080 if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
2081 return NULL;
2082
2083 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2084 return NULL;
2085
2086 /* NMI still enabled */
2087 if (WARN_ON(desc->depth == 0))
2088 disable_nmi_nosync(irq);
2089
2090 raw_spin_lock_irqsave(&desc->lock, flags);
2091
2092 irq_nmi_teardown(desc);
2093 devname = __cleanup_nmi(irq, desc);
2094
2095 raw_spin_unlock_irqrestore(&desc->lock, flags);
2096
2097 return devname;
2098 }
2099
2100 /**
2101 * request_threaded_irq - allocate an interrupt line
2102 * @irq: Interrupt line to allocate
2103 * @handler: Function to be called when the IRQ occurs.
2104 * Primary handler for threaded interrupts.
2105 * If handler is NULL and thread_fn != NULL
2106 * the default primary handler is installed.
2107 * @thread_fn: Function called from the irq handler thread
2108 * If NULL, no irq thread is created
2109 * @irqflags: Interrupt type flags
2110 * @devname: An ascii name for the claiming device
2111 * @dev_id: A cookie passed back to the handler function
2112 *
2113 * This call allocates interrupt resources and enables the
2114 * interrupt line and IRQ handling. From the point this
2115 * call is made your handler function may be invoked. Since
2116 * your handler function must clear any interrupt the board
2117 * raises, you must take care both to initialise your hardware
2118 * and to set up the interrupt handler in the right order.
2119 *
2120 * If you want to set up a threaded irq handler for your device
2121 * then you need to supply @handler and @thread_fn. @handler is
2122 * still called in hard interrupt context and has to check
2123 * whether the interrupt originates from the device. If yes it
2124 * needs to disable the interrupt on the device and return
2125 * IRQ_WAKE_THREAD which will wake up the handler thread and run
2126 * @thread_fn. This split handler design is necessary to support
2127 * shared interrupts.
2128 *
2129 * Dev_id must be globally unique. Normally the address of the
2130 * device data structure is used as the cookie. Since the handler
2131 * receives this value it makes sense to use it.
2132 *
2133 * If your interrupt is shared you must pass a non NULL dev_id
2134 * as this is required when freeing the interrupt.
2135 *
2136 * Flags:
2137 *
2138 * IRQF_SHARED Interrupt is shared
2139 * IRQF_TRIGGER_* Specify active edge(s) or level
2140 * IRQF_ONESHOT Run thread_fn with interrupt line masked
2141 */
2142 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
2143 irq_handler_t thread_fn, unsigned long irqflags,
2144 const char *devname, void *dev_id)
2145 {
2146 struct irqaction *action;
2147 struct irq_desc *desc;
2148 int retval;
2149
2150 if (irq == IRQ_NOTCONNECTED)
2151 return -ENOTCONN;
2152
2153 /*
2154 * Sanity-check: shared interrupts must pass in a real dev-ID,
2155 * otherwise we'll have trouble later trying to figure out
2156 * which interrupt is which (messes up the interrupt freeing
2157 * logic etc).
2158 *
2159 * Also shared interrupts do not go well with disabling auto enable.
2160 * The sharing interrupt might request it while it's still disabled
2161 * and then wait for interrupts forever.
2162 *
2163 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
2164 * it cannot be set along with IRQF_NO_SUSPEND.
2165 */
2166 if (((irqflags & IRQF_SHARED) && !dev_id) ||
2167 ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
2168 (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
2169 ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
2170 return -EINVAL;
2171
2172 desc = irq_to_desc(irq);
2173 if (!desc)
2174 return -EINVAL;
2175
2176 if (!irq_settings_can_request(desc) ||
2177 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2178 return -EINVAL;
2179
2180 if (!handler) {
2181 if (!thread_fn)
2182 return -EINVAL;
2183 handler = irq_default_primary_handler;
2184 }
2185
2186 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2187 if (!action)
2188 return -ENOMEM;
2189
2190 action->handler = handler;
2191 action->thread_fn = thread_fn;
2192 action->flags = irqflags;
2193 action->name = devname;
2194 action->dev_id = dev_id;
2195
2196 retval = irq_chip_pm_get(&desc->irq_data);
2197 if (retval < 0) {
2198 kfree(action);
2199 return retval;
2200 }
2201
2202 retval = __setup_irq(irq, desc, action);
2203
2204 if (retval) {
2205 irq_chip_pm_put(&desc->irq_data);
2206 kfree(action->secondary);
2207 kfree(action);
2208 }
2209
2210 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
2211 if (!retval && (irqflags & IRQF_SHARED)) {
2212 /*
2213 * It's a shared IRQ -- the driver ought to be prepared for it
2214 * to happen immediately, so let's make sure....
2215 * We disable the irq to make sure that a 'real' IRQ doesn't
2216 * run in parallel with our fake.
2217 */
2218 unsigned long flags;
2219
2220 disable_irq(irq);
2221 local_irq_save(flags);
2222
2223 handler(irq, dev_id);
2224
2225 local_irq_restore(flags);
2226 enable_irq(irq);
2227 }
2228 #endif
2229 return retval;
2230 }
2231 EXPORT_SYMBOL(request_threaded_irq);
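
/*
 * Illustrative sketch, not part of this file: the split primary/threaded
 * handler pattern described in the kerneldoc above. "struct my_device",
 * the MY_* register definitions and my_device_process_events() are
 * hypothetical; IRQF_SHARED is just one plausible flag choice.
 */
#if 0	/* example only, not built */
static irqreturn_t my_primary_handler(int irq, void *dev_id)
{
	struct my_device *my_dev = dev_id;

	/* Runs in hard interrupt context: check whether our device fired. */
	if (!(readl(my_dev->regs + MY_IRQ_STATUS) & MY_IRQ_PENDING))
		return IRQ_NONE;

	/* Silence the device, then defer the slow work to the thread. */
	writel(0, my_dev->regs + MY_IRQ_ENABLE);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	struct my_device *my_dev = dev_id;

	my_device_process_events(my_dev);	/* may sleep */
	writel(MY_IRQ_ENABLE_ALL, my_dev->regs + MY_IRQ_ENABLE);
	return IRQ_HANDLED;
}

static int my_device_setup_irq(struct my_device *my_dev)
{
	return request_threaded_irq(my_dev->irq, my_primary_handler,
				    my_thread_fn, IRQF_SHARED,
				    "my-device", my_dev);
}
#endif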
2232
2233 /**
2234 * request_any_context_irq - allocate an interrupt line
2235 * @irq: Interrupt line to allocate
2236 * @handler: Function to be called when the IRQ occurs.
2237 * Threaded handler for threaded interrupts.
2238 * @flags: Interrupt type flags
2239 * @name: An ascii name for the claiming device
2240 * @dev_id: A cookie passed back to the handler function
2241 *
2242 * This call allocates interrupt resources and enables the
2243 * interrupt line and IRQ handling. It selects either a
2244 * hardirq or threaded handling method depending on the
2245 * context.
2246 *
2247 * On failure, it returns a negative value. On success,
2248 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
2249 */
2250 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2251 unsigned long flags, const char *name, void *dev_id)
2252 {
2253 struct irq_desc *desc;
2254 int ret;
2255
2256 if (irq == IRQ_NOTCONNECTED)
2257 return -ENOTCONN;
2258
2259 desc = irq_to_desc(irq);
2260 if (!desc)
2261 return -EINVAL;
2262
2263 if (irq_settings_is_nested_thread(desc)) {
2264 ret = request_threaded_irq(irq, NULL, handler,
2265 flags, name, dev_id);
2266 return !ret ? IRQC_IS_NESTED : ret;
2267 }
2268
2269 ret = request_irq(irq, handler, flags, name, dev_id);
2270 return !ret ? IRQC_IS_HARDIRQ : ret;
2271 }
2272 EXPORT_SYMBOL_GPL(request_any_context_irq);
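
/*
 * Illustrative sketch, not part of this file: positive return values
 * (IRQC_IS_HARDIRQ / IRQC_IS_NESTED) mean success and tell the caller
 * which context the handler will run in. Names are hypothetical.
 */
#if 0	/* example only, not built */
static int my_button_setup(struct my_button *btn)
{
	int ret;

	ret = request_any_context_irq(btn->irq, my_button_handler,
				      IRQF_TRIGGER_FALLING, "my-button", btn);
	if (ret < 0)
		return ret;

	/* On a nested parent chip the handler runs in a thread. */
	btn->nested = (ret == IRQC_IS_NESTED);
	return 0;
}
#endif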
2273
2274 /**
2275 * request_nmi - allocate an interrupt line for NMI delivery
2276 * @irq: Interrupt line to allocate
2277 * @handler: Function to be called when the IRQ occurs.
2278 * Threaded handler for threaded interrupts.
2279 * @irqflags: Interrupt type flags
2280 * @name: An ascii name for the claiming device
2281 * @dev_id: A cookie passed back to the handler function
2282 *
2283 * This call allocates interrupt resources and enables the
2284 * interrupt line and IRQ handling. It sets up the IRQ line
2285 * to be handled as an NMI.
2286 *
2287 * An interrupt line delivering NMIs cannot be shared and IRQ handling
2288 * cannot be threaded.
2289 *
2290 * Interrupt lines requested for NMI delivery must produce per-CPU
2291 * interrupts and have auto-enabling disabled.
2292 *
2293 * Dev_id must be globally unique. Normally the address of the
2294 * device data structure is used as the cookie. Since the handler
2295 * receives this value it makes sense to use it.
2296 *
2297 * If the interrupt line cannot be used to deliver NMIs, the function
2298 * will fail and return a negative value.
2299 */
2300 int request_nmi(unsigned int irq, irq_handler_t handler,
2301 unsigned long irqflags, const char *name, void *dev_id)
2302 {
2303 struct irqaction *action;
2304 struct irq_desc *desc;
2305 unsigned long flags;
2306 int retval;
2307
2308 if (irq == IRQ_NOTCONNECTED)
2309 return -ENOTCONN;
2310
2311 /* NMIs cannot be shared or used for polling */
2312 if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2313 return -EINVAL;
2314
2315 if (!(irqflags & IRQF_PERCPU))
2316 return -EINVAL;
2317
2318 if (!handler)
2319 return -EINVAL;
2320
2321 desc = irq_to_desc(irq);
2322
2323 if (!desc || (irq_settings_can_autoenable(desc) &&
2324 !(irqflags & IRQF_NO_AUTOEN)) ||
2325 !irq_settings_can_request(desc) ||
2326 WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2327 !irq_supports_nmi(desc))
2328 return -EINVAL;
2329
2330 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2331 if (!action)
2332 return -ENOMEM;
2333
2334 action->handler = handler;
2335 action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2336 action->name = name;
2337 action->dev_id = dev_id;
2338
2339 retval = irq_chip_pm_get(&desc->irq_data);
2340 if (retval < 0)
2341 goto err_out;
2342
2343 retval = __setup_irq(irq, desc, action);
2344 if (retval)
2345 goto err_irq_setup;
2346
2347 raw_spin_lock_irqsave(&desc->lock, flags);
2348
2349 /* Setup NMI state */
2350 desc->istate |= IRQS_NMI;
2351 retval = irq_nmi_setup(desc);
2352 if (retval) {
2353 __cleanup_nmi(irq, desc);
2354 raw_spin_unlock_irqrestore(&desc->lock, flags);
2355 return -EINVAL;
2356 }
2357
2358 raw_spin_unlock_irqrestore(&desc->lock, flags);
2359
2360 return 0;
2361
2362 err_irq_setup:
2363 irq_chip_pm_put(&desc->irq_data);
2364 err_out:
2365 kfree(action);
2366
2367 return retval;
2368 }
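
/*
 * Illustrative sketch, not part of this file: per the checks above the
 * line cannot be shared, must carry IRQF_PERCPU and must not auto-enable,
 * so it is switched on explicitly afterwards. "struct my_watchdog" and the
 * handler are hypothetical; enable_nmi() is assumed to be available to the
 * caller.
 */
#if 0	/* example only, not built */
static int my_watchdog_setup(struct my_watchdog *wd)
{
	int ret;

	ret = request_nmi(wd->irq, my_watchdog_nmi_handler,
			  IRQF_PERCPU | IRQF_NO_AUTOEN, "my-watchdog", wd);
	if (ret)
		return ret;

	enable_nmi(wd->irq);
	return 0;
}
#endif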
2369
2370 void enable_percpu_irq(unsigned int irq, unsigned int type)
2371 {
2372 unsigned int cpu = smp_processor_id();
2373 unsigned long flags;
2374 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2375
2376 if (!desc)
2377 return;
2378
2379 /*
2380 * If the trigger type is not specified by the caller, then
2381 * use the default for this interrupt.
2382 */
2383 type &= IRQ_TYPE_SENSE_MASK;
2384 if (type == IRQ_TYPE_NONE)
2385 type = irqd_get_trigger_type(&desc->irq_data);
2386
2387 if (type != IRQ_TYPE_NONE) {
2388 int ret;
2389
2390 ret = __irq_set_trigger(desc, type);
2391
2392 if (ret) {
2393 WARN(1, "failed to set type for IRQ%d\n", irq);
2394 goto out;
2395 }
2396 }
2397
2398 irq_percpu_enable(desc, cpu);
2399 out:
2400 irq_put_desc_unlock(desc, flags);
2401 }
2402 EXPORT_SYMBOL_GPL(enable_percpu_irq);
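
/*
 * Illustrative sketch, not part of this file: enabling a per-CPU interrupt
 * on the CPU being brought online, e.g. from a CPUHP_AP_*_STARTING callback
 * registered with cpuhp_setup_state(). IRQ_TYPE_NONE keeps whatever trigger
 * type is already configured. "my_timer_irq" is hypothetical.
 */
#if 0	/* example only, not built */
static int my_timer_starting_cpu(unsigned int cpu)
{
	enable_percpu_irq(my_timer_irq, IRQ_TYPE_NONE);
	return 0;
}

static int my_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(my_timer_irq);
	return 0;
}
#endif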
2403
2404 void enable_percpu_nmi(unsigned int irq, unsigned int type)
2405 {
2406 enable_percpu_irq(irq, type);
2407 }
2408
2409 /**
2410 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
2411 * @irq: Linux irq number to check for
2412 *
2413 * Must be called from a non-migratable context. Returns the enable
2414 * state of a per-CPU interrupt on the current CPU.
2415 */
2416 bool irq_percpu_is_enabled(unsigned int irq)
2417 {
2418 unsigned int cpu = smp_processor_id();
2419 struct irq_desc *desc;
2420 unsigned long flags;
2421 bool is_enabled;
2422
2423 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2424 if (!desc)
2425 return false;
2426
2427 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2428 irq_put_desc_unlock(desc, flags);
2429
2430 return is_enabled;
2431 }
2432 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
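
/*
 * Illustrative sketch, not part of this file: disable preemption around the
 * call so "the current CPU" cannot change underneath it; the result is only
 * a snapshot. "my_timer_irq" is hypothetical.
 */
#if 0	/* example only, not built */
static bool my_timer_enabled_on_this_cpu(void)
{
	bool enabled;

	preempt_disable();
	enabled = irq_percpu_is_enabled(my_timer_irq);
	preempt_enable();

	return enabled;
}
#endif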
2433
2434 void disable_percpu_irq(unsigned int irq)
2435 {
2436 unsigned int cpu = smp_processor_id();
2437 unsigned long flags;
2438 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2439
2440 if (!desc)
2441 return;
2442
2443 irq_percpu_disable(desc, cpu);
2444 irq_put_desc_unlock(desc, flags);
2445 }
2446 EXPORT_SYMBOL_GPL(disable_percpu_irq);
2447
2448 void disable_percpu_nmi(unsigned int irq)
2449 {
2450 disable_percpu_irq(irq);
2451 }
2452
2453 /*
2454 * Internal function to unregister a percpu irqaction.
2455 */
2456 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2457 {
2458 struct irq_desc *desc = irq_to_desc(irq);
2459 struct irqaction *action;
2460 unsigned long flags;
2461
2462 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
2463
2464 if (!desc)
2465 return NULL;
2466
2467 raw_spin_lock_irqsave(&desc->lock, flags);
2468
2469 action = desc->action;
2470 if (!action || action->percpu_dev_id != dev_id) {
2471 WARN(1, "Trying to free already-free IRQ %d\n", irq);
2472 goto bad;
2473 }
2474
2475 if (!cpumask_empty(desc->percpu_enabled)) {
2476 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2477 irq, cpumask_first(desc->percpu_enabled));
2478 goto bad;
2479 }
2480
2481 /* Found it - now remove it from the list of entries: */
2482 desc->action = NULL;
2483
2484 desc->istate &= ~IRQS_NMI;
2485
2486 raw_spin_unlock_irqrestore(&desc->lock, flags);
2487
2488 unregister_handler_proc(irq, action);
2489
2490 irq_chip_pm_put(&desc->irq_data);
2491 module_put(desc->owner);
2492 return action;
2493
2494 bad:
2495 raw_spin_unlock_irqrestore(&desc->lock, flags);
2496 return NULL;
2497 }
2498
2499 /**
2500 * remove_percpu_irq - free a per-cpu interrupt
2501 * @irq: Interrupt line to free
2502 * @act: irqaction for the interrupt
2503 *
2504 * Used to remove interrupts statically set up by the early boot process.
2505 */
2506 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2507 {
2508 struct irq_desc *desc = irq_to_desc(irq);
2509
2510 if (desc && irq_settings_is_per_cpu_devid(desc))
2511 __free_percpu_irq(irq, act->percpu_dev_id);
2512 }
2513
2514 /**
2515 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
2516 * @irq: Interrupt line to free
2517 * @dev_id: Device identity to free
2518 *
2519 * Remove a percpu interrupt handler. The handler is removed, but
2520 * the interrupt line is not disabled. This must be done on each
2521 * CPU before calling this function. The function does not return
2522 * until any executing interrupts for this IRQ have completed.
2523 *
2524 * This function must not be called from interrupt context.
2525 */
2526 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2527 {
2528 struct irq_desc *desc = irq_to_desc(irq);
2529
2530 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2531 return;
2532
2533 chip_bus_lock(desc);
2534 kfree(__free_percpu_irq(irq, dev_id));
2535 chip_bus_sync_unlock(desc);
2536 }
2537 EXPORT_SYMBOL_GPL(free_percpu_irq);
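
/*
 * Illustrative sketch, not part of this file: as the kerneldoc above notes,
 * the line has to be disabled on every CPU before it is freed. Names and
 * the per-CPU cookie are hypothetical.
 */
#if 0	/* example only, not built */
static void my_timer_disable_this_cpu(void *unused)
{
	disable_percpu_irq(my_timer_irq);
}

static void my_timer_teardown(void)
{
	/* Disable on each CPU first ... */
	on_each_cpu(my_timer_disable_this_cpu, NULL, 1);
	/* ... then remove the handler, passing the same per-CPU cookie. */
	free_percpu_irq(my_timer_irq, &my_timer_state);
}
#endif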
2538
2539 void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
2540 {
2541 struct irq_desc *desc = irq_to_desc(irq);
2542
2543 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2544 return;
2545
2546 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2547 return;
2548
2549 kfree(__free_percpu_irq(irq, dev_id));
2550 }
2551
2552 /**
2553 * setup_percpu_irq - setup a per-cpu interrupt
2554 * @irq: Interrupt line to setup
2555 * @act: irqaction for the interrupt
2556 *
2557 * Used to statically set up per-cpu interrupts in the early boot process.
2558 */
2559 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2560 {
2561 struct irq_desc *desc = irq_to_desc(irq);
2562 int retval;
2563
2564 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2565 return -EINVAL;
2566
2567 retval = irq_chip_pm_get(&desc->irq_data);
2568 if (retval < 0)
2569 return retval;
2570
2571 retval = __setup_irq(irq, desc, act);
2572
2573 if (retval)
2574 irq_chip_pm_put(&desc->irq_data);
2575
2576 return retval;
2577 }
2578
2579 /**
2580 * __request_percpu_irq - allocate a percpu interrupt line
2581 * @irq: Interrupt line to allocate
2582 * @handler: Function to be called when the IRQ occurs.
2583 * @flags: Interrupt type flags (IRQF_TIMER only)
2584 * @devname: An ascii name for the claiming device
2585 * @dev_id: A percpu cookie passed back to the handler function
2586 *
2587 * This call allocates interrupt resources and enables the
2588 * interrupt on the local CPU. If the interrupt is supposed to be
2589 * enabled on other CPUs, it has to be done on each CPU using
2590 * enable_percpu_irq().
2591 *
2592 * Dev_id must be globally unique. It is a per-cpu variable, and
2593 * the handler gets called with the interrupted CPU's instance of
2594 * that variable.
2595 */
2596 int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2597 unsigned long flags, const char *devname,
2598 void __percpu *dev_id)
2599 {
2600 struct irqaction *action;
2601 struct irq_desc *desc;
2602 int retval;
2603
2604 if (!dev_id)
2605 return -EINVAL;
2606
2607 desc = irq_to_desc(irq);
2608 if (!desc || !irq_settings_can_request(desc) ||
2609 !irq_settings_is_per_cpu_devid(desc))
2610 return -EINVAL;
2611
2612 if (flags && flags != IRQF_TIMER)
2613 return -EINVAL;
2614
2615 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2616 if (!action)
2617 return -ENOMEM;
2618
2619 action->handler = handler;
2620 action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2621 action->name = devname;
2622 action->percpu_dev_id = dev_id;
2623
2624 retval = irq_chip_pm_get(&desc->irq_data);
2625 if (retval < 0) {
2626 kfree(action);
2627 return retval;
2628 }
2629
2630 retval = __setup_irq(irq, desc, action);
2631
2632 if (retval) {
2633 irq_chip_pm_put(&desc->irq_data);
2634 kfree(action);
2635 }
2636
2637 return retval;
2638 }
2639 EXPORT_SYMBOL_GPL(__request_percpu_irq);
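
/*
 * Illustrative sketch, not part of this file: requesting a per-CPU line via
 * request_percpu_irq() (the flag-less wrapper around this function) and then
 * enabling it on every CPU, since enable_percpu_irq() only affects the CPU it
 * runs on. The per-CPU state, handler and IRQ number are hypothetical.
 */
#if 0	/* example only, not built */
static DEFINE_PER_CPU(struct my_timer_state, my_timer_state);

static void my_timer_enable_this_cpu(void *unused)
{
	enable_percpu_irq(my_timer_irq, IRQ_TYPE_NONE);
}

static int my_timer_init(void)
{
	int ret;

	ret = request_percpu_irq(my_timer_irq, my_timer_handler,
				 "my-timer", &my_timer_state);
	if (ret)
		return ret;

	on_each_cpu(my_timer_enable_this_cpu, NULL, 1);
	return 0;
}
#endif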
2640
2641 /**
2642 * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
2643 * @irq: Interrupt line to allocate
2644 * @handler: Function to be called when the IRQ occurs.
2645 * @name: An ascii name for the claiming device
2646 * @dev_id: A percpu cookie passed back to the handler function
2647 *
2648 * This call allocates interrupt resources for a per-CPU NMI. Per-CPU NMIs
2649 * have to be set up on each CPU by calling prepare_percpu_nmi() before
2650 * being enabled on the same CPU by using enable_percpu_nmi().
2651 *
2652 * Dev_id must be globally unique. It is a per-cpu variable, and
2653 * the handler gets called with the interrupted CPU's instance of
2654 * that variable.
2655 *
2656 * Interrupt lines requested for NMI delivery should have auto-enabling
2657 * disabled.
2658 *
2659 * If the interrupt line cannot be used to deliver NMIs, the function
2660 * will fail, returning a negative value.
2661 */
2662 int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
2663 const char *name, void __percpu *dev_id)
2664 {
2665 struct irqaction *action;
2666 struct irq_desc *desc;
2667 unsigned long flags;
2668 int retval;
2669
2670 if (!handler)
2671 return -EINVAL;
2672
2673 desc = irq_to_desc(irq);
2674
2675 if (!desc || !irq_settings_can_request(desc) ||
2676 !irq_settings_is_per_cpu_devid(desc) ||
2677 irq_settings_can_autoenable(desc) ||
2678 !irq_supports_nmi(desc))
2679 return -EINVAL;
2680
2681 /* The line cannot already be NMI */
2682 if (desc->istate & IRQS_NMI)
2683 return -EINVAL;
2684
2685 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2686 if (!action)
2687 return -ENOMEM;
2688
2689 action->handler = handler;
2690 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
2691 | IRQF_NOBALANCING;
2692 action->name = name;
2693 action->percpu_dev_id = dev_id;
2694
2695 retval = irq_chip_pm_get(&desc->irq_data);
2696 if (retval < 0)
2697 goto err_out;
2698
2699 retval = __setup_irq(irq, desc, action);
2700 if (retval)
2701 goto err_irq_setup;
2702
2703 raw_spin_lock_irqsave(&desc->lock, flags);
2704 desc->istate |= IRQS_NMI;
2705 raw_spin_unlock_irqrestore(&desc->lock, flags);
2706
2707 return 0;
2708
2709 err_irq_setup:
2710 irq_chip_pm_put(&desc->irq_data);
2711 err_out:
2712 kfree(action);
2713
2714 return retval;
2715 }
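
/*
 * Illustrative sketch, not part of this file: the per-CPU NMI lifecycle
 * described above: request once, then prepare and enable on each CPU from
 * a non-preemptible context. Names and the per-CPU cookie are hypothetical.
 */
#if 0	/* example only, not built */
static void my_pmu_setup_nmi_this_cpu(void *unused)
{
	/* Runs in IPI context, so the preemption requirement is met. */
	if (!prepare_percpu_nmi(my_pmu_irq))
		enable_percpu_nmi(my_pmu_irq, IRQ_TYPE_NONE);
}

static int my_pmu_init_nmi(void)
{
	int ret;

	ret = request_percpu_nmi(my_pmu_irq, my_pmu_nmi_handler,
				 "my-pmu", &my_pmu_state);
	if (ret)
		return ret;

	on_each_cpu(my_pmu_setup_nmi_this_cpu, NULL, 1);
	return 0;
}
#endif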
2716
2717 /**
2718 * prepare_percpu_nmi - performs CPU local setup for NMI delivery
2719 * @irq: Interrupt line to prepare for NMI delivery
2720 *
2721 * This call prepares an interrupt line to deliver NMI on the current CPU,
2722 * before that interrupt line gets enabled with enable_percpu_nmi().
2723 *
2724 * As a CPU local operation, this should be called from non-preemptible
2725 * context.
2726 *
2727 * If the interrupt line cannot be used to deliver NMIs, the function
2728 * will fail, returning a negative value.
2729 */
2730 int prepare_percpu_nmi(unsigned int irq)
2731 {
2732 unsigned long flags;
2733 struct irq_desc *desc;
2734 int ret = 0;
2735
2736 WARN_ON(preemptible());
2737
2738 desc = irq_get_desc_lock(irq, &flags,
2739 IRQ_GET_DESC_CHECK_PERCPU);
2740 if (!desc)
2741 return -EINVAL;
2742
2743 if (WARN(!(desc->istate & IRQS_NMI),
2744 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
2745 irq)) {
2746 ret = -EINVAL;
2747 goto out;
2748 }
2749
2750 ret = irq_nmi_setup(desc);
2751 if (ret) {
2752 pr_err("Failed to setup NMI delivery: irq %u\n", irq);
2753 goto out;
2754 }
2755
2756 out:
2757 irq_put_desc_unlock(desc, flags);
2758 return ret;
2759 }
2760
2761 /**
2762 * teardown_percpu_nmi - undoes NMI setup of IRQ line
2763 * @irq: Interrupt line from which CPU local NMI configuration should be
2764 * removed
2765 *
2766 * This call undoes the setup done by prepare_percpu_nmi().
2767 *
2768 * IRQ line should not be enabled for the current CPU.
2769 *
2770 * As a CPU local operation, this should be called from non-preemptible
2771 * context.
2772 */
2773 void teardown_percpu_nmi(unsigned int irq)
2774 {
2775 unsigned long flags;
2776 struct irq_desc *desc;
2777
2778 WARN_ON(preemptible());
2779
2780 desc = irq_get_desc_lock(irq, &flags,
2781 IRQ_GET_DESC_CHECK_PERCPU);
2782 if (!desc)
2783 return;
2784
2785 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2786 goto out;
2787
2788 irq_nmi_teardown(desc);
2789 out:
2790 irq_put_desc_unlock(desc, flags);
2791 }
2792
2793 int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
2794 bool *state)
2795 {
2796 struct irq_chip *chip;
2797 int err = -EINVAL;
2798
2799 do {
2800 chip = irq_data_get_irq_chip(data);
2801 if (WARN_ON_ONCE(!chip))
2802 return -ENODEV;
2803 if (chip->irq_get_irqchip_state)
2804 break;
2805 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2806 data = data->parent_data;
2807 #else
2808 data = NULL;
2809 #endif
2810 } while (data);
2811
2812 if (data)
2813 err = chip->irq_get_irqchip_state(data, which, state);
2814 return err;
2815 }
2816
2817 /**
2818 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
2819 * @irq: Interrupt line that is forwarded to a VM
2820 * @which: One of IRQCHIP_STATE_* the caller wants to know about
2821 * @state: a pointer to a boolean where the state is to be stored
2822 *
2823 * This call snapshots the internal irqchip state of an
2824 * interrupt, returning into @state the bit corresponding to
2825 * state @which.
2826 *
2827 * This function should be called with preemption disabled if the
2828 * interrupt controller has per-cpu registers.
2829 */
2830 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2831 bool *state)
2832 {
2833 struct irq_desc *desc;
2834 struct irq_data *data;
2835 unsigned long flags;
2836 int err = -EINVAL;
2837
2838 desc = irq_get_desc_buslock(irq, &flags, 0);
2839 if (!desc)
2840 return err;
2841
2842 data = irq_desc_get_irq_data(desc);
2843
2844 err = __irq_get_irqchip_state(data, which, state);
2845
2846 irq_put_desc_busunlock(desc, flags);
2847 return err;
2848 }
2849 EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
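
/*
 * Illustrative sketch, not part of this file: snapshotting whether a
 * forwarded interrupt is pending at the irqchip, e.g. before saving the
 * state of a VM. "vcpu_irq" is hypothetical.
 */
#if 0	/* example only, not built */
static int my_vm_save_pending(unsigned int vcpu_irq, bool *pending)
{
	return irq_get_irqchip_state(vcpu_irq, IRQCHIP_STATE_PENDING, pending);
}
#endif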
2850
2851 /**
2852 * irq_set_irqchip_state - set the state of a forwarded interrupt.
2853 * @irq: Interrupt line that is forwarded to a VM
2854 * @which: State to be restored (one of IRQCHIP_STATE_*)
2855 * @val: Value corresponding to @which
2856 *
2857 * This call sets the internal irqchip state of an interrupt,
2858 * depending on the value of @which.
2859 *
2860 * This function should be called with migration disabled if the
2861 * interrupt controller has per-cpu registers.
2862 */
2863 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2864 bool val)
2865 {
2866 struct irq_desc *desc;
2867 struct irq_data *data;
2868 struct irq_chip *chip;
2869 unsigned long flags;
2870 int err = -EINVAL;
2871
2872 desc = irq_get_desc_buslock(irq, &flags, 0);
2873 if (!desc)
2874 return err;
2875
2876 data = irq_desc_get_irq_data(desc);
2877
2878 do {
2879 chip = irq_data_get_irq_chip(data);
2880 if (WARN_ON_ONCE(!chip)) {
2881 err = -ENODEV;
2882 goto out_unlock;
2883 }
2884 if (chip->irq_set_irqchip_state)
2885 break;
2886 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2887 data = data->parent_data;
2888 #else
2889 data = NULL;
2890 #endif
2891 } while (data);
2892
2893 if (data)
2894 err = chip->irq_set_irqchip_state(data, which, val);
2895
2896 out_unlock:
2897 irq_put_desc_busunlock(desc, flags);
2898 return err;
2899 }
2900 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
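
/*
 * Illustrative sketch, not part of this file: restoring a previously saved
 * pending bit when the forwarded interrupt is handed back to the VM.
 * "vcpu_irq" is hypothetical.
 */
#if 0	/* example only, not built */
static int my_vm_restore_pending(unsigned int vcpu_irq, bool pending)
{
	return irq_set_irqchip_state(vcpu_irq, IRQCHIP_STATE_PENDING, pending);
}
#endif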
2901
2902 /**
2903 * irq_has_action - Check whether an interrupt is requested
2904 * @irq: The linux irq number
2905 *
2906 * Returns: A snapshot of the current state
2907 */
2908 bool irq_has_action(unsigned int irq)
2909 {
2910 bool res;
2911
2912 rcu_read_lock();
2913 res = irq_desc_has_action(irq_to_desc(irq));
2914 rcu_read_unlock();
2915 return res;
2916 }
2917 EXPORT_SYMBOL_GPL(irq_has_action);
2918
2919 /**
2920 * irq_check_status_bit - Check whether bits in the irq descriptor status are set
2921 * @irq: The linux irq number
2922 * @bitmask: The bitmask to evaluate
2923 *
2924 * Returns: True if one of the bits in @bitmask is set
2925 */
2926 bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
2927 {
2928 struct irq_desc *desc;
2929 bool res = false;
2930
2931 rcu_read_lock();
2932 desc = irq_to_desc(irq);
2933 if (desc)
2934 res = !!(desc->status_use_accessors & bitmask);
2935 rcu_read_unlock();
2936 return res;
2937 }
2938 EXPORT_SYMBOL_GPL(irq_check_status_bit);
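
/*
 * Illustrative sketch, not part of this file: both helpers only take a
 * snapshot under RCU, so the answer may be stale by the time the caller
 * acts on it. IRQ_PER_CPU is assumed to be the status bit of interest;
 * "my_report_irq" is hypothetical.
 */
#if 0	/* example only, not built */
static void my_report_irq(unsigned int irq)
{
	if (!irq_has_action(irq))
		return;

	pr_info("irq %u is requested%s\n", irq,
		irq_check_status_bit(irq, IRQ_PER_CPU) ? " (per-CPU)" : "");
}
#endif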