// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */
#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"
#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
DEFINE_STATIC_KEY_FALSE(force_irqthreads_key);

static int __init setup_forced_irqthreads(char *arg)
{
	static_branch_enable(&force_irqthreads_key);
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
{
	struct irq_data *irqd = irq_desc_get_irq_data(desc);
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);

		/*
		 * If requested and supported, check at the chip whether it
		 * is in flight at the hardware level, i.e. already pending
		 * in a CPU and waiting for service and acknowledge.
		 */
		if (!inprogress && sync_chip) {
			/*
			 * Ignore the return code. inprogress is only updated
			 * when the chip supports it.
			 */
			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
						&inprogress);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}
/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this
 * function while holding a resource the IRQ handler may need you
 * will deadlock. It does not take associated threaded handlers
 * into account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 *
 * It does not check whether there is an interrupt in flight at the
 * hardware level, but not serviced yet, as this might deadlock when
 * called with interrupts disabled and the target CPU of the interrupt
 * is the current CPU.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, false);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);
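
/*
 * Usage sketch (illustrative, not part of the kernel source): a caller in
 * atomic context that only needs the hard IRQ part quiesced. "my_dev" and
 * its fields are hypothetical.
 *
 *	disable_irq_nosync(my_dev->irq);
 *	if (synchronize_hardirq(my_dev->irq)) {
 *		// hardirq part done AND no threaded handler active
 *	} else {
 *		// a threaded handler is still running; do not free
 *		// state it may touch
 *	}
 */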
static void __synchronize_irq(struct irq_desc *desc)
{
	__synchronize_hardirq(desc, true);
	/*
	 * We made sure that no hardirq handler is running. Now verify that no
	 * threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads,
		   !atomic_read(&desc->threads_active));
}
/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * Can only be called from preemptible code as it might sleep when
 * an interrupt thread is associated to @irq.
 *
 * It optionally makes sure (when the irq chip supports that method)
 * that the interrupt is not pending in any CPU and waiting for
 * service.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc)
		__synchronize_irq(desc);
}
EXPORT_SYMBOL(synchronize_irq);
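
/*
 * Usage sketch (illustrative, not part of the kernel source): quiesce an
 * interrupt before tearing down data the handlers dereference. The device
 * structure and register names are hypothetical.
 *
 *	static void my_stop(struct my_dev *dev)
 *	{
 *		writel(0, dev->regs + MY_IRQ_ENABLE);	// mask at the device
 *		synchronize_irq(dev->irq);	// wait out hardirq + thread
 *		kfree(dev->rx_buf);		// now safe to free
 *	}
 *
 * Must be called from preemptible context; with a threaded handler
 * attached this can sleep in wait_event().
 */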
#ifdef CONFIG_SMP

cpumask_var_t irq_default_affinity;
static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}
/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq:	Interrupt to check
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}
/**
 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
 * @irq:	Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}
/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc:	irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action) {
		if (action->thread) {
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
			wake_up_process(action->thread);
		}
		if (action->secondary && action->secondary->thread) {
			set_bit(IRQTF_AFFINITY, &action->secondary->thread_flags);
			wake_up_process(action->secondary->thread);
		}
	}
}
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static void irq_validate_effective_affinity(struct irq_data *data)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
}
#else
static inline void irq_validate_effective_affinity(struct irq_data *data) { }
#endif
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	const struct cpumask *prog_mask;
	int ret;

	static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
	static struct cpumask tmp_mask;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	raw_spin_lock(&tmp_mask_lock);
	/*
	 * If this is a managed interrupt and housekeeping is enabled on
	 * it check whether the requested affinity mask intersects with
	 * a housekeeping CPU. If so, then remove the isolated CPUs from
	 * the mask and just keep the housekeeping CPU(s). This prevents
	 * the affinity setter from routing the interrupt to an isolated
	 * CPU to avoid that I/O submitted from a housekeeping CPU causes
	 * interrupts on an isolated one.
	 *
	 * If the masks do not intersect or include online CPU(s) then
	 * keep the requested mask. The isolated target CPUs are only
	 * receiving interrupts when the I/O operation was submitted
	 * directly from them.
	 *
	 * If all housekeeping CPUs in the affinity mask are offline, the
	 * interrupt will be migrated by the CPU hotplug code once a
	 * housekeeping CPU which belongs to the affinity mask comes
	 * online.
	 */
	if (irqd_affinity_is_managed(data) &&
	    housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) {
		const struct cpumask *hk_mask;

		hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);

		cpumask_and(&tmp_mask, mask, hk_mask);
		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
			prog_mask = mask;
		else
			prog_mask = &tmp_mask;
	} else {
		prog_mask = mask;
	}

	/*
	 * Make sure we only provide online CPUs to the irqchip,
	 * unless we are being asked to force the affinity (in which
	 * case we do as we are told).
	 */
	cpumask_and(&tmp_mask, prog_mask, cpu_online_mask);
	if (!force && !cpumask_empty(&tmp_mask))
		ret = chip->irq_set_affinity(data, &tmp_mask, force);
	else if (force)
		ret = chip->irq_set_affinity(data, mask, force);
	else
		ret = -EINVAL;

	raw_spin_unlock(&tmp_mask_lock);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		fallthrough;
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}
#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
#endif
static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * In case that the underlying vector management is busy and the
	 * architecture supports the generic pending mechanism then utilize
	 * this to avoid returning an error to user space.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}
static bool irq_set_affinity_deactivated(struct irq_data *data,
					 const struct cpumask *mask)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	/*
	 * Handle irq chips which can handle affinity only in activated
	 * state correctly.
	 *
	 * If the interrupt is not yet activated, just store the affinity
	 * mask and do not call the chip driver at all. On activation the
	 * driver has to make sure anyway that the interrupt is in a
	 * usable state so startup works.
	 */
	if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
	    irqd_is_activated(data) || !irqd_affinity_on_activate(data))
		return false;

	cpumask_copy(desc->irq_common_data.affinity, mask);
	irq_data_update_effective_affinity(data, mask);
	irqd_set(data, IRQD_AFFINITY_SET);
	return true;
}
int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_set_affinity_deactivated(data, mask))
		return 0;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		if (!schedule_work(&desc->affinity_notify->work)) {
			/* Work was already scheduled, drop our extra ref */
			kref_put(&desc->affinity_notify->kref,
				 desc->affinity_notify->release);
		}
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}
/**
 * irq_update_affinity_desc - Update affinity management for an interrupt
 * @irq:	The interrupt number to update
 * @affinity:	Pointer to the affinity descriptor
 *
 * This interface can be used to configure the affinity management of
 * interrupts which have been allocated already.
 *
 * There are certain limitations on when it may be used - attempts to use it
 * when the kernel is configured for generic IRQ reservation mode (in
 * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with
 * managed/non-managed interrupt accounting. In addition, attempts to use it on
 * an interrupt which is already started or which has already been configured
 * as managed will also fail, as these mean invalid init state or double init.
 */
int irq_update_affinity_desc(unsigned int irq,
			     struct irq_affinity_desc *affinity)
{
	struct irq_desc *desc;
	unsigned long flags;
	bool activated;
	int ret = 0;

	/*
	 * Supporting this with the reservation scheme used by x86 needs
	 * some more thought. Fail it for now.
	 */
	if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
		return -EOPNOTSUPP;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return -EINVAL;

	/* Requires the interrupt to be shut down */
	if (irqd_is_started(&desc->irq_data)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/* Interrupts which are already managed cannot be modified */
	if (irqd_affinity_is_managed(&desc->irq_data)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * Deactivate the interrupt. That's required to undo
	 * anything an earlier activation has established.
	 */
	activated = irqd_is_activated(&desc->irq_data);
	if (activated)
		irq_domain_deactivate_irq(&desc->irq_data);

	if (affinity->is_managed) {
		irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
		irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
	}

	cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);

	/* Restore the activation state */
	if (activated)
		irq_domain_activate_irq(&desc->irq_data, false);

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
			      bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}
EXPORT_SYMBOL_GPL(irq_set_affinity);
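
/*
 * Example (sketch, not part of this file): steer an interrupt to CPU 2.
 * The "my_irq" number and the warning text are hypothetical.
 *
 *	int ret = irq_set_affinity(my_irq, cpumask_of(2));
 *	if (ret)
 *		pr_warn("could not move irq %u: %d\n", my_irq, ret);
 *
 * The mask must intersect cpu_online_mask; irq_force_affinity() below is
 * solely for low level hotplug code where the target CPU is not yet online.
 */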
/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}
EXPORT_SYMBOL_GPL(irq_force_affinity);
int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
			      bool setaffinity)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	if (m && setaffinity)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint);
static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}
/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq:	Interrupt for which to enable/disable notification
 * @notify:	Context for notification, or %NULL to disable
 *		notification. Function pointers must be initialised;
 *		the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc || desc->istate & IRQS_NMI)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify) {
		if (cancel_work_sync(&old_notify->work)) {
			/* Pending work had a ref, put that one too */
			kref_put(&old_notify->kref, old_notify->release);
		}
		kref_put(&old_notify->kref, old_notify->release);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
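
/*
 * Registration sketch (illustrative, not part of the kernel source): a
 * driver that rebalances its queues when user space moves the interrupt.
 * "my_ctx", "my_notify" and "my_release" are hypothetical; only ->notify
 * and ->release must be filled in before registration.
 *
 *	static void my_notify(struct irq_affinity_notify *notify,
 *			      const cpumask_t *mask)
 *	{
 *		// runs in process context (workqueue)
 *	}
 *
 *	static void my_release(struct kref *ref)
 *	{
 *		// drop the reference taken at registration time
 *	}
 *
 *	my_ctx->notify.notify = my_notify;
 *	my_ctx->notify.release = my_release;
 *	irq_set_affinity_notifier(my_irq, &my_ctx->notify);
 *	...
 *	irq_set_affinity_notifier(my_irq, NULL);	// before free_irq()
 */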
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpu_online_mask);

	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif /* CONFIG_AUTO_IRQ_AFFINITY */
#endif /* CONFIG_SMP */
/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq: interrupt number to set affinity
 * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 *	       specific data for percpu_devid interrupts
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}
static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}
/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);
/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * Can only be called from preemptible code as it might sleep when
 * an interrupt thread is associated to @irq.
 */
void disable_irq(unsigned int irq)
{
	might_sleep();
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
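
/*
 * Nesting sketch (illustrative): disables and enables are reference
 * counted, so every disable_irq() needs a matching enable_irq().
 *
 *	disable_irq(my_irq);	// depth 0 -> 1, line masked
 *	disable_irq(my_irq);	// depth 1 -> 2, no hardware change
 *	enable_irq(my_irq);	// depth 2 -> 1, still masked
 *	enable_irq(my_irq);	// depth 1 -> 0, line unmasked
 *
 * disable_irq() may sleep; from atomic context use disable_irq_nosync()
 * or disable_hardirq() and check its return value.
 */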
/**
 * disable_hardirq - disables an irq and waits for hardirq completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function while
 * holding a resource the hard IRQ handler may need you will deadlock.
 *
 * When used to optimistically disable an interrupt from atomic context
 * the return value must be checked.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);
/**
 * disable_nmi_nosync - disable an nmi without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * The interrupt to disable must have been requested through request_nmi.
 * Unlike disable_nmi(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 */
void disable_nmi_nosync(unsigned int irq)
{
	disable_irq_nosync(irq);
}
void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}
/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
/**
 * enable_nmi - enable handling of an nmi
 * @irq: Interrupt to enable
 *
 * The interrupt to enable must have been requested through request_nmi.
 * Undoes the effect of one call to disable_nmi(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 */
void enable_nmi(unsigned int irq)
{
	enable_irq(irq);
}
static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}
/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 *
 * Note: irq enable/disable state is completely orthogonal
 * to the enable/disable state of irq wake. An irq can be
 * disabled with disable_irq() and still wake the system as
 * long as the irq has wake enabled. If this does not hold,
 * then the underlying irq chip and the related driver need
 * to be investigated.
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* Don't use NMIs as wake up interrupts please */
	if (desc->istate & IRQS_NMI) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
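
/*
 * Suspend/resume sketch (illustrative, hypothetical driver): keep wake
 * enable/disable calls balanced, typically from dev_pm_ops.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct my_dev *md = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(md->irq, 1);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		struct my_dev *md = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(md->irq, 0);
 *		return 0;
 *	}
 *
 * Drivers usually go through the enable_irq_wake()/disable_irq_wake()
 * wrappers from <linux/interrupt.h>, which call this function.
 */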
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}
int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		fallthrough;

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}
#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif
/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}
/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}
static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}
#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	__set_current_state(TASK_RUNNING);

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity)) {
		const struct cpumask *m;

		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
		cpumask_copy(mask, m);
	} else {
		valid = false;
	}
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif
static int irq_wait_for_interrupt(struct irq_desc *desc,
				  struct irqaction *action)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		irq_thread_check_affinity(desc, action);

		if (kthread_should_stop()) {
			/* may need to run one last time */
			if (test_and_clear_bit(IRQTF_RUNTHREAD,
					       &action->thread_flags)) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}
			__set_current_state(TASK_RUNNING);
			return -1;
		}

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
}
/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}
/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_enable();
	local_bh_enable();
	return ret;
}
/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	return ret;
}
void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}
static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}
static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}
/*
 * Internal function to notify that an interrupt thread is ready.
 */
static void irq_thread_set_ready(struct irq_desc *desc,
				 struct irqaction *action)
{
	set_bit(IRQTF_READY, &action->thread_flags);
	wake_up(&desc->wait_for_threads);
}
/*
 * Internal function to wake up an interrupt thread and wait until it is
 * ready.
 */
static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
						  struct irqaction *action)
{
	if (!action || !action->thread)
		return;

	wake_up_process(action->thread);
	wait_event(desc->wait_for_threads,
		   test_bit(IRQTF_READY, &action->thread_flags));
}
/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
			struct irqaction *action);

	irq_thread_set_ready(desc, action);

	sched_set_fifo(current);

	if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
					   &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, TWA_NONE);

	while (!irq_wait_for_interrupt(desc, action)) {
		irqreturn_t action_ret;

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}
/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq:	Interrupt line
 * @dev_id:	Device identity for which the thread should be woken
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
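
/*
 * Usage sketch (illustrative): code that discovers outside the interrupt's
 * own hard handler that the threaded handler registered for "dev" has work
 * to do can kick it directly. "dev" is hypothetical; dev_id must match the
 * cookie passed to request_irq().
 *
 *	irq_wake_thread(dev->irq, dev);
 */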
static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads())
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	/*
	 * No further action required for interrupts which are requested as
	 * threaded interrupts already
	 */
	if (new->handler == irq_default_primary_handler)
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}
static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}
static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}
static bool irq_supports_nmi(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	/* Only IRQs directly managed by the root irqchip can be set as NMI */
	if (d->parent_data)
		return false;
#endif
	/* Don't support NMIs for chips behind a slow bus */
	if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
		return false;

	return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
}
static int irq_nmi_setup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *c = d->chip;

	return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
}
static void irq_nmi_teardown(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *c = d->chip;

	if (c->irq_nmi_teardown)
		c->irq_nmi_teardown(d);
}
static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	new->thread = get_task_struct(t);
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NO_BALANCE this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}
/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 *
 * Locking rules:
 *
 * desc->request_mutex	Provides serialization against a concurrent free_irq()
 *   chip_bus_lock	Provides serialization for slow bus operations
 *     desc->lock	Provides serialization against hard interrupts
 *
 * chip_bus_lock and desc->lock are sufficient for all other management and
 * interrupt related functions. desc->request_mutex solely serializes
 * request/free_irq().
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * If the trigger type is not specified by the caller,
	 * then use the default for this interrupt.
	 */
	if (!(new->flags & IRQF_TRIGGER_MASK))
		new->flags |= irqd_get_trigger_type(&desc->irq_data);
	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}
	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;
	/*
	 * Protects against a concurrent __free_irq() call which might wait
	 * for synchronize_hardirq() to complete without holding the optional
	 * chip bus lock and desc->lock. Also protects against handing out
	 * a recycled oneshot thread_mask bit while it's still in use by
	 * its previous owner.
	 */
	mutex_lock(&desc->request_mutex);

	/*
	 * Acquire bus lock as the irq_request_resources() callback below
	 * might rely on the serialization or the magic power management
	 * functions which are abusing the irq_bus_lock() callback,
	 */
	chip_bus_lock(desc);

	/* First installed action requests resources. */
	if (!desc->action) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_bus_unlock;
		}
	}

	/*
	 * The following block of code has to be executed atomically
	 * protected against a concurrent interrupt and any of the other
	 * management calls which are not serialized via
	 * desc->request_mutex or the optional bus lock.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 * Interrupt lines used for NMIs cannot be shared.
		 */
		unsigned int oldtype;

		if (desc->istate & IRQS_NMI) {
			pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
			       new->name, irq, desc->irq_data.chip->name);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * If nobody did set the configuration before, inherit
		 * the one provided by the requester.
		 */
		if (irqd_trigger_type_was_set(&desc->irq_data)) {
			oldtype = irqd_get_trigger_type(&desc->irq_data);
		} else {
			oldtype = new->flags & IRQF_TRIGGER_MASK;
			irqd_set_trigger_type(&desc->irq_data, oldtype);
		}

		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}
	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_unlock;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->thread_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1UL << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
		       new->name, irq);
		ret = -EINVAL;
		goto out_unlock;
	}
	if (!shared) {
		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);
			if (ret)
				goto out_unlock;
		}

		/*
		 * Activate the interrupt. That activation must happen
		 * independently of IRQ_NOAUTOEN. request_irq() can fail
		 * and the callers are supposed to handle
		 * that. enable_irq() of an interrupt requested with
		 * IRQ_NOAUTOEN is not supposed to fail. The activation
		 * keeps it in shutdown mode, it merely associates
		 * resources if necessary and if that's not possible it
		 * fails. Interrupts which are in managed shutdown mode
		 * will simply ignore that activation request.
		 */
		ret = irq_activate(desc);
		if (ret)
			goto out_unlock;

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
			if (new->flags & IRQF_NO_DEBUG)
				irq_settings_set_no_debug(desc);
		}

		if (noirqdebug)
			irq_settings_set_no_debug(desc);

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		if (!(new->flags & IRQF_NO_AUTOEN) &&
		    irq_settings_can_autoenable(desc)) {
			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		} else {
			/*
			 * Shared interrupts do not go well with disabling
			 * auto enable. The sharing interrupt might request
			 * it while it's still disabled and then wait for
			 * interrupts forever.
			 */
			WARN_ON_ONCE(new->flags & IRQF_SHARED);
			/* Undo nested disables: */
			desc->depth = 1;
		}

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, omsk, nmsk);
	}
	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

	irq_setup_timings(desc, new);

	wake_up_and_wait_for_irq_thread_ready(desc, new);
	wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	return 0;
mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_unlock:
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (!desc->action)
		irq_release_resources(desc);
out_bus_unlock:
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop_put(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop_put(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}
/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
{
	unsigned irq = desc->irq_data.irq;
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	mutex_lock(&desc->request_mutex);
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			mutex_unlock(&desc->request_mutex);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}
	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		/* Only shutdown. Deactivate after synchronize_hardirq() */
		irq_shutdown(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	/*
	 * Drop bus_lock here so the changes which were done in the chip
	 * callbacks above are synced out to the irq chips which hang
	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
	 *
	 * Aside of that the bus_lock can also be taken from the threaded
	 * handler in irq_finalize_oneshot() which results in a deadlock
	 * because kthread_stop() would wait forever for the thread to
	 * complete, which is blocked on the bus lock.
	 *
	 * The still held desc->request_mutex() protects against a
	 * concurrent request_irq() of this irq so the release of resources
	 * and timing data is properly serialized.
	 */
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/*
	 * Make sure it's not being used on another CPU and if the chip
	 * supports it also make sure that there is no (not yet serviced)
	 * interrupt in flight at the hardware level.
	 */
	__synchronize_irq(desc);
#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	/*
	 * The action has already been removed above, but the thread writes
	 * its oneshot mask bit when it completes. Though request_mutex is
	 * held across this which prevents __setup_irq() from handing out
	 * the same bit to a newly requested action.
	 */
	if (action->thread) {
		kthread_stop_put(action->thread);
		if (action->secondary && action->secondary->thread)
			kthread_stop_put(action->secondary->thread);
	}

	/* Last action releases resources */
	if (!desc->action) {
		/*
		 * Reacquire bus lock as irq_release_resources() might
		 * require it to deallocate resources over the slow bus.
		 */
		chip_bus_lock(desc);
		/*
		 * There is no interrupt on the fly anymore. Deactivate it
		 * completely.
		 */
		raw_spin_lock_irqsave(&desc->lock, flags);
		irq_domain_deactivate_irq(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		irq_release_resources(desc);
		chip_bus_sync_unlock(desc);
		irq_remove_timings(desc);
	}

	mutex_unlock(&desc->request_mutex);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}
/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 *
 * Returns the devname argument passed to request_irq.
 */
const void *free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	const char *devname;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	action = __free_irq(desc, dev_id);

	if (!action)
		return NULL;

	devname = action->name;
	kfree(action);
	return devname;
}
EXPORT_SYMBOL(free_irq);
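
/*
 * Teardown sketch (illustrative, not part of the kernel source): on a
 * shared line the device must be silenced before its handler is removed,
 * since the other handlers on the line keep running. Names hypothetical.
 *
 *	writel(0, dev->regs + MY_IRQ_ENABLE);	// stop the device's irqs
 *	free_irq(dev->irq, dev);		// waits for running handlers
 *
 * The dev_id must match the cookie passed to request_irq(), and the call
 * must not be made from interrupt context.
 */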
/* This function must be called with desc->lock held */
static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
{
	const char *devname = NULL;

	desc->istate &= ~IRQS_NMI;

	if (!WARN_ON(desc->action == NULL)) {
		irq_pm_remove_action(desc, desc->action);
		devname = desc->action->name;
		unregister_handler_proc(irq, desc->action);

		kfree(desc->action);
		desc->action = NULL;
	}

	irq_settings_clr_disable_unlazy(desc);
	irq_shutdown_and_deactivate(desc);

	irq_release_resources(desc);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);

	return devname;
}
const void *free_nmi(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	const void *devname;

	if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
		return NULL;

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

	/* NMI still enabled */
	if (WARN_ON(desc->depth == 0))
		disable_nmi_nosync(irq);

	raw_spin_lock_irqsave(&desc->lock, flags);

	irq_nmi_teardown(desc);
	devname = __cleanup_nmi(irq, desc);

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return devname;
}
/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts.
 *	     If handler is NULL and thread_fn != NULL
 *	     the default primary handler is installed.
 * @thread_fn: Function called from the irq handler thread
 *	       If NULL, no irq thread is created
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *	IRQF_ONESHOT		Run thread_fn with interrupt line masked
 */
2142 int request_threaded_irq(unsigned int irq
, irq_handler_t handler
,
2143 irq_handler_t thread_fn
, unsigned long irqflags
,
2144 const char *devname
, void *dev_id
)
2146 struct irqaction
*action
;
2147 struct irq_desc
*desc
;
2150 if (irq
== IRQ_NOTCONNECTED
)
2154 * Sanity-check: shared interrupts must pass in a real dev-ID,
2155 * otherwise we'll have trouble later trying to figure out
2156 * which interrupt is which (messes up the interrupt freeing
2159 * Also shared interrupts do not go well with disabling auto enable.
2160 * The sharing interrupt might request it while it's still disabled
2161 * and then wait for interrupts forever.
2163 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
2164 * it cannot be set along with IRQF_NO_SUSPEND.
2166 if (((irqflags
& IRQF_SHARED
) && !dev_id
) ||
2167 ((irqflags
& IRQF_SHARED
) && (irqflags
& IRQF_NO_AUTOEN
)) ||
2168 (!(irqflags
& IRQF_SHARED
) && (irqflags
& IRQF_COND_SUSPEND
)) ||
2169 ((irqflags
& IRQF_NO_SUSPEND
) && (irqflags
& IRQF_COND_SUSPEND
)))
2172 desc
= irq_to_desc(irq
);
2176 if (!irq_settings_can_request(desc
) ||
2177 WARN_ON(irq_settings_is_per_cpu_devid(desc
)))
2183 handler
= irq_default_primary_handler
;
2186 action
= kzalloc(sizeof(struct irqaction
), GFP_KERNEL
);
2190 action
->handler
= handler
;
2191 action
->thread_fn
= thread_fn
;
2192 action
->flags
= irqflags
;
2193 action
->name
= devname
;
2194 action
->dev_id
= dev_id
;
2196 retval
= irq_chip_pm_get(&desc
->irq_data
);
2202 retval
= __setup_irq(irq
, desc
, action
);
2205 irq_chip_pm_put(&desc
->irq_data
);
2206 kfree(action
->secondary
);
2210 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
2211 if (!retval
&& (irqflags
& IRQF_SHARED
)) {
2213 * It's a shared IRQ -- the driver ought to be prepared for it
2214 * to happen immediately, so let's make sure....
2215 * We disable the irq to make sure that a 'real' IRQ doesn't
2216 * run in parallel with our fake.
2218 unsigned long flags
;
2221 local_irq_save(flags
);
2223 handler(irq
, dev_id
);
2225 local_irq_restore(flags
);
2231 EXPORT_SYMBOL(request_threaded_irq
);
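/*
 * Usage sketch (illustrative; struct foo_dev and the foo_* helpers are
 * hypothetical): the split handler pattern described above. The primary
 * handler runs in hard interrupt context, checks and masks the device,
 * and defers the heavy lifting to the thread.
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_is_ours(foo))
 *			return IRQ_NONE;
 *		foo_mask_device_irq(foo);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_process_events(foo);
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_hardirq, foo_thread,
 *				   IRQF_SHARED, "foo", foo);
 */
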
/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);

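/*
 * Usage sketch: request_any_context_irq() suits drivers that may sit behind
 * either a plain interrupt controller or one that requires nested threading
 * (e.g. a slow-bus GPIO expander). Callers only need to distinguish failure
 * from success; the IRQC_* value merely reports which mode was chosen.
 * foo_handler, foo and dev are hypothetical driver names.
 *
 *	ret = request_any_context_irq(irq, foo_handler, 0, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	dev_dbg(dev, "foo irq runs %s\n",
 *		ret == IRQC_IS_NESTED ? "threaded" : "in hardirq");
 */
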
/**
 * request_nmi - allocate an interrupt line for NMI delivery
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @irqflags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It sets up the IRQ line
 * to be handled as an NMI.
 *
 * An interrupt line delivering NMIs cannot be shared and IRQ handling
 * cannot be threaded.
 *
 * Interrupt lines requested for NMI delivering must produce per cpu
 * interrupts and have auto enabling setting disabled.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If the interrupt line cannot be used to deliver NMIs, function
 * will fail and return a negative value.
 */
int request_nmi(unsigned int irq, irq_handler_t handler,
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/* NMI cannot be shared, used for Polling */
	if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
		return -EINVAL;

	if (!(irqflags & IRQF_PERCPU))
		return -EINVAL;

	if (!handler)
		return -EINVAL;

	desc = irq_to_desc(irq);

	if (!desc || (irq_settings_can_autoenable(desc) &&
	    !(irqflags & IRQF_NO_AUTOEN)) ||
	    !irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
	    !irq_supports_nmi(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
	action->name = name;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		goto err_out;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		goto err_irq_setup;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/* Setup NMI state */
	desc->istate |= IRQS_NMI;
	retval = irq_nmi_setup(desc);
	if (retval) {
		__cleanup_nmi(irq, desc);
		raw_spin_unlock_irqrestore(&desc->lock, flags);
		return -EINVAL;
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;

err_irq_setup:
	irq_chip_pm_put(&desc->irq_data);
err_out:
	kfree(action);

	return retval;
}

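/*
 * Usage sketch (foo_nmi_handler and foo are hypothetical): an NMI line must
 * be requested with auto-enable disabled and enabled explicitly afterwards,
 * and is torn down with free_nmi().
 *
 *	ret = request_nmi(irq, foo_nmi_handler,
 *			  IRQF_PERCPU | IRQF_NO_AUTOEN, "foo-nmi", foo);
 *	if (ret)
 *		return ret;
 *	enable_nmi(irq);
 *	...
 *	disable_nmi_nosync(irq);
 *	free_nmi(irq, foo);
 */
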
void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	/*
	 * If the trigger type is not specified by the caller, then
	 * use the default for this interrupt.
	 */
	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		type = irqd_get_trigger_type(&desc->irq_data);

	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);

void enable_percpu_nmi(unsigned int irq, unsigned int type)
{
	enable_percpu_irq(irq, type);
}

/**
 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
 * @irq: Linux irq number to check for
 *
 * Must be called from a non migratable context. Returns the enable
 * state of a per cpu interrupt on the current cpu.
 */
bool irq_percpu_is_enabled(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	struct irq_desc *desc;
	unsigned long flags;
	bool is_enabled;

	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return false;

	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
	irq_put_desc_unlock(desc, flags);

	return is_enabled;
}
EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

void disable_percpu_nmi(unsigned int irq)
{
	disable_percpu_irq(irq);
}

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	desc->istate &= ~IRQS_NMI;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 * remove_percpu_irq - free a per-cpu interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove a percpu interrupt handler. The handler is removed, but
 * the interrupt line is not disabled. This must be done on each
 * CPU before calling this function. The function does not return
 * until any executing interrupts for this IRQ have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(free_percpu_irq);

void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	if (WARN_ON(!(desc->istate & IRQS_NMI)))
		return;

	kfree(__free_percpu_irq(irq, dev_id));
}

/**
 * setup_percpu_irq - setup a per-cpu interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}

/**
 * __request_percpu_irq - allocate a percpu interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @flags: Interrupt type flags (IRQF_TIMER only)
 * @devname: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt on the local CPU. If the interrupt is supposed to be
 * enabled on other CPUs, it has to be done on each CPU using
 * enable_percpu_irq().
 *
 * Dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 */
int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
			 unsigned long flags, const char *devname,
			 void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	if (flags && flags != IRQF_TIMER)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(__request_percpu_irq);

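/*
 * Usage sketch (foo_percpu_handler and foo_pcpu are hypothetical): percpu
 * interrupts take a per-cpu cookie and must be enabled on each CPU
 * individually, typically from CPU hotplug callbacks.
 *
 *	static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu);
 *
 *	ret = request_percpu_irq(irq, foo_percpu_handler, "foo", &foo_pcpu);
 *	if (ret)
 *		return ret;
 *
 * Then, on each CPU that should receive the interrupt:
 *
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */
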
/**
 * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @name: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
 * have to be setup on each CPU by calling prepare_percpu_nmi() before
 * being enabled on the same CPU by using enable_percpu_nmi().
 *
 * Dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 *
 * Interrupt lines requested for NMI delivering should have auto enabling
 * setting disabled.
 *
 * If the interrupt line cannot be used to deliver NMIs, function
 * will fail returning a negative value.
 */
int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
		       const char *name, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;
	int retval;

	if (!handler)
		return -EINVAL;

	desc = irq_to_desc(irq);

	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc) ||
	    irq_settings_can_autoenable(desc) ||
	    !irq_supports_nmi(desc))
		return -EINVAL;

	/* The line cannot already be NMI */
	if (desc->istate & IRQS_NMI)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
		| IRQF_NOBALANCING;
	action->name = name;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		goto err_out;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		goto err_irq_setup;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->istate |= IRQS_NMI;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;

err_irq_setup:
	irq_chip_pm_put(&desc->irq_data);
err_out:
	kfree(action);

	return retval;
}

/**
 * prepare_percpu_nmi - performs CPU local setup for NMI delivery
 * @irq: Interrupt line to prepare for NMI delivery
 *
 * This call prepares an interrupt line to deliver NMI on the current CPU,
 * before that interrupt line gets enabled with enable_percpu_nmi().
 *
 * As a CPU local operation, this should be called from non-preemptible
 * context.
 *
 * If the interrupt line cannot be used to deliver NMIs, function
 * will fail returning a negative value.
 */
int prepare_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;
	int ret = 0;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags,
				 IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return -EINVAL;

	if (WARN(!(desc->istate & IRQS_NMI),
		 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
		 irq)) {
		ret = -EINVAL;
		goto out;
	}

	ret = irq_nmi_setup(desc);
	if (ret) {
		pr_err("Failed to setup NMI delivery: irq %u\n", irq);
		goto out;
	}

out:
	irq_put_desc_unlock(desc, flags);
	return ret;
}

/**
 * teardown_percpu_nmi - undoes NMI setup of IRQ line
 * @irq: Interrupt line from which CPU local NMI configuration should be
 *	 removed
 *
 * This call undoes the setup done by prepare_percpu_nmi().
 *
 * IRQ line should not be enabled for the current CPU.
 *
 * As a CPU local operation, this should be called from non-preemptible
 * context.
 */
void teardown_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags,
				 IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return;

	if (WARN_ON(!(desc->istate & IRQS_NMI)))
		goto out;

	irq_nmi_teardown(desc);
out:
	irq_put_desc_unlock(desc, flags);
}

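/*
 * Usage sketch of the per-cpu NMI lifecycle (foo_nmi_handler and foo_pcpu
 * are hypothetical). Request once, then prepare and enable on each CPU from
 * non-preemptible context; teardown mirrors the setup.
 *
 *	ret = request_percpu_nmi(irq, foo_nmi_handler, "foo-nmi", &foo_pcpu);
 *
 * On each CPU, e.g. from a CPU hotplug startup callback:
 *
 *	prepare_percpu_nmi(irq);
 *	enable_percpu_nmi(irq, IRQ_TYPE_NONE);
 *
 * And on the way down:
 *
 *	disable_percpu_nmi(irq);
 *	teardown_percpu_nmi(irq);
 */
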
int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
			    bool *state)
{
	struct irq_chip *chip;
	int err = -EINVAL;

	do {
		chip = irq_data_get_irq_chip(data);
		if (WARN_ON_ONCE(!chip))
			return -ENODEV;
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);
	return err;
}

/**
 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: One of IRQCHIP_STATE_* the caller wants to know about
 * @state: a pointer to a boolean where the state is to be stored
 *
 * This call snapshots the internal irqchip state of an
 * interrupt, returning into @state the bit corresponding to
 * stage @which
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool *state)
{
	struct irq_desc *desc;
	struct irq_data *data;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);

	err = __irq_get_irqchip_state(data, which, state);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);

/**
 * irq_set_irqchip_state - set the state of a forwarded interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: State to be restored (one of IRQCHIP_STATE_*)
 * @val: Value corresponding to @which
 *
 * This call sets the internal irqchip state of an interrupt,
 * depending on the value of @which.
 *
 * This function should be called with migration disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (WARN_ON_ONCE(!chip)) {
			err = -ENODEV;
			goto out_unlock;
		}
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);

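/*
 * Usage sketch: a hypervisor forwarding a hardware interrupt to a guest can
 * save and restore the pending state around a vCPU switch. irq and pending
 * are placeholders for the caller's state.
 *
 *	bool pending;
 *
 *	irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *	...
 *	irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, pending);
 */
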
/**
 * irq_has_action - Check whether an interrupt is requested
 * @irq: The linux irq number
 *
 * Returns: A snapshot of the current state
 */
bool irq_has_action(unsigned int irq)
{
	bool res;

	rcu_read_lock();
	res = irq_desc_has_action(irq_to_desc(irq));
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL_GPL(irq_has_action);

/**
 * irq_check_status_bit - Check whether bits in the irq descriptor status are set
 * @irq: The linux irq number
 * @bitmask: The bitmask to evaluate
 *
 * Returns: True if one of the bits in @bitmask is set
 */
bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
{
	struct irq_desc *desc;
	bool res = false;

	rcu_read_lock();
	desc = irq_to_desc(irq);
	if (desc)
		res = !!(desc->status_use_accessors & bitmask);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL_GPL(irq_check_status_bit);

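/*
 * Usage sketch: both helpers take only a Linux irq number, so they are safe
 * for callers that hold no irq_desc reference, e.g. arch code deciding
 * whether an interrupt should be migrated away from a departing CPU.
 * migrate_one_irq() is a placeholder for the caller's logic; IRQ_PER_CPU is
 * one of the IRQ_* status bits these accessors evaluate.
 *
 *	if (irq_has_action(irq) && !irq_check_status_bit(irq, IRQ_PER_CPU))
 *		migrate_one_irq(irq);
 */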