ath9k: fix race condition in enabling/disabling IRQs
author     Felix Fietkau <nbd@nbd.name>
           Thu, 2 Feb 2017 09:14:52 +0000 (10:14 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sun, 12 Mar 2017 05:44:14 +0000 (06:44 +0100)
commit 3a5e969bb2f6692a256352649355d56d018d6b88 upstream.

The code currently relies on refcounting to disable IRQs from within the
IRQ handler and to re-enable them after the tasklet has run.
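
For illustration, here is a minimal userspace model of that refcounted
handshake (not the driver code; the model_* names are made up, and the
count semantics mirror atomic_inc_and_test()):

    /* Model of the old handshake: the IRQ handler masks interrupts and
     * decrements a count, the tasklet increments it and only unmasks
     * when the count returns to zero. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int intr_ref_cnt = 0;   /* 0 == interrupts may be on */
    static bool irqs_enabled = true;      /* stands in for the IER bit */

    static void model_disable_interrupts(void)  /* roughly the ISR side */
    {
            atomic_fetch_sub(&intr_ref_cnt, 1);
            irqs_enabled = false;               /* mask in "hardware" */
    }

    static void model_enable_interrupts(void)   /* roughly the tasklet side */
    {
            /* mirrors atomic_inc_and_test(): unmask only on reaching 0 */
            if (atomic_fetch_add(&intr_ref_cnt, 1) + 1 != 0) {
                    printf("skip enable, ref count %d\n",
                           atomic_load(&intr_ref_cnt));
                    return;
            }
            irqs_enabled = true;                /* unmask in "hardware" */
    }

    int main(void)
    {
            /* the intended, balanced sequence: one ISR run, one tasklet run */
            model_disable_interrupts();         /* cnt  0 -> -1, masked   */
            model_enable_interrupts();          /* cnt -1 ->  0, unmasked */
            printf("balanced: enabled=%d cnt=%d\n",
                   irqs_enabled, atomic_load(&intr_ref_cnt));
            return 0;
    }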

However, due to race conditions, the IRQ handler is sometimes called
twice, or the tasklet may not run at all (if it is interrupted in the
middle of a reset).

This can cause nasty imbalances in the irq-disable refcount, which leave
the driver permanently stuck until the entire radio has been stopped and
started again (ath_reset will not recover from this).
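
To make the failure mode concrete, replacing main() in the model above
with the sequence below imitates an IRQ handler that fires twice before
its tasklet runs once; the single enable can no longer bring the count
back to zero, so interrupts stay masked (again, illustration only):

    int main(void)
    {
            /* ISR fires twice, the two tasklet schedules coalesce into one run */
            model_disable_interrupts();         /* cnt  0 -> -1, masked         */
            model_disable_interrupts();         /* cnt -1 -> -2, still masked   */
            model_enable_interrupts();          /* cnt -2 -> -1, enable skipped */
            /* interrupts never come back until the radio is fully restarted */
            printf("imbalanced: enabled=%d cnt=%d\n",
                   irqs_enabled, atomic_load(&intr_ref_cnt));
            return 0;
    }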

Instead of using this fragile logic, change the code to ensure that
running the irq handler during tasklet processing is safe, and leave the
refcount untouched.
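
A similarly simplified userspace model of the new handshake (the mutex
stands in for the new sc->intr_lock, and the model_* names are made up):

    /* New handshake: the IRQ handler ORs its status into a shared word
     * under a lock and masks interrupts without touching the refcount;
     * the tasklet drains the accumulated status under the same lock and
     * re-enables only if the refcount still allows it.  Running the IRQ
     * handler again before or during the tasklet just ORs in more bits. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t intr_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ sc->intr_lock */
    static unsigned int intrstatus;         /* ~ sc->intrstatus */
    static atomic_int intr_ref_cnt = 0;     /* no longer touched by ISR/tasklet */
    static bool irqs_enabled = true;        /* stands in for the IER bit */

    static void model_isr(unsigned int status)
    {
            pthread_mutex_lock(&intr_lock);
            intrstatus |= status;           /* accumulate, never overwrite */
            pthread_mutex_unlock(&intr_lock);
            irqs_enabled = false;           /* "kill": mask, refcount untouched */
    }

    static void model_tasklet(void)
    {
            unsigned int status;

            pthread_mutex_lock(&intr_lock);
            status = intrstatus;            /* take and clear the pending bits */
            intrstatus = 0;
            pthread_mutex_unlock(&intr_lock);

            printf("handling status 0x%x\n", status);

            /* "resume": unmask only if nothing else holds interrupts off */
            if (atomic_load(&intr_ref_cnt) == 0)
                    irqs_enabled = true;
    }

    int main(void)
    {
            model_isr(0x1);
            model_isr(0x2);                 /* a duplicate ISR run is now harmless */
            model_tasklet();
            printf("enabled=%d pending=0x%x\n", irqs_enabled, intrstatus);
            return 0;
    }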

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/ath9k/mac.h
drivers/net/wireless/ath/ath9k/main.c

index 378d3458fddba61435cf720261085d2ab3b544a6..52578ae69bd88908b76cc8a518d85795bc1c88bc 100644 (file)
@@ -970,6 +970,7 @@ struct ath_softc {
        struct survey_info *cur_survey;
        struct survey_info survey[ATH9K_NUM_CHANNELS];
 
+       spinlock_t intr_lock;
        struct tasklet_struct intr_tq;
        struct tasklet_struct bcon_tasklet;
        struct ath_hw *sc_ah;
index 20794660d6aec42cbb4a5dcc52944127512e40e7..e0c1f64a29bf5d0a19046a00c578d3b2da4da4b6 100644 (file)
@@ -667,6 +667,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
                common->bt_ant_diversity = 1;
 
        spin_lock_init(&common->cc_lock);
+       spin_lock_init(&sc->intr_lock);
        spin_lock_init(&sc->sc_serial_rw);
        spin_lock_init(&sc->sc_pm_lock);
        spin_lock_init(&sc->chan_lock);
index bba85d1a6cd1bc44852d1845bede4918bc2b5ec7..d937c39b3a0b3569a7e4a67ec24973e17c9e4211 100644 (file)
@@ -805,21 +805,12 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah)
 }
 EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
 
-void ath9k_hw_enable_interrupts(struct ath_hw *ah)
+static void __ath9k_hw_enable_interrupts(struct ath_hw *ah)
 {
        struct ath_common *common = ath9k_hw_common(ah);
        u32 sync_default = AR_INTR_SYNC_DEFAULT;
        u32 async_mask;
 
-       if (!(ah->imask & ATH9K_INT_GLOBAL))
-               return;
-
-       if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
-               ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
-                       atomic_read(&ah->intr_ref_cnt));
-               return;
-       }
-
        if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
            AR_SREV_9561(ah))
                sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
@@ -841,6 +832,39 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
        ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
                REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
 }
+
+void ath9k_hw_resume_interrupts(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       if (!(ah->imask & ATH9K_INT_GLOBAL))
+               return;
+
+       if (atomic_read(&ah->intr_ref_cnt) != 0) {
+               ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
+                       atomic_read(&ah->intr_ref_cnt));
+               return;
+       }
+
+       __ath9k_hw_enable_interrupts(ah);
+}
+EXPORT_SYMBOL(ath9k_hw_resume_interrupts);
+
+void ath9k_hw_enable_interrupts(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       if (!(ah->imask & ATH9K_INT_GLOBAL))
+               return;
+
+       if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
+               ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
+                       atomic_read(&ah->intr_ref_cnt));
+               return;
+       }
+
+       __ath9k_hw_enable_interrupts(ah);
+}
 EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
 
 void ath9k_hw_set_interrupts(struct ath_hw *ah)
index 3bab01435a8613263a0e6bd2365f9e0b37f44660..770fc11b41d183eae8f6b6a4599347732af88f1d 100644 (file)
@@ -744,6 +744,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah);
 void ath9k_hw_enable_interrupts(struct ath_hw *ah);
 void ath9k_hw_disable_interrupts(struct ath_hw *ah);
 void ath9k_hw_kill_interrupts(struct ath_hw *ah);
+void ath9k_hw_resume_interrupts(struct ath_hw *ah);
 
 void ar9002_hw_attach_mac_ops(struct ath_hw *ah);
 
index 59e3bd0f4c2062b599a6a5527af27bbf63b87d58..9de8c95e6cdc0aa985b51ce2743f112c531c0b1a 100644 (file)
@@ -373,21 +373,20 @@ void ath9k_tasklet(unsigned long data)
        struct ath_common *common = ath9k_hw_common(ah);
        enum ath_reset_type type;
        unsigned long flags;
-       u32 status = sc->intrstatus;
+       u32 status;
        u32 rxmask;
 
+       spin_lock_irqsave(&sc->intr_lock, flags);
+       status = sc->intrstatus;
+       sc->intrstatus = 0;
+       spin_unlock_irqrestore(&sc->intr_lock, flags);
+
        ath9k_ps_wakeup(sc);
        spin_lock(&sc->sc_pcu_lock);
 
        if (status & ATH9K_INT_FATAL) {
                type = RESET_TYPE_FATAL_INT;
                ath9k_queue_reset(sc, type);
-
-               /*
-                * Increment the ref. counter here so that
-                * interrupts are enabled in the reset routine.
-                */
-               atomic_inc(&ah->intr_ref_cnt);
                ath_dbg(common, RESET, "FATAL: Skipping interrupts\n");
                goto out;
        }
@@ -403,11 +402,6 @@ void ath9k_tasklet(unsigned long data)
                        type = RESET_TYPE_BB_WATCHDOG;
                        ath9k_queue_reset(sc, type);
 
-                       /*
-                        * Increment the ref. counter here so that
-                        * interrupts are enabled in the reset routine.
-                        */
-                       atomic_inc(&ah->intr_ref_cnt);
                        ath_dbg(common, RESET,
                                "BB_WATCHDOG: Skipping interrupts\n");
                        goto out;
@@ -420,7 +414,6 @@ void ath9k_tasklet(unsigned long data)
                if ((sc->gtt_cnt >= MAX_GTT_CNT) && !ath9k_hw_check_alive(ah)) {
                        type = RESET_TYPE_TX_GTT;
                        ath9k_queue_reset(sc, type);
-                       atomic_inc(&ah->intr_ref_cnt);
                        ath_dbg(common, RESET,
                                "GTT: Skipping interrupts\n");
                        goto out;
@@ -477,7 +470,7 @@ void ath9k_tasklet(unsigned long data)
        ath9k_btcoex_handle_interrupt(sc, status);
 
        /* re-enable hardware interrupt */
-       ath9k_hw_enable_interrupts(ah);
+       ath9k_hw_resume_interrupts(ah);
 out:
        spin_unlock(&sc->sc_pcu_lock);
        ath9k_ps_restore(sc);
@@ -541,7 +534,9 @@ irqreturn_t ath_isr(int irq, void *dev)
                return IRQ_NONE;
 
        /* Cache the status */
-       sc->intrstatus = status;
+       spin_lock(&sc->intr_lock);
+       sc->intrstatus |= status;
+       spin_unlock(&sc->intr_lock);
 
        if (status & SCHED_INTR)
                sched = true;
@@ -587,7 +582,7 @@ chip_reset:
 
        if (sched) {
                /* turn off every interrupt */
-               ath9k_hw_disable_interrupts(ah);
+               ath9k_hw_kill_interrupts(ah);
                tasklet_schedule(&sc->intr_tq);
        }