spi: tegra210-quad: Fix timeout handling
author    Vishwaroop A <va@nvidia.com>
Tue, 28 Oct 2025 15:57:01 +0000 (15:57 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 18 Dec 2025 12:54:53 +0000 (13:54 +0100)
[ Upstream commit b4e002d8a7cee3b1d70efad0e222567f92a73000 ]

When the CPU that the QSPI interrupt handler runs on (typically CPU 0)
is excessively busy, the IRQ thread may, in rare cases, not run before
the transfer timeout is reached.

When such a timeout is handled, any pending transfers are cleaned up
and the message they correspond to is marked as failed, which leaves
the curr_xfer field pointing at stale memory.

To avoid this, clear curr_xfer to NULL upon timeout and check for this
condition when the IRQ thread is finally run.
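
In outline this amounts to the following (a condensed sketch of the
hunks further down; locking and the surrounding code are omitted):

    /* Timeout path (simplified): clean up the failed transfer and drop
     * the now-stale pointer so a late-running IRQ thread won't touch it.
     */
    tegra_qspi_handle_error(tqspi);
    tqspi->curr_xfer = NULL;

    /* IRQ thread entry (simplified): bail out if nothing is pending. */
    if (!tqspi->curr_xfer)
            return IRQ_NONE;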

While at it, also make sure to clear interrupts on failure so that
subsequent interrupts can be serviced.

A better, more involved, fix would move the interrupt clearing into a
hard IRQ handler. Ideally we would also want to signal that the IRQ
thread no longer needs to be run after the timeout is hit to avoid the
extra check for a valid transfer.
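
For reference, such a split would follow the standard threaded-IRQ
pattern, roughly as in the sketch below. It only illustrates the idea
and is not part of this patch; the handler name, the absence of locking
and the irq field used in the registration call are assumptions.

    /* Hypothetical hard IRQ handler for the more involved fix sketched
     * above; not part of this patch.
     */
    static irqreturn_t tegra_qspi_isr(int irq, void *context_data)
    {
            struct tegra_qspi *tqspi = context_data;

            /* Latch the FIFO status and clear the interrupt right away. */
            tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
            tegra_qspi_mask_clear_irq(tqspi);

            /* The transfer already timed out; don't wake the thread. */
            if (!tqspi->curr_xfer)
                    return IRQ_HANDLED;

            return IRQ_WAKE_THREAD;
    }

    /* Registration would then pass both handlers; the irq field name
     * here is illustrative.
     */
    ret = request_threaded_irq(tqspi->irq, tegra_qspi_isr,
                               tegra_qspi_isr_thread, 0,
                               dev_name(tqspi->dev), tqspi);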

Fixes: 921fc1838fb0 ("spi: tegra210-quad: Add support for Tegra210 QSPI controller")
Signed-off-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: Vishwaroop A <va@nvidia.com>
Link: https://patch.msgid.link/20251028155703.4151791-2-va@nvidia.com
Signed-off-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/spi/spi-tegra210-quad.c

index 92348ebc60c786fb50a6975b30b1455209450d66..39aa0f1485686ce50a6c1fb52ee9dfe4bf5b7a6b 100644 (file)
@@ -999,8 +999,10 @@ static void tegra_qspi_handle_error(struct tegra_qspi *tqspi)
        dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
        tegra_qspi_dump_regs(tqspi);
        tegra_qspi_flush_fifos(tqspi, true);
-       if (device_reset(tqspi->dev) < 0)
+       if (device_reset(tqspi->dev) < 0) {
                dev_warn_once(tqspi->dev, "device reset failed\n");
+               tegra_qspi_mask_clear_irq(tqspi);
+       }
 }
 
 static void tegra_qspi_transfer_end(struct spi_device *spi)
@@ -1145,9 +1147,11 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
                                }
 
                                /* Reset controller if timeout happens */
-                               if (device_reset(tqspi->dev) < 0)
+                               if (device_reset(tqspi->dev) < 0) {
                                        dev_warn_once(tqspi->dev,
                                                      "device reset failed\n");
+                                       tegra_qspi_mask_clear_irq(tqspi);
+                               }
                                ret = -EIO;
                                goto exit;
                        }
@@ -1169,11 +1173,13 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
                        tegra_qspi_transfer_end(spi);
                        spi_transfer_delay_exec(xfer);
                }
+               tqspi->curr_xfer = NULL;
                transfer_phase++;
        }
        ret = 0;
 
 exit:
+       tqspi->curr_xfer = NULL;
        msg->status = ret;
 
        return ret;
@@ -1257,6 +1263,8 @@ static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
                msg->actual_length += xfer->len + dummy_bytes;
 
 complete_xfer:
+               tqspi->curr_xfer = NULL;
+
                if (ret < 0) {
                        tegra_qspi_transfer_end(spi);
                        spi_transfer_delay_exec(xfer);
@@ -1353,6 +1361,7 @@ static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
        tegra_qspi_calculate_curr_xfer_param(tqspi, t);
        tegra_qspi_start_cpu_based_transfer(tqspi, t);
 exit:
+       tqspi->curr_xfer = NULL;
        spin_unlock_irqrestore(&tqspi->lock, flags);
        return IRQ_HANDLED;
 }
@@ -1436,6 +1445,15 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
 {
        struct tegra_qspi *tqspi = context_data;
 
+       /*
+        * Occasionally the IRQ thread takes a long time to wake up (usually
+        * when the CPU that it's running on is excessively busy) and we have
+        * already reached the timeout before and cleaned up the timed out
+        * transfer. Avoid any processing in that case and bail out early.
+        */
+       if (!tqspi->curr_xfer)
+               return IRQ_NONE;
+
        tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
 
        if (tqspi->cur_direction & DATA_DIR_TX)