spi: tegra210-quad: Move curr_xfer read inside spinlock
author Breno Leitao <leitao@debian.org>
Mon, 26 Jan 2026 17:50:27 +0000 (09:50 -0800)
committer Mark Brown <broonie@kernel.org>
Fri, 30 Jan 2026 13:53:13 +0000 (13:53 +0000)
Move the assignment of the transfer pointer from curr_xfer inside the
spinlock critical section in both handle_cpu_based_xfer() and
handle_dma_based_xfer().

Previously, curr_xfer was read before acquiring the lock, creating a
window where the timeout path could clear curr_xfer between reading it
and using it. By moving the read inside the lock, the handlers are
guaranteed to see a consistent value that cannot be modified by the
timeout path.
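The pattern in question is sketched below, using simplified, hypothetical names
(ctx, curr_xfer, timeout_path) and userspace pthread spinlocks rather than the
driver's actual code, purely to illustrate why sampling the shared pointer
inside the critical section closes the race:

	#include <stddef.h>
	#include <pthread.h>

	struct ctx {
		pthread_spinlock_t lock;
		void *curr_xfer;
	};

	/* Racy pattern: the pointer is sampled before the lock is taken. */
	void *handler_racy(struct ctx *c)
	{
		void *t = c->curr_xfer; /* timeout path may clear curr_xfer here */

		pthread_spin_lock(&c->lock);
		/* ... use t, which may already be stale ... */
		pthread_spin_unlock(&c->lock);
		return t;
	}

	/* Fixed pattern: the pointer is sampled inside the critical section. */
	void *handler_fixed(struct ctx *c)
	{
		void *t;

		pthread_spin_lock(&c->lock);
		t = c->curr_xfer; /* consistent with whatever the timeout path did */
		/* ... use t ... */
		pthread_spin_unlock(&c->lock);
		return t;
	}

	/* Timeout path: clears curr_xfer under the same lock. */
	void timeout_path(struct ctx *c)
	{
		pthread_spin_lock(&c->lock);
		c->curr_xfer = NULL;
		pthread_spin_unlock(&c->lock);
	}

Because the timeout path only modifies curr_xfer while holding the same lock,
reading it inside the critical section guarantees the handler sees either the
live transfer or NULL, never a pointer that is freed out from under it.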

Fixes: 921fc1838fb0 ("spi: tegra210-quad: Add support for Tegra210 QSPI controller")
Signed-off-by: Breno Leitao <leitao@debian.org>
Acked-by: Thierry Reding <treding@nvidia.com>
Tested-by: Jon Hunter <jonathanh@nvidia.com>
Acked-by: Jon Hunter <jonathanh@nvidia.com>
Link: https://patch.msgid.link/20260126-tegra_xfer-v2-2-6d2115e4f387@debian.org
Signed-off-by: Mark Brown <broonie@kernel.org>
drivers/spi/spi-tegra210-quad.c

index f0408c0b4b9816bcca0c8b9cdc081c5cda9e75c4..ee291b9e9e9c094728dc3d606d7a106ac0c33b54 100644 (file)
@@ -1440,10 +1440,11 @@ static int tegra_qspi_transfer_one_message(struct spi_controller *host,
 
 static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
 {
-       struct spi_transfer *t = tqspi->curr_xfer;
+       struct spi_transfer *t;
        unsigned long flags;
 
        spin_lock_irqsave(&tqspi->lock, flags);
+       t = tqspi->curr_xfer;
 
        if (tqspi->tx_status ||  tqspi->rx_status) {
                tegra_qspi_handle_error(tqspi);
@@ -1474,7 +1475,7 @@ exit:
 
 static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
 {
-       struct spi_transfer *t = tqspi->curr_xfer;
+       struct spi_transfer *t;
        unsigned int total_fifo_words;
        unsigned long flags;
        long wait_status;
@@ -1513,6 +1514,7 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
        }
 
        spin_lock_irqsave(&tqspi->lock, flags);
+       t = tqspi->curr_xfer;
 
        if (num_errors) {
                tegra_qspi_dma_unmap_xfer(tqspi, t);