 	dev_err(tqspi->dev, "error in transfer, fifo status 0x%08x\n", tqspi->status_reg);
 	tegra_qspi_dump_regs(tqspi);
 	tegra_qspi_flush_fifos(tqspi, true);
-	if (device_reset(tqspi->dev) < 0)
+	if (device_reset(tqspi->dev) < 0) {
 		dev_warn_once(tqspi->dev, "device reset failed\n");
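+		/* reset failed, so at least mask and clear the interrupt */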
+		tegra_qspi_mask_clear_irq(tqspi);
+	}
 }
 static void tegra_qspi_transfer_end(struct spi_device *spi)
 			}
 			/* Reset controller if timeout happens */
-			if (device_reset(tqspi->dev) < 0)
+			if (device_reset(tqspi->dev) < 0) {
 				dev_warn_once(tqspi->dev,
 					      "device reset failed\n");
+				tegra_qspi_mask_clear_irq(tqspi);
+			}
 			ret = -EIO;
 			goto exit;
 		}
 			tegra_qspi_transfer_end(spi);
 			spi_transfer_delay_exec(xfer);
 		}
+		tqspi->curr_xfer = NULL;
 		transfer_phase++;
 	}
 	ret = 0;
 exit:
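+	/* clear the in-flight transfer so a late IRQ thread run bails out */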
+	tqspi->curr_xfer = NULL;
 	msg->status = ret;
 	return ret;
 		msg->actual_length += xfer->len + dummy_bytes;
 complete_xfer:
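+		/* clear the in-flight transfer so a late IRQ run bails out */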
+		tqspi->curr_xfer = NULL;
+
 		if (ret < 0) {
 			tegra_qspi_transfer_end(spi);
 			spi_transfer_delay_exec(xfer);
 	tegra_qspi_calculate_curr_xfer_param(tqspi, t);
 	tegra_qspi_start_cpu_based_transfer(tqspi, t);
 exit:
+	tqspi->curr_xfer = NULL;
 	spin_unlock_irqrestore(&tqspi->lock, flags);
 	return IRQ_HANDLED;
 }
 {
 	struct tegra_qspi *tqspi = context_data;
+	/*
+	 * Occasionally the IRQ thread takes a long time to wake up (usually
+	 * when the CPU it runs on is excessively busy), by which point the
+	 * timeout has already expired and the timed-out transfer has been
+	 * cleaned up. Avoid any processing in that case and bail out early.
+	 */
+	if (!tqspi->curr_xfer)
+		return IRQ_NONE;
+
 	tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
 	if (tqspi->cur_direction & DATA_DIR_TX)