	spin_lock_irqsave(&tqspi->lock, flags);
	t = tqspi->curr_xfer;
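+	/*
+	 * The transfer may already have been torn down, e.g. by the timeout
+	 * handler, before this handler got to run, so re-check curr_xfer
+	 * under the lock and bail out if it is gone.
+	 */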
+	if (!t) {
+		spin_unlock_irqrestore(&tqspi->lock, flags);
+		return IRQ_HANDLED;
+	}
+
	if (tqspi->tx_status || tqspi->rx_status) {
		tegra_qspi_handle_error(tqspi);
		complete(&tqspi->xfer_completion);
	spin_lock_irqsave(&tqspi->lock, flags);
	t = tqspi->curr_xfer;
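+	/*
+	 * As above: the transfer may have been torn down (e.g. by the timeout
+	 * handler) while this thread slept in wait_for_completion_*(), so bail
+	 * out if curr_xfer is already gone.
+	 */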
+	if (!t) {
+		spin_unlock_irqrestore(&tqspi->lock, flags);
+		return IRQ_HANDLED;
+	}
+
	if (num_errors) {
		tegra_qspi_dma_unmap_xfer(tqspi, t);
		tegra_qspi_handle_error(tqspi);
static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
{
	struct tegra_qspi *tqspi = context_data;
+	unsigned long flags;
	u32 status;
	/*
	 * If no transfer is in progress, check if this was a real interrupt
	 * that the timeout handler already processed, or a spurious one.
	 */
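+	/* Serialize the curr_xfer check against the paths that clear it. */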
+	spin_lock_irqsave(&tqspi->lock, flags);
	if (!tqspi->curr_xfer) {
+		spin_unlock_irqrestore(&tqspi->lock, flags);
		/* Spurious interrupt - transfer not ready */
		if (!(status & QSPI_RDY))
			return IRQ_NONE;
	tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);
	tegra_qspi_mask_clear_irq(tqspi);
+	spin_unlock_irqrestore(&tqspi->lock, flags);
+	/*
+	 * The lock is released here, but both handlers re-check curr_xfer
+	 * under the lock before dereferencing it. The DMA handler also needs
+	 * to sleep in wait_for_completion_*(), which cannot be done while
+	 * holding a spinlock.
+	 */
	if (!tqspi->is_curr_dma_xfer)
		return handle_cpu_based_xfer(tqspi);