					 CQSPI_REG_IRQ_IND_SRAM_FULL | \
					 CQSPI_REG_IRQ_IND_COMP)
+#define CQSPI_IRQ_MASK_RD_SLOW_SRAM	(CQSPI_REG_IRQ_WATERMARK | \
+					 CQSPI_REG_IRQ_IND_COMP)
+
#define CQSPI_IRQ_MASK_WR		(CQSPI_REG_IRQ_IND_COMP | \
					 CQSPI_REG_IRQ_WATERMARK | \
					 CQSPI_REG_IRQ_UNDERFLOW)
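
For reference, the new CQSPI_IRQ_MASK_RD_SLOW_SRAM is simply CQSPI_IRQ_MASK_RD with CQSPI_REG_IRQ_IND_SRAM_FULL dropped, so slow-SRAM reads are paced by the watermark while the completion bit stays unmasked for the whole transfer. A standalone sketch of how the masks decompose, assuming the bit positions defined in drivers/spi/spi-cadence-quadspi.c (IND_COMP = BIT(2), WATERMARK = BIT(6), IND_SRAM_FULL = BIT(12)):

#include <stdio.h>

#define BIT(n)				(1u << (n))

/* Assumed bit positions, per the register definitions in the driver. */
#define CQSPI_REG_IRQ_IND_COMP		BIT(2)
#define CQSPI_REG_IRQ_WATERMARK		BIT(6)
#define CQSPI_REG_IRQ_IND_SRAM_FULL	BIT(12)

#define CQSPI_IRQ_MASK_RD		(CQSPI_REG_IRQ_WATERMARK | \
					 CQSPI_REG_IRQ_IND_SRAM_FULL | \
					 CQSPI_REG_IRQ_IND_COMP)
#define CQSPI_IRQ_MASK_RD_SLOW_SRAM	(CQSPI_REG_IRQ_WATERMARK | \
					 CQSPI_REG_IRQ_IND_COMP)

int main(void)
{
	/* The slow-SRAM mask is the read mask minus the SRAM-full bit. */
	printf("RD:           0x%04x\n", CQSPI_IRQ_MASK_RD);           /* 0x1044 */
	printf("RD_SLOW_SRAM: 0x%04x\n", CQSPI_IRQ_MASK_RD_SLOW_SRAM); /* 0x0044 */
	return 0;
}
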
	else if (!cqspi->slow_sram)
		irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
	else
-		irq_status &= CQSPI_REG_IRQ_WATERMARK | CQSPI_IRQ_MASK_WR;
+		irq_status &= CQSPI_IRQ_MASK_RD_SLOW_SRAM | CQSPI_IRQ_MASK_WR;
	if (irq_status)
		complete(&cqspi->transfer_complete);
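
Read in full, the handler's dispatch after this hunk looks roughly as follows. This is a sketch assuming the shape of the mainline cqspi_irq_handler(); the DMA-read status branch that precedes the else-if is elided:

static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
{
	struct cqspi_st *cqspi = dev;
	unsigned int irq_status;

	/* Read, then clear, the interrupt status. */
	irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);
	writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);

	/* ... DMA-read status branch elided ... */
	if (!cqspi->slow_sram)
		irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
	else
		irq_status &= CQSPI_IRQ_MASK_RD_SLOW_SRAM | CQSPI_IRQ_MASK_WR;

	if (irq_status)
		complete(&cqspi->transfer_complete);

	return IRQ_HANDLED;
}
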
	 */
	if (use_irq && cqspi->slow_sram)
-		writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
+		writel(CQSPI_IRQ_MASK_RD_SLOW_SRAM, reg_base + CQSPI_REG_IRQMASK);
	else if (use_irq)
		writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
	else
						 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
			ret = -ETIMEDOUT;
-		/*
-		 * Disable all read interrupts until
-		 * we are out of "bytes to read"
-		 */
-		if (cqspi->slow_sram)
-			writel(0x0, reg_base + CQSPI_REG_IRQMASK);
-
		/*
		 * Prevent lost interrupt and race condition by reinitializing early.
		 * A spurious wakeup and another wait cycle can occur here,
			remaining -= bytes_to_read;
			bytes_to_read = cqspi_get_rd_sram_level(cqspi);
		}
-
-		if (use_irq && remaining > 0) {
-			if (cqspi->slow_sram)
-				writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
-		}
	}
	/* Check indirect done status */
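
The two removals above are the point of the change: because CQSPI_IRQ_MASK_RD_SLOW_SRAM keeps both the watermark and the completion interrupt armed, there is no longer any need to mask IRQs while draining the SRAM and re-arm them afterwards, and reinitializing the completion before the drain means a watermark that fires mid-copy costs at most one extra, harmless wait cycle instead of being lost. Condensed, the slow-SRAM read loop after this change would look roughly like the following sketch, assuming the structure of the mainline cqspi_indirect_read_execute(); error handling and the FIFO copy details are elided:

	/* Arm the interrupt mask once; it now stays constant for the
	 * whole transfer. */
	if (use_irq && cqspi->slow_sram)
		writel(CQSPI_IRQ_MASK_RD_SLOW_SRAM, reg_base + CQSPI_REG_IRQMASK);
	else if (use_irq)
		writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);

	while (remaining > 0) {
		if (use_irq &&
		    !wait_for_completion_timeout(&cqspi->transfer_complete,
						 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
			ret = -ETIMEDOUT;

		/* Reinitialize before draining so a watermark raised
		 * while we copy is caught by the next wait. */
		if (use_irq)
			reinit_completion(&cqspi->transfer_complete);

		bytes_to_read = cqspi_get_rd_sram_level(cqspi);
		while (bytes_to_read != 0) {
			/* ... scale to bytes, clamp to 'remaining',
			 * copy out of the SRAM FIFO ... */
			remaining -= bytes_to_read;
			bytes_to_read = cqspi_get_rd_sram_level(cqspi);
		}
	}
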