#include <linux/spi/spi.h>
#include <trace/events/spi.h>
+#define SPI_ENGINE_REG_DATA_WIDTH 0x0C
+#define SPI_ENGINE_REG_DATA_WIDTH_NUM_OF_SDIO_MASK GENMASK(23, 16)
+#define SPI_ENGINE_REG_DATA_WIDTH_MASK GENMASK(15, 0)
#define SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH 0x10
#define SPI_ENGINE_REG_RESET 0x40
#define SPI_ENGINE_CMD_REG_CLK_DIV 0x0
#define SPI_ENGINE_CMD_REG_CONFIG 0x1
#define SPI_ENGINE_CMD_REG_XFER_BITS 0x2
+#define SPI_ENGINE_CMD_REG_SDI_MASK 0x3
+#define SPI_ENGINE_CMD_REG_SDO_MASK 0x4
#define SPI_ENGINE_MISC_SYNC 0x0
#define SPI_ENGINE_MISC_SLEEP 0x1
#define SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE 16
#define SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE 16
+/* Extending SPI_MULTI_LANE_MODE values for optimizing messages. */
+#define SPI_ENGINE_MULTI_BUS_MODE_UNKNOWN -1
+#define SPI_ENGINE_MULTI_BUS_MODE_CONFLICTING -2
+
struct spi_engine_program {
unsigned int length;
uint16_t instructions[] __counted_by(length);
unsigned long flags;
unsigned int offload_num;
unsigned int spi_mode_config;
+ unsigned int multi_lane_mode;
+ u8 rx_primary_lane_mask;
+ u8 tx_primary_lane_mask;
+ u8 rx_all_lanes_mask;
+ u8 tx_all_lanes_mask;
u8 bits_per_word;
};
bool offload_requires_sync;
};
+/*
+ * spi_engine_primary_lane_flag() - build masks selecting only the primary
+ * (first-mapped) RX and TX data lanes of @spi.
+ * @spi: SPI device whose rx_lane_map[]/tx_lane_map[] are consulted
+ * @rx_lane_flags: output; overwritten with BIT(rx_lane_map[0])
+ * @tx_lane_flags: output; overwritten with BIT(tx_lane_map[0])
+ *
+ * Used to program SPI_ENGINE_CMD_REG_SDI_MASK/SDO_MASK for single-lane
+ * operation. NOTE(review): assumes both lane maps have at least one valid
+ * entry — confirm the SPI core validates this before messages are queued.
+ */
+static void spi_engine_primary_lane_flag(struct spi_device *spi,
+ u8 *rx_lane_flags, u8 *tx_lane_flags)
+{
+ *rx_lane_flags = BIT(spi->rx_lane_map[0]);
+ *tx_lane_flags = BIT(spi->tx_lane_map[0]);
+}
+
+/*
+ * spi_engine_all_lanes_flags() - build masks selecting every mapped RX and
+ * TX data lane of @spi.
+ * @spi: SPI device whose rx_lane_map[]/tx_lane_map[] are consulted
+ * @rx_lane_flags: output; receives the OR of BIT(rx_lane_map[i]) over all
+ *                 num_rx_lanes entries
+ * @tx_lane_flags: output; receives the OR of BIT(tx_lane_map[i]) over all
+ *                 num_tx_lanes entries
+ *
+ * The outputs are zeroed first: callers (e.g. the stripe-mode path in the
+ * message program generator) pass uninitialized stack variables, so OR-ing
+ * into them without clearing would fold in indeterminate bits (UB) and
+ * could program bogus SDI/SDO lane masks.
+ */
+static void spi_engine_all_lanes_flags(struct spi_device *spi,
+ u8 *rx_lane_flags, u8 *tx_lane_flags)
+{
+ int i;
+
+ *rx_lane_flags = 0;
+ *tx_lane_flags = 0;
+
+ for (i = 0; i < spi->num_rx_lanes; i++)
+ *rx_lane_flags |= BIT(spi->rx_lane_map[i]);
+
+ for (i = 0; i < spi->num_tx_lanes; i++)
+ *tx_lane_flags |= BIT(spi->tx_lane_map[i]);
+}
+
static void spi_engine_program_add_cmd(struct spi_engine_program *p,
bool dry, uint16_t cmd)
{
}
static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
- struct spi_transfer *xfer)
+ struct spi_transfer *xfer, u32 num_lanes)
{
unsigned int len;
else
len = xfer->len / 4;
+ if (xfer->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE)
+ len /= num_lanes;
+
while (len) {
unsigned int n = min(len, 256U);
unsigned int flags = 0;
{
unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
struct spi_transfer *xfer;
+ int multi_lane_mode = SPI_ENGINE_MULTI_BUS_MODE_UNKNOWN;
u8 min_bits_per_word = U8_MAX;
u8 max_bits_per_word = 0;
min_bits_per_word = min(min_bits_per_word, xfer->bits_per_word);
max_bits_per_word = max(max_bits_per_word, xfer->bits_per_word);
}
+
+ if (xfer->rx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM ||
+ xfer->tx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM) {
+ switch (xfer->multi_lane_mode) {
+ case SPI_MULTI_LANE_MODE_SINGLE:
+ case SPI_MULTI_LANE_MODE_STRIPE:
+ break;
+ default:
+ /* Other modes, such as mirror, are not supported */
+ return -EINVAL;
+ }
+
+ /* If all xfers have the same multi-lane mode, we can optimize. */
+ if (multi_lane_mode == SPI_ENGINE_MULTI_BUS_MODE_UNKNOWN)
+ multi_lane_mode = xfer->multi_lane_mode;
+ else if (multi_lane_mode != xfer->multi_lane_mode)
+ multi_lane_mode = SPI_ENGINE_MULTI_BUS_MODE_CONFLICTING;
+ }
}
/*
priv->bits_per_word = min_bits_per_word;
else
priv->bits_per_word = 0;
+
+ priv->multi_lane_mode = multi_lane_mode;
+ spi_engine_primary_lane_flag(msg->spi,
+ &priv->rx_primary_lane_mask,
+ &priv->tx_primary_lane_mask);
+ spi_engine_all_lanes_flags(msg->spi,
+ &priv->rx_all_lanes_mask,
+ &priv->tx_all_lanes_mask);
}
return 0;
struct spi_engine_offload *priv;
struct spi_transfer *xfer;
int clk_div, new_clk_div, inst_ns;
+ int prev_multi_lane_mode = SPI_MULTI_LANE_MODE_SINGLE;
bool keep_cs = false;
u8 bits_per_word = 0;
* in the same way.
*/
bits_per_word = priv->bits_per_word;
+ prev_multi_lane_mode = priv->multi_lane_mode;
} else {
spi_engine_program_add_cmd(p, dry,
SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
spi_engine_gen_cs(p, dry, spi, !xfer->cs_off);
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (xfer->rx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM ||
+ xfer->tx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM) {
+ if (xfer->multi_lane_mode != prev_multi_lane_mode) {
+ u8 tx_lane_flags, rx_lane_flags;
+
+ if (xfer->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE)
+ spi_engine_all_lanes_flags(spi, &rx_lane_flags,
+ &tx_lane_flags);
+ else
+ spi_engine_primary_lane_flag(spi, &rx_lane_flags,
+ &tx_lane_flags);
+
+ spi_engine_program_add_cmd(p, dry,
+ SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK,
+ rx_lane_flags));
+ spi_engine_program_add_cmd(p, dry,
+ SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK,
+ tx_lane_flags));
+ }
+ prev_multi_lane_mode = xfer->multi_lane_mode;
+ }
+
new_clk_div = host->max_speed_hz / xfer->effective_speed_hz;
if (new_clk_div != clk_div) {
clk_div = new_clk_div;
bits_per_word));
}
- spi_engine_gen_xfer(p, dry, xfer);
+ spi_engine_gen_xfer(p, dry, xfer, spi->num_rx_lanes);
spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer),
inst_ns, xfer->effective_speed_hz);
if (clk_div != 1)
spi_engine_program_add_cmd(p, dry,
SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0));
+
+ /* Restore single lane mode unless offload disable will restore it later. */
+ if (prev_multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE &&
+ (!msg->offload || priv->multi_lane_mode != SPI_MULTI_LANE_MODE_STRIPE)) {
+ u8 rx_lane_flags, tx_lane_flags;
+
+ spi_engine_primary_lane_flag(spi, &rx_lane_flags, &tx_lane_flags);
+
+ spi_engine_program_add_cmd(p, dry,
+ SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK, rx_lane_flags));
+ spi_engine_program_add_cmd(p, dry,
+ SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK, tx_lane_flags));
+ }
}
static void spi_engine_xfer_next(struct spi_message *msg,
writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine->cs_inv),
spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+ if (host->num_data_lanes > 1) {
+ u8 rx_lane_flags, tx_lane_flags;
+
+ spi_engine_primary_lane_flag(device, &rx_lane_flags, &tx_lane_flags);
+
+ writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK,
+ rx_lane_flags),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+ writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK,
+ tx_lane_flags),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+ }
+
/*
* In addition to setting the flags, we have to do a CS assert command
* to make the new setting actually take effect.
priv->bits_per_word),
spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+ if (priv->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE) {
+ writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK,
+ priv->rx_all_lanes_mask),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+ writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK,
+ priv->tx_all_lanes_mask),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+ }
+
writel_relaxed(SPI_ENGINE_CMD_SYNC(1),
spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
reg &= ~SPI_ENGINE_OFFLOAD_CTRL_ENABLE;
writel_relaxed(reg, spi_engine->base +
SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+
+ /* Restore single-lane mode. */
+ if (priv->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE) {
+ writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK,
+ priv->rx_primary_lane_mask),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+ writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK,
+ priv->tx_primary_lane_mask),
+ spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
+ }
}
static struct dma_chan
{
struct spi_engine *spi_engine;
struct spi_controller *host;
- unsigned int version;
+ unsigned int version, data_width_reg_val;
int irq, ret;
irq = platform_get_irq(pdev, 0);
return PTR_ERR(spi_engine->base);
version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
- if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
+ if (ADI_AXI_PCORE_VER_MAJOR(version) > 2) {
dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
ADI_AXI_PCORE_VER_MAJOR(version),
ADI_AXI_PCORE_VER_MINOR(version),
return -ENODEV;
}
+ data_width_reg_val = readl(spi_engine->base + SPI_ENGINE_REG_DATA_WIDTH);
+
if (adi_axi_pcore_ver_gteq(version, 1, 1)) {
unsigned int sizes = readl(spi_engine->base +
SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH);
}
if (adi_axi_pcore_ver_gteq(version, 1, 3))
host->mode_bits |= SPI_MOSI_IDLE_LOW | SPI_MOSI_IDLE_HIGH;
+ if (adi_axi_pcore_ver_gteq(version, 2, 0))
+ host->num_data_lanes = FIELD_GET(SPI_ENGINE_REG_DATA_WIDTH_NUM_OF_SDIO_MASK,
+ data_width_reg_val);
if (host->max_speed_hz == 0)
return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");