F: arch/arm/mach-versal/
F: drivers/net/xilinx_axi_mrmac.*
F: drivers/soc/soc_xilinx_versal.c
+F: drivers/spi/cadence_ospi_versal.c
F: drivers/watchdog/xilinx_wwdt.c
- F: drivers/gpio/zynqmp_gpio_modepin.c
N: (?<!uni)versal
ARM VERSATILE EXPRESS DRIVERS
F:	drivers/firmware/firmware-zynqmp.c
F: drivers/fpga/zynqpl.c
F: drivers/gpio/zynq_gpio.c
++F: drivers/gpio/zynqmp_gpio_modepin.c
F: drivers/i2c/i2c-cdns.c
F: drivers/i2c/muxes/pca954x.c
F: drivers/i2c/zynq_i2c.c
imply FAT_WRITE
imply MP
imply DM_USB_GADGET
++ imply ZYNQMP_GPIO_MODEPIN if DM_GPIO && USB
config ARCH_TEGRA
bool "NVIDIA Tegra"
--- /dev/null
- CONFIG_SYS_MALLOC_LEN=0x2000
+CONFIG_ARM=y
+CONFIG_SYS_CONFIG_NAME="xilinx_versal_mini_qspi"
+CONFIG_ARCH_VERSAL=y
+CONFIG_SYS_TEXT_BASE=0xFFFC0000
++CONFIG_SYS_MALLOC_LEN=0x2000
+CONFIG_NR_DRAM_BANKS=1
+CONFIG_ENV_SIZE=0x80
- CONFIG_DEFAULT_DEVICE_TREE="versal-mini-ospi-single"
+# CONFIG_DM_GPIO is not set
++CONFIG_DEFAULT_DEVICE_TREE="versal-mini-ospi-single"
+CONFIG_SYS_MEM_RSVD_FOR_MMU=y
+CONFIG_COUNTER_FREQUENCY=100000000
+CONFIG_VERSAL_NO_DDR=y
+# CONFIG_PSCI_RESET is not set
- # CONFIG_EFI_LOADER is not set
+# CONFIG_EXPERT is not set
++CONFIG_SYS_LOAD_ADDR=0x8000000
+# CONFIG_AUTOBOOT is not set
+CONFIG_SYS_CONSOLE_INFO_QUIET=y
+# CONFIG_DISPLAY_CPUINFO is not set
+CONFIG_BOARD_EARLY_INIT_R=y
+# CONFIG_BOARD_LATE_INIT is not set
+# CONFIG_CMDLINE_EDITING is not set
+# CONFIG_AUTO_COMPLETE is not set
+# CONFIG_SYS_LONGHELP is not set
+CONFIG_SYS_PROMPT="Versal> "
+# CONFIG_CMD_BDI is not set
+# CONFIG_CMD_CONSOLE is not set
+# CONFIG_CMD_BOOTD is not set
+# CONFIG_CMD_BOOTM is not set
+# CONFIG_CMD_BOOTI is not set
+# CONFIG_CMD_ELF is not set
+# CONFIG_CMD_FDT is not set
+# CONFIG_CMD_GO is not set
+# CONFIG_CMD_RUN is not set
+# CONFIG_CMD_IMI is not set
+# CONFIG_CMD_XIMG is not set
+# CONFIG_CMD_EXPORTENV is not set
+# CONFIG_CMD_IMPORTENV is not set
+# CONFIG_CMD_EDITENV is not set
+# CONFIG_CMD_SAVEENV is not set
+# CONFIG_CMD_ENV_EXISTS is not set
+# CONFIG_CMD_CRC32 is not set
+# CONFIG_CMD_LOADB is not set
+# CONFIG_CMD_LOADS is not set
+# CONFIG_CMD_ECHO is not set
+# CONFIG_CMD_ITEST is not set
+# CONFIG_CMD_SOURCE is not set
+# CONFIG_CMD_SETEXPR is not set
+# CONFIG_PARTITIONS is not set
+# CONFIG_NET is not set
+# CONFIG_DM_WARN is not set
+# CONFIG_DM_DEVICE_REMOVE is not set
+# CONFIG_MMC is not set
+CONFIG_DM_SPI_FLASH=y
+CONFIG_SF_DEFAULT_SPEED=30000000
+CONFIG_SPI_FLASH_GIGADEVICE=y
+CONFIG_SPI_FLASH_ISSI=y
+CONFIG_SPI_FLASH_MACRONIX=y
+CONFIG_SPI_FLASH_STMICRO=y
+CONFIG_ARM_DCC=y
+CONFIG_SPI=y
+CONFIG_DM_SPI=y
+CONFIG_CADENCE_QSPI=y
+CONFIG_CADENCE_OSPI_VERSAL=y
--- /dev/null
- CONFIG_SYS_MALLOC_LEN=0x2000
+CONFIG_ARM=y
+CONFIG_SYS_CONFIG_NAME="xilinx_versal_mini_qspi"
+CONFIG_ARCH_VERSAL=y
+CONFIG_SYS_TEXT_BASE=0xFFFC0000
++CONFIG_SYS_MALLOC_LEN=0x2000
+CONFIG_NR_DRAM_BANKS=1
+CONFIG_ENV_SIZE=0x80
- CONFIG_DEFAULT_DEVICE_TREE="versal-mini-qspi-single"
++CONFIG_DEFAULT_DEVICE_TREE="versal-mini-qspi-single"
+CONFIG_SYS_MEM_RSVD_FOR_MMU=y
+CONFIG_COUNTER_FREQUENCY=100000000
+CONFIG_VERSAL_NO_DDR=y
+# CONFIG_PSCI_RESET is not set
- # CONFIG_EFI_LOADER is not set
+# CONFIG_EXPERT is not set
++CONFIG_SYS_LOAD_ADDR=0x8000000
+# CONFIG_AUTOBOOT is not set
+CONFIG_SYS_CONSOLE_INFO_QUIET=y
+# CONFIG_DISPLAY_CPUINFO is not set
+CONFIG_BOARD_EARLY_INIT_R=y
+# CONFIG_BOARD_LATE_INIT is not set
+# CONFIG_CMDLINE_EDITING is not set
+# CONFIG_AUTO_COMPLETE is not set
+# CONFIG_SYS_LONGHELP is not set
+CONFIG_SYS_PROMPT="Versal> "
+# CONFIG_CMD_BDI is not set
+# CONFIG_CMD_CONSOLE is not set
+# CONFIG_CMD_BOOTD is not set
+# CONFIG_CMD_BOOTM is not set
+# CONFIG_CMD_BOOTI is not set
+# CONFIG_CMD_ELF is not set
+# CONFIG_CMD_FDT is not set
+# CONFIG_CMD_GO is not set
+# CONFIG_CMD_RUN is not set
+# CONFIG_CMD_IMI is not set
+# CONFIG_CMD_XIMG is not set
+# CONFIG_CMD_EXPORTENV is not set
+# CONFIG_CMD_IMPORTENV is not set
+# CONFIG_CMD_EDITENV is not set
+# CONFIG_CMD_SAVEENV is not set
+# CONFIG_CMD_ENV_EXISTS is not set
+# CONFIG_CMD_CRC32 is not set
+# CONFIG_CMD_LOADB is not set
+# CONFIG_CMD_LOADS is not set
+# CONFIG_CMD_ECHO is not set
+# CONFIG_CMD_ITEST is not set
+# CONFIG_CMD_SOURCE is not set
+# CONFIG_CMD_SETEXPR is not set
+# CONFIG_PARTITIONS is not set
+# CONFIG_NET is not set
+# CONFIG_DM_WARN is not set
+# CONFIG_DM_DEVICE_REMOVE is not set
+# CONFIG_MMC is not set
+CONFIG_DM_SPI_FLASH=y
+CONFIG_SF_DEFAULT_SPEED=30000000
+CONFIG_SPI_FLASH_ISSI=y
+CONFIG_SPI_FLASH_MACRONIX=y
+CONFIG_SPI_FLASH_SPANSION=y
+CONFIG_SPI_FLASH_STMICRO=y
+CONFIG_SPI_FLASH_SST=y
+CONFIG_SPI_FLASH_WINBOND=y
+# CONFIG_SPI_FLASH_USE_4K_SECTORS is not set
+CONFIG_ARM_DCC=y
+CONFIG_SPI=y
+CONFIG_DM_SPI=y
+CONFIG_ZYNQMP_GQSPI=y
CONFIG_SYS_MALLOC_F_LEN=0x100000
CONFIG_SYS_MEMTEST_START=0x00000000
CONFIG_SYS_MEMTEST_END=0x00001000
-CONFIG_DM_GPIO=y
++CONFIG_DEFAULT_DEVICE_TREE="versal-vc-p-a2197-00-revA-x-prc-01-revA"
CONFIG_CMD_FRU=y
CONFIG_DEFINE_TCM_OCM_MMAP=y
CONFIG_COUNTER_FREQUENCY=100000000
CONFIG_SOC_XILINX_VERSAL=y
CONFIG_SPI=y
CONFIG_DM_SPI=y
+CONFIG_CADENCE_QSPI=y
+CONFIG_CADENCE_OSPI_VERSAL=y
CONFIG_ZYNQ_SPI=y
+CONFIG_ZYNQMP_GQSPI=y
CONFIG_USB=y
- CONFIG_DM_USB=y
CONFIG_DM_USB_GADGET=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_DWC3=y
CONFIG_DM_MTD=y
CONFIG_MTD_RAW_NAND=y
CONFIG_NAND_ARASAN=y
+ CONFIG_SYS_NAND_ONFI_DETECTION=y
CONFIG_SYS_NAND_MAX_CHIPS=2
-CONFIG_SPI_FLASH_BAR=y
-CONFIG_SF_DUAL_FLASH=y
CONFIG_SPI_FLASH_ISSI=y
CONFIG_SPI_FLASH_MACRONIX=y
CONFIG_SPI_FLASH_SPANSION=y
The GPIOs for a device are defined in the device tree with one node
for each bank.
- endmenu
+ config NOMADIK_GPIO
+ bool "Nomadik GPIO driver"
+ depends on DM_GPIO
+ help
+ Support GPIO access on ST-Ericsson Ux500 SoCs. The GPIOs are arranged
+ into a number of banks each with 32 GPIOs. The GPIOs for a device are
+ defined in the device tree with one node for each bank.
+
++config ZYNQMP_GPIO_MODEPIN
++ bool "ZynqMP gpio modepin"
++ depends on DM_GPIO
++ help
++	  This config enables the ZynqMP GPIO modepin driver, which sets and
++	  reads the status of the PS_MODE pins. The mode pins are accessed
++	  through the Xilinx firmware interface. In the modepin register,
++	  bits [3:0] set the direction, bits [7:4] read the IO and bits
++	  [11:8] set/clear the IO.
++
+ endif
obj-$(CONFIG_MSCC_SGPIO) += mscc_sgpio.o
obj-$(CONFIG_NX_GPIO) += nx_gpio.o
obj-$(CONFIG_SIFIVE_GPIO) += sifive-gpio.o
+ obj-$(CONFIG_NOMADIK_GPIO) += nmk_gpio.o
+ obj-$(CONFIG_MAX7320_GPIO) += max7320_gpio.o
++obj-$(CONFIG_ZYNQMP_GPIO_MODEPIN) += zynqmp_gpio_modepin.o
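For illustration, a minimal sketch of the PS_MODE bit layout described in the ZYNQMP_GPIO_MODEPIN help text above; the register handling and names here are hypothetical and not part of this patch:

#define MODEPIN_DIR_SHIFT	0	/* [3:0]  direction bits */
#define MODEPIN_READ_SHIFT	4	/* [7:4]  read-back bits */
#define MODEPIN_OUT_SHIFT	8	/* [11:8] output set/clear bits */

/* Return an updated modepin register value driving pin 'pin' (0..3) high or low. */
static inline unsigned int modepin_drive(unsigned int reg, unsigned int pin, int high)
{
	reg |= 1U << (MODEPIN_DIR_SHIFT + pin);		/* configure the pin as an output */
	if (high)
		reg |= 1U << (MODEPIN_OUT_SHIFT + pin);	/* drive it high */
	else
		reg &= ~(1U << (MODEPIN_OUT_SHIFT + pin));	/* drive it low */
	return reg;
}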
Bank/Extended address registers are used to access the flash
which has size > 16MiB in 3-byte addressing.
-config SF_DUAL_FLASH
- bool "SPI DUAL flash memory support"
- help
- Enable this option to support two flash memories connected to a single
- controller. Currently Xilinx Zynq qspi supports this.
-
+ config SPI_FLASH_UNLOCK_ALL
+ bool "Unlock the entire SPI flash on u-boot startup"
+ default y
+ help
+ Some flashes tend to power up with the software write protection
+ bits set. If this option is set, the whole flash will be unlocked.
+
+	  For legacy reasons, this option defaults to y. But if you intend to
+ actually use the software protection bits you should say n here.
+
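A board that actually relies on the flash's software protection bits would therefore disable this in its defconfig with a line like the following (illustrative, not part of this patch):

# CONFIG_SPI_FLASH_UNLOCK_ALL is not set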
config SPI_FLASH_ATMEL
bool "Atmel SPI flash support"
help
/* convert the dummy cycles to the number of bytes */
op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
+ /*
+	 * For 1_x_x modes, where x is not 1, the above calculation is not
+	 * suitable for dummy bus widths greater than 1.
+ */
+ if (op.dummy.buswidth > 1)
+ op.dummy.nbytes = nor->read_dummy / 8;
+
+ if (spi_nor_protocol_is_dtr(nor->read_proto))
+ op.dummy.nbytes *= 2;
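+	/*
+	 * Illustration with assumed numbers: read_dummy = 8 cycles on a
+	 * 4-bit-wide dummy phase is 8 * 4 / 8 = 4 bytes by the generic
+	 * formula, but 8 / 8 = 1 byte after the 1_x_x correction above;
+	 * a DTR read protocol then doubles whatever count was computed.
+	 */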
+
while (remaining) {
op.data.nbytes = remaining < UINT_MAX ? remaining : UINT_MAX;
ret = spi_mem_adjust_op_size(nor->spi, &op);
*/
static int read_sr(struct spi_nor *nor)
{
+ struct spi_mem_op op;
int ret;
u8 val[2];
+ u8 addr_nbytes, dummy;
+
+ if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
+ addr_nbytes = nor->rdsr_addr_nbytes;
+ dummy = nor->rdsr_dummy;
+ } else {
+ addr_nbytes = 0;
+ dummy = 0;
+ }
+
+ op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 0),
+ SPI_MEM_OP_ADDR(addr_nbytes, 0, 0),
+ SPI_MEM_OP_DUMMY(dummy, 0),
+ SPI_MEM_OP_DATA_IN(1, NULL, 0));
+
+ spi_nor_setup_op(nor, &op, nor->reg_proto);
+
+ /*
+ * We don't want to read only one byte in DTR mode. So, read 2 and then
+ * discard the second byte.
+ */
+ if (spi_nor_protocol_is_dtr(nor->reg_proto))
+ op.data.nbytes = 2;
- ret = spi_nor_read_write_reg(nor, &op, val);
- if (ret < 0) {
- pr_debug("error %d reading SR\n", (int)ret);
- return ret;
+ if (nor->isparallel) {
- ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val[0], 2);
++ op.data.nbytes = 2;
++ ret = spi_nor_read_write_reg(nor, &op, &val[0]);
+ if (ret < 0) {
+ pr_debug("error %d reading SR\n", (int)ret);
+ return ret;
+ }
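+		/*
+		 * In dual-parallel mode each device reports its own SR;
+		 * OR the two values so that bits such as WIP stay set if
+		 * either flash is still busy or protected.
+		 */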
+ val[0] |= val[1];
+ } else {
- ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val[0], 1);
++ op.data.nbytes = 1;
++ ret = spi_nor_read_write_reg(nor, &op, &val[0]);
+ if (ret < 0) {
+ pr_debug("error %d reading SR\n", (int)ret);
+ return ret;
+ }
}
- return *val;
+ return val[0];
}
/*
*/
static int read_fsr(struct spi_nor *nor)
{
+ struct spi_mem_op op;
int ret;
u8 val[2];
+ u8 addr_nbytes, dummy;
+
+ if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
+ addr_nbytes = nor->rdsr_addr_nbytes;
+ dummy = nor->rdsr_dummy;
+ } else {
+ addr_nbytes = 0;
+ dummy = 0;
+ }
+
+ op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 0),
+ SPI_MEM_OP_ADDR(addr_nbytes, 0, 0),
+ SPI_MEM_OP_DUMMY(dummy, 0),
+ SPI_MEM_OP_DATA_IN(1, NULL, 0));
+
+ spi_nor_setup_op(nor, &op, nor->reg_proto);
+
+ /*
+ * We don't want to read only one byte in DTR mode. So, read 2 and then
+ * discard the second byte.
+ */
+ if (spi_nor_protocol_is_dtr(nor->reg_proto))
+ op.data.nbytes = 2;
- ret = spi_nor_read_write_reg(nor, &op, val);
- if (ret < 0) {
- pr_debug("error %d reading FSR\n", ret);
- return ret;
+ if (nor->isparallel) {
- ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val[0], 2);
++ op.data.nbytes = 2;
++ ret = spi_nor_read_write_reg(nor, &op, &val[0]);
+ if (ret < 0) {
+			pr_debug("error %d reading FSR\n", ret);
+ return ret;
+ }
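+		/*
+		 * The FSR ready/done style bits are 1 when a device is idle,
+		 * so AND the two values: the combined FSR reports ready only
+		 * when both flashes are ready.
+		 */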
+ val[0] &= val[1];
+ } else {
- ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val[0], 1);
++ op.data.nbytes = 1;
++ ret = spi_nor_read_write_reg(nor, &op, &val[0]);
+ if (ret < 0) {
+ pr_debug("error %d reading FSR\n", ret);
+ return ret;
+ }
}
- return *val;
+ return val[0];
}
/*
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- int ret;
+ u32 addr, len, rem, offset;
- u32 addr, len, rem;
+ bool addr_known = false;
+ int ret, err;
dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
(long long)instr->len);
addr = instr->addr;
len = instr->len;
+ instr->state = MTD_ERASING;
+ addr_known = true;
+
+ if (nor->flash_is_locked) {
+ write_disable(nor);
+
+ if (nor->flash_is_locked(nor, addr, len) > 0) {
+ printf("offset 0x%x is protected and cannot be erased\n",
+ addr);
+ return -EINVAL;
+ }
+ }
+
while (len) {
- write_enable(nor);
+ WATCHDOG_RESET();
+ if (ctrlc()) {
+ addr_known = false;
+ ret = -EINTR;
+ goto erase_err;
+ }
++
+ offset = addr;
+ if (nor->isparallel)
+ offset /= 2;
+
+ if (nor->isstacked) {
+ if (offset >= (mtd->size / 2)) {
+ offset = offset - (mtd->size / 2);
+ nor->spi->flags |= SPI_XFER_U_PAGE;
+ } else {
+ nor->spi->flags &= ~SPI_XFER_U_PAGE;
+ }
+ }
+ if (nor->addr_width == 3) {
#ifdef CONFIG_SPI_FLASH_BAR
- ret = write_bar(nor, addr);
- if (ret < 0)
- goto erase_err;
+ /* Update Extended Address Register */
+ ret = write_bar(nor, offset);
+ if (ret)
+ goto erase_err;
#endif
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
+ }
+
+ ret = write_enable(nor);
+ if (ret < 0)
goto erase_err;
- write_enable(nor);
-
- ret = spi_nor_erase_sector(nor, addr);
+ ret = spi_nor_erase_sector(nor, offset);
- if (ret)
+ if (ret < 0)
goto erase_err;
- addr += mtd->erasesize;
- len -= mtd->erasesize;
+ addr += ret;
+ len -= ret;
ret = spi_nor_wait_till_ready(nor);
if (ret)
#endif /* CONFIG_SPI_FLASH_SFDP_SUPPORT */
#endif /* CONFIG_SPI_FLASH_SPANSION */
- u8 buf = SPINOR_EN_OCTAL_DDR;
+static int micron_octal_ddr_enable(struct spi_nor *nor)
+{
+ struct spi_slave *spi = nor->spi;
+ int ret;
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRCR, 1),
++ u8 buf = SPINOR_MT_OCT_DTR;
+
+ struct spi_mem_op op =
- struct spi_nor_read_command {
- u8 num_mode_clocks;
- u8 num_wait_states;
- u8 opcode;
- enum spi_nor_protocol proto;
- };
-
- struct spi_nor_pp_command {
- u8 opcode;
- enum spi_nor_protocol proto;
- };
-
- enum spi_nor_read_command_index {
- SNOR_CMD_READ,
- SNOR_CMD_READ_FAST,
- SNOR_CMD_READ_1_1_1_DTR,
-
- /* Dual SPI */
- SNOR_CMD_READ_1_1_2,
- SNOR_CMD_READ_1_2_2,
- SNOR_CMD_READ_2_2_2,
- SNOR_CMD_READ_1_2_2_DTR,
-
- /* Quad SPI */
- SNOR_CMD_READ_1_1_4,
- SNOR_CMD_READ_1_4_4,
- SNOR_CMD_READ_4_4_4,
- SNOR_CMD_READ_1_4_4_DTR,
-
- /* Octo SPI */
- SNOR_CMD_READ_1_1_8,
- SNOR_CMD_READ_1_8_8,
- SNOR_CMD_READ_8_8_8,
- SNOR_CMD_READ_1_8_8_DTR,
-
- SNOR_CMD_READ_MAX
- };
-
- enum spi_nor_pp_command_index {
- SNOR_CMD_PP,
-
- /* Quad SPI */
- SNOR_CMD_PP_1_1_4,
- SNOR_CMD_PP_1_4_4,
- SNOR_CMD_PP_4_4_4,
-
- /* Octo SPI */
- SNOR_CMD_PP_1_1_8,
- SNOR_CMD_PP_1_8_8,
- SNOR_CMD_PP_8_8_8,
-
- SNOR_CMD_PP_MAX
- };
-
- struct spi_nor_flash_parameter {
- u64 size;
- u32 page_size;
-
- struct spi_nor_hwcaps hwcaps;
- struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
- struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX];
-
- int (*quad_enable)(struct spi_nor *nor);
- };
-
++ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_MT_WR_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(4, 0, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, &buf, 1));
+
+ ret = set_4byte(nor, nor->info, 1);
+ if (ret)
+ return ret;
+
+ ret = write_enable(nor);
+ if (ret)
+ return ret;
+
+ spi->flags |= SPI_XFER_SET_DDR;
+ ret = spi_mem_exec_op(nor->spi, &op);
+ if (ret < 0)
+ dev_dbg(nor->dev, "error %d writing\n", ret);
+
+ return ret;
+}
+
static void
spi_nor_set_read_settings(struct spi_nor_read_command *read,
u8 num_mode_clocks,
0, 8, SPINOR_OP_READ_1_1_8,
SNOR_PROTO_1_1_8);
}
+ if (info->flags & SPI_NOR_OCTAL_DTR_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8;
+ spi_nor_set_read_settings
+ (¶ms->reads[SNOR_CMD_READ_8_8_8],
+ 0, 16, SPINOR_OP_READ_1_8_8,
+ SNOR_PROTO_8_8_8);
+ }
+ if (info->flags & SPI_NOR_OCTAL_DTR_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
+ spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_8_8_8_DTR],
+ 0, 20, SPINOR_OP_READ_FAST,
+ SNOR_PROTO_8_8_8_DTR);
+ }
+
/* Page Program settings. */
params->hwcaps.mask |= SNOR_HWCAPS_PP;
spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP],
}
}
+ if (nor->isparallel) {
+ nor->mtd.erasesize <<= nor->shift;
+ params->page_size <<= nor->shift;
+ }
+
+ if (nor->isparallel || nor->isstacked)
+ params->size <<= nor->shift;
+
+ spi_nor_post_sfdp_fixups(nor, params);
+
return 0;
}
else
nor->quad_enable = NULL;
- nor->octal_ddr_enable = micron_octal_ddr_enable;
+ if ((info->flags & SPI_NOR_OCTAL_DTR_READ) &&
+ (info->flags & SPI_NOR_OCTAL_WRITE))
++ nor->octal_dtr_enable = micron_octal_ddr_enable;
++
+ return 0;
+ }
+
+ static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info,
+ const struct spi_nor_flash_parameter *params)
+ {
+ if (!nor->setup)
+ return 0;
+
+ return nor->setup(nor, info, params);
+ }
+
+ #ifdef CONFIG_SPI_FLASH_SPANSION
+ static int s25hx_t_mdp_ready(struct spi_nor *nor)
+ {
+ u32 addr;
+ int ret;
+
+ for (addr = 0; addr < nor->mtd.size; addr += SZ_128M) {
+ ret = spansion_sr_ready(nor, addr, 0);
+ if (!ret)
+ return ret;
+ }
+
+ return 1;
+ }
+
+ static int s25hx_t_quad_enable(struct spi_nor *nor)
+ {
+ u32 addr;
+ int ret;
+
+ for (addr = 0; addr < nor->mtd.size; addr += SZ_128M) {
+ ret = spansion_quad_enable_volatile(nor, addr, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ static int s25hx_t_erase_non_uniform(struct spi_nor *nor, loff_t addr)
+ {
+ /* Support 32 x 4KB sectors at bottom */
+ return spansion_erase_non_uniform(nor, addr, SPINOR_OP_BE_4K_4B, 0,
+ SZ_128K);
+ }
+
+ static int s25hx_t_setup(struct spi_nor *nor, const struct flash_info *info,
+ const struct spi_nor_flash_parameter *params)
+ {
+ int ret;
+ u8 cfr3v;
+
+ #ifdef CONFIG_SPI_FLASH_BAR
+ return -ENOTSUPP; /* Bank Address Register is not supported */
+ #endif
+ /*
+ * Read CFR3V to check if uniform sector is selected. If not, assign an
+ * erase hook that supports non-uniform erase.
+ */
+ ret = spansion_read_any_reg(nor, SPINOR_REG_ADDR_CFR3V, 0, &cfr3v);
+ if (ret)
+ return ret;
+ if (!(cfr3v & CFR3V_UNHYSA))
+ nor->erase = s25hx_t_erase_non_uniform;
+
+ /*
+ * For the multi-die package parts, the ready() hook is needed to check
+	 * all dies' status via the Read Any Register command.
+ */
+ if (nor->mtd.size > SZ_128M)
+ nor->ready = s25hx_t_mdp_ready;
+
+ return spi_nor_default_setup(nor, info, params);
+ }
+
+ static void s25hx_t_default_init(struct spi_nor *nor)
+ {
+ nor->setup = s25hx_t_setup;
+ }
+
+ static int s25hx_t_post_bfpt_fixup(struct spi_nor *nor,
+ const struct sfdp_parameter_header *header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params)
+ {
+ int ret;
+ u32 addr;
+ u8 cfr3v;
+
+ /* erase size in case it is set to 4K from BFPT */
+ nor->erase_opcode = SPINOR_OP_SE_4B;
+ nor->mtd.erasesize = nor->info->sector_size;
+
+ ret = set_4byte(nor, nor->info, 1);
+ if (ret)
+ return ret;
+ nor->addr_width = 4;
+
+ /*
+ * The page_size is set to 512B from BFPT, but it actually depends on
+ * the configuration register. Look up the CFR3V and determine the
+ * page_size. For multi-die package parts, use 512B only when the all
+ * dies are configured to 512B buffer.
+ */
+ for (addr = 0; addr < params->size; addr += SZ_128M) {
+ ret = spansion_read_any_reg(nor, addr + SPINOR_REG_ADDR_CFR3V,
+ 0, &cfr3v);
+ if (ret)
+ return ret;
+
+ if (!(cfr3v & CFR3V_PGMBUF)) {
+ params->page_size = 256;
+ return 0;
+ }
+ }
+ params->page_size = 512;
+
+ return 0;
+ }
+
+ static void s25hx_t_post_sfdp_fixup(struct spi_nor *nor,
+ struct spi_nor_flash_parameter *params)
+ {
+	/* READ_FAST_4B (0Ch) requires mode cycles */
+ params->reads[SNOR_CMD_READ_FAST].num_mode_clocks = 8;
+ /* PP_1_1_4 is not supported */
+ params->hwcaps.mask &= ~SNOR_HWCAPS_PP_1_1_4;
+ /* Use volatile register to enable quad */
+ params->quad_enable = s25hx_t_quad_enable;
+ }
+
+ static struct spi_nor_fixups s25hx_t_fixups = {
+ .default_init = s25hx_t_default_init,
+ .post_bfpt = s25hx_t_post_bfpt_fixup,
+ .post_sfdp = s25hx_t_post_sfdp_fixup,
+ };
+
+ static int s25fl256l_setup(struct spi_nor *nor, const struct flash_info *info,
+ const struct spi_nor_flash_parameter *params)
+ {
+ return -ENOTSUPP; /* Bank Address Register is not supported */
+ }
+
+ static void s25fl256l_default_init(struct spi_nor *nor)
+ {
+ nor->setup = s25fl256l_setup;
+ }
+
+ static struct spi_nor_fixups s25fl256l_fixups = {
+ .default_init = s25fl256l_default_init,
+ };
+ #endif
+
+ #ifdef CONFIG_SPI_FLASH_S28HS512T
+ /**
+ * spi_nor_cypress_octal_dtr_enable() - Enable octal DTR on Cypress flashes.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * This also sets the memory access latency cycles to 24 to allow the flash to
+ * run at up to 200MHz.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+ static int spi_nor_cypress_octal_dtr_enable(struct spi_nor *nor)
+ {
+ struct spi_mem_op op;
+ u8 buf;
+ u8 addr_width = 3;
+ int ret;
+
+ /* Use 24 dummy cycles for memory array reads. */
+ ret = write_enable(nor);
+ if (ret)
+ return ret;
+
+ buf = SPINOR_REG_CYPRESS_CFR2V_MEMLAT_11_24;
+ op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WR_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(addr_width, SPINOR_REG_CYPRESS_CFR2V, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, &buf, 1));
+ ret = spi_mem_exec_op(nor->spi, &op);
+ if (ret) {
+ dev_warn(nor->dev,
+ "failed to set default memory latency value: %d\n",
+ ret);
+ return ret;
+ }
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ nor->read_dummy = 24;
+
+ /* Set the octal and DTR enable bits. */
+ ret = write_enable(nor);
+ if (ret)
+ return ret;
+
+ buf = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN;
+ op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WR_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(addr_width, SPINOR_REG_CYPRESS_CFR5V, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, &buf, 1));
+ ret = spi_mem_exec_op(nor->spi, &op);
+ if (ret) {
+ dev_warn(nor->dev, "Failed to enable octal DTR mode\n");
+ return ret;
+ }
+
+ return 0;
+ }
+
+ static int s28hs512t_erase_non_uniform(struct spi_nor *nor, loff_t addr)
+ {
+ /* Factory default configuration: 32 x 4 KiB sectors at bottom. */
+ return spansion_erase_non_uniform(nor, addr, SPINOR_OP_S28_SE_4K,
+ 0, SZ_128K);
+ }
+
+ static int s28hs512t_setup(struct spi_nor *nor, const struct flash_info *info,
+ const struct spi_nor_flash_parameter *params)
+ {
+ struct spi_mem_op op;
+ u8 buf;
+ u8 addr_width = 3;
+ int ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ /*
+	 * Check CFR3V to see if non-uniform sector mode is selected. If it
+ * is, set the erase hook to the non-uniform erase procedure.
+ */
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RD_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(addr_width,
+ SPINOR_REG_CYPRESS_CFR3V, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, &buf, 1));
+
+ ret = spi_mem_exec_op(nor->spi, &op);
+ if (ret)
+ return ret;
+
+ if (!(buf & SPINOR_REG_CYPRESS_CFR3V_UNISECT))
+ nor->erase = s28hs512t_erase_non_uniform;
+
+ return spi_nor_default_setup(nor, info, params);
+ }
+
+ static void s28hs512t_default_init(struct spi_nor *nor)
+ {
+ nor->octal_dtr_enable = spi_nor_cypress_octal_dtr_enable;
+ nor->setup = s28hs512t_setup;
+ }
+
+ static void s28hs512t_post_sfdp_fixup(struct spi_nor *nor,
+ struct spi_nor_flash_parameter *params)
+ {
+ /*
+ * On older versions of the flash the xSPI Profile 1.0 table has the
+ * 8D-8D-8D Fast Read opcode as 0x00. But it actually should be 0xEE.
+ */
+ if (params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode == 0)
+ params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode =
+ SPINOR_OP_CYPRESS_RD_FAST;
+
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
+
+ /* This flash is also missing the 4-byte Page Program opcode bit. */
+ spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP],
+ SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
+ /*
+ * Since xSPI Page Program opcode is backward compatible with
+ * Legacy SPI, use Legacy SPI opcode there as well.
+ */
+ spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP_8_8_8_DTR],
+ SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR);
+
+ /*
+ * The xSPI Profile 1.0 table advertises the number of additional
+ * address bytes needed for Read Status Register command as 0 but the
+ * actual value for that is 4.
+ */
+ params->rdsr_addr_nbytes = 4;
+ }
+
+ static int s28hs512t_post_bfpt_fixup(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params)
+ {
+ struct spi_mem_op op;
+ u8 buf;
+ u8 addr_width = 3;
+ int ret;
+
+ /*
+ * The BFPT table advertises a 512B page size but the page size is
+ * actually configurable (with the default being 256B). Read from
+ * CFR3V[4] and set the correct size.
+ */
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RD_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(addr_width, SPINOR_REG_CYPRESS_CFR3V, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, &buf, 1));
+ ret = spi_mem_exec_op(nor->spi, &op);
+ if (ret)
+ return ret;
+
+ if (buf & SPINOR_REG_CYPRESS_CFR3V_PGSZ)
+ params->page_size = 512;
+ else
+ params->page_size = 256;
+
+ /*
+ * The BFPT advertises that it supports 4k erases, and the datasheet
+ * says the same. But 4k erases did not work when testing. So, use 256k
+ * erases for now.
+ */
+ nor->erase_opcode = SPINOR_OP_SE_4B;
+ nor->mtd.erasesize = 0x40000;
+
+ return 0;
+ }
+
+ static struct spi_nor_fixups s28hs512t_fixups = {
+ .default_init = s28hs512t_default_init,
+ .post_sfdp = s28hs512t_post_sfdp_fixup,
+ .post_bfpt = s28hs512t_post_bfpt_fixup,
+ };
+ #endif /* CONFIG_SPI_FLASH_S28HS512T */
+
+ #ifdef CONFIG_SPI_FLASH_MT35XU
+ static int spi_nor_micron_octal_dtr_enable(struct spi_nor *nor)
+ {
+ struct spi_mem_op op;
+ u8 buf;
+ u8 addr_width = 3;
+ int ret;
+
+ /* Set dummy cycles for Fast Read to the default of 20. */
+ ret = write_enable(nor);
+ if (ret)
+ return ret;
+
+ buf = 20;
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_MT_WR_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(addr_width, SPINOR_REG_MT_CFR1V, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, &buf, 1));
+ ret = spi_mem_exec_op(nor->spi, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ nor->read_dummy = 20;
+
+ ret = write_enable(nor);
+ if (ret)
+ return ret;
+
+ buf = SPINOR_MT_OCT_DTR;
+ op = (struct spi_mem_op)
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_MT_WR_ANY_REG, 1),
+ SPI_MEM_OP_ADDR(addr_width, SPINOR_REG_MT_CFR0V, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1, &buf, 1));
+ ret = spi_mem_exec_op(nor->spi, &op);
+ if (ret) {
+ dev_err(nor->dev, "Failed to enable octal DTR mode\n");
+ return ret;
+ }
+
+ return 0;
+ }
+
+ static void mt35xu512aba_default_init(struct spi_nor *nor)
+ {
+ nor->octal_dtr_enable = spi_nor_micron_octal_dtr_enable;
+ }
+
+ static void mt35xu512aba_post_sfdp_fixup(struct spi_nor *nor,
+ struct spi_nor_flash_parameter *params)
+ {
+ /* Set the Fast Read settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
+ spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_8_8_8_DTR],
+ 0, 20, SPINOR_OP_MT_DTR_RD,
+ SNOR_PROTO_8_8_8_DTR);
+
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
+
+ nor->cmd_ext_type = SPI_NOR_EXT_REPEAT;
+ params->rdsr_dummy = 8;
+ params->rdsr_addr_nbytes = 0;
+
+ /*
+ * The BFPT quad enable field is set to a reserved value so the quad
+ * enable function is ignored by spi_nor_parse_bfpt(). Make sure we
+ * disable it.
+ */
+ params->quad_enable = NULL;
+ }
+
+ static struct spi_nor_fixups mt35xu512aba_fixups = {
+ .default_init = mt35xu512aba_default_init,
+ .post_sfdp = mt35xu512aba_post_sfdp_fixup,
+ };
+ #endif /* CONFIG_SPI_FLASH_MT35XU */
+
+ /**
+  * spi_nor_octal_dtr_enable() - enable Octal DTR I/O if needed
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+ static int spi_nor_octal_dtr_enable(struct spi_nor *nor)
+ {
+ int ret;
+
+ if (!nor->octal_dtr_enable)
+ return 0;
+
+ if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
+ nor->write_proto == SNOR_PROTO_8_8_8_DTR))
+ return 0;
+
+ ret = nor->octal_dtr_enable(nor);
+ if (ret)
+ return ret;
+
+ nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
return 0;
}
{
int err;
+ if (nor->isparallel)
+ nor->spi->flags |= SPI_XFER_STRIPE;
+
+ err = spi_nor_octal_dtr_enable(nor);
+ if (err) {
+ dev_dbg(nor->dev, "Octal DTR mode not supported\n");
+ return err;
+ }
+
/*
* Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
* with the software protection bits set
set_4byte(nor, nor->info, 1);
}
- if (nor->octal_ddr_enable) {
- err = nor->octal_ddr_enable(nor);
++ if (nor->octal_dtr_enable) {
++ err = nor->octal_dtr_enable(nor);
+ if (err) {
+ dev_dbg(nor->dev, "octal DDR mode not supported\n");
+ return err;
+ }
+ }
+
return 0;
}
+#if defined(CONFIG_SPI_FLASH_STMICRO)
+static inline uint16_t min_lockable_sectors(struct spi_nor *nor,
+ uint16_t n_sectors)
+{
+ u16 lock_granularity;
+
+ lock_granularity = 1;
+
+ return lock_granularity;
+}
+
+static inline uint32_t get_protected_area_start(struct spi_nor *nor,
+ u8 lock_bits,
+ bool is_bottom)
+{
+ u16 n_sectors;
+ u32 sector_size;
+ u32 flash_size;
+ int ret;
+
+ sector_size = nor->sector_size >> nor->shift;
+ n_sectors = (nor->size >> nor->shift) / sector_size;
+ flash_size = nor->size >> nor->shift;
+
+ if (!is_bottom)
+ ret = flash_size - ((1 << (lock_bits - 1)) * sector_size *
+ min_lockable_sectors(nor, n_sectors));
+ else
+ ret = (1 << (lock_bits - 1)) * sector_size *
+ min_lockable_sectors(nor, n_sectors);
+
+ return ret;
+}
+
+static uint8_t min_protected_area_including_offset(struct spi_nor *nor,
+ u32 offset,
+ bool is_bottom)
+{
+ u8 lock_bits, lockbits_limit;
+
+ lockbits_limit = MAX_LOCKBITS;
+
+ for (lock_bits = 1; lock_bits < lockbits_limit; lock_bits++) {
+ if (!is_bottom) {
+ /* top protection */
+ if (offset >= get_protected_area_start(nor,
+ lock_bits,
+ is_bottom))
+ break;
+ } else {
+ /* bottom protection */
+ if (offset <= get_protected_area_start(nor,
+ lock_bits,
+ is_bottom))
+ break;
+ }
+ }
+ return lock_bits;
+}
+
+static int write_sr_modify_protection(struct spi_nor *nor, u8 status,
+ u8 lock_bits, bool is_bottom)
+{
+ u8 status_new, bp_mask;
+ int ret;
+
+ status_new = status & ~BP_MASK;
+ bp_mask = (lock_bits << BP_SHIFT) & BP_MASK;
+
+ status_new &= ~SR_BP3;
+ /* Protected area starts from top */
+ status_new &= ~SR_TB;
+
+ /* If bottom area is to be Protected set SR_TB */
+ if (is_bottom)
+ status_new |= SR_TB;
+
+ if (lock_bits > 7)
+ bp_mask |= SR_BP3;
+
+ status_new |= bp_mask;
+
+ write_enable(nor);
+
+ nor->spi->flags |= SPI_XFER_LOWER;
+
+ ret = write_sr(nor, status_new);
+ if (ret)
+ return ret;
+
+ if (nor->isparallel) {
+ nor->spi->flags |= SPI_XFER_UPPER;
+ ret = write_sr(nor, status_new);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void micron_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
+ uint64_t *len)
+{
+ u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ int shift = ffs(mask) - 1;
+ int pow;
+
+ if (!(sr & (mask | SR_BP3))) {
+ /* No protection */
+ *ofs = 0;
+ *len = 0;
+ } else {
+ pow = (sr & mask) >> shift;
+ pow |= sr & SR_BP3 ? BIT(3) : 0;
+
+ if (pow)
+ pow--;
+
+ *len = (nor->sector_size >> nor->shift) << pow;
+ if (*len >= (nor->size >> nor->shift))
+ *len = nor->size >> nor->shift;
+
+ if (!(sr & SR_TB))
+ *ofs = (nor->size >> nor->shift) - *len;
+ else
+ *ofs = 0;
+
+ debug("%s, ofs:0x%lx, len:0x%lx\n", __func__,
+ (unsigned long)*ofs, (unsigned long)*len);
+ }
+}
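+/*
+ * Worked example for micron_get_locked_range(), with assumed numbers:
+ * 64 KiB sectors, BP[2:0] = 3, SR_BP3 and SR_TB clear on a non-parallel
+ * flash give pow = 3 - 1 = 2, so len = 64 KiB << 2 = 256 KiB and the
+ * protected area is the top 256 KiB of the device.
+ */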
+
+static int micron_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+ u8 sr)
+{
+ loff_t lock_offs;
+ u64 lock_len;
+
+ ofs >>= nor->shift;
+ /* Avoid shifting of data length for size 1 */
+ if (len != 1)
+ len >>= nor->shift;
+
+ debug("%s, ofs:0x%lx, len:0x%lx\n", __func__,
+ (unsigned long)ofs,
+ (unsigned long)len);
+
+ micron_get_locked_range(nor, sr, &lock_offs, &lock_len);
+
+ if (!(sr & SR_TB))
+ return (ofs + len <= lock_offs + lock_len) &&
+ (ofs >= lock_offs);
+ else
+ return (ofs + len <= lock_offs + lock_len);
+}
+
+static int micron_flash_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+	int status_old, status_old_up = 0;
+ u8 lock_bits;
+ loff_t lock_len;
+ bool is_bottom = false; /* Use TOP protection by default */
+
+ if (nor->isparallel)
+ nor->spi->flags |= SPI_XFER_LOWER;
+
+ status_old = read_sr(nor);
+ if (status_old < 0)
+ return status_old;
+
+ if (nor->isparallel) {
+ nor->spi->flags |= SPI_XFER_UPPER;
+ status_old_up = read_sr(nor);
+ if (status_old_up < 0)
+ return status_old_up;
+ if ((status_old & BPTB_MASK) != (status_old_up & BPTB_MASK)) {
+ printf("BP is different in both flashes lo:0x%x, up:0x%x\n",
+ status_old, status_old_up);
+ return -EINVAL;
+ }
+ }
+
+ if (ofs < nor->size / 2)
+ is_bottom = true; /* Change it to bottom protection */
+
+ debug("Status in both flashes lo:0x%x, up:0x%x\n",
+ status_old, status_old_up);
+ ofs >>= nor->shift;
+ len >>= nor->shift;
+
+ if (!is_bottom)
+ lock_len = ofs;
+ else
+ lock_len = ofs + len;
+ lock_bits = min_protected_area_including_offset(nor, lock_len,
+ is_bottom);
+
+ if (lock_bits > ((status_old & (BP_MASK << BP_SHIFT)) >> 2))
+ write_sr_modify_protection(nor, status_old, lock_bits,
+ is_bottom);
+
+ return 0;
+}
+
+static int micron_flash_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ write_sr_modify_protection(nor, 0, 0, 0);
+ return 0;
+}
+
+static int micron_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ int status;
+
+ status = read_sr(nor);
+ if (status < 0)
+ return status;
+
+ return micron_is_locked_sr(nor, ofs, len, status);
+}
+#endif /* CONFIG_SPI_FLASH_STMICRO */
+
+ #ifdef CONFIG_SPI_FLASH_SOFT_RESET
+ /**
+ * spi_nor_soft_reset() - perform the JEDEC Software Reset sequence
+ * @nor: the spi_nor structure
+ *
+ * This function can be used to switch from Octal DTR mode to legacy mode on a
+ * flash that supports it. The soft reset is executed in Octal DTR mode.
+ *
+ * Return: 0 for success, -errno for failure.
+ */
+ static int spi_nor_soft_reset(struct spi_nor *nor)
+ {
+ struct spi_mem_op op;
+ int ret;
+ enum spi_nor_cmd_ext ext;
+
+ ext = nor->cmd_ext_type;
+ nor->cmd_ext_type = SPI_NOR_EXT_REPEAT;
+
+ op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRSTEN, 0),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DATA);
+ spi_nor_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR);
+ ret = spi_mem_exec_op(nor->spi, &op);
+ if (ret) {
+ dev_warn(nor->dev, "Software reset enable failed: %d\n", ret);
+ goto out;
+ }
+
+ op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRST, 0),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DATA);
+ spi_nor_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR);
+ ret = spi_mem_exec_op(nor->spi, &op);
+ if (ret) {
+ dev_warn(nor->dev, "Software reset failed: %d\n", ret);
+ goto out;
+ }
+
+ /*
+ * Software Reset is not instant, and the delay varies from flash to
+ * flash. Looking at a few flashes, most range somewhere below 100
+	 * microseconds. So, wait for 200us just to be sure.
+ */
+ udelay(SPI_NOR_SRST_SLEEP_LEN);
+
+ out:
+ nor->cmd_ext_type = ext;
+ return ret;
+ }
+ #endif /* CONFIG_SPI_FLASH_SOFT_RESET */
+
+ int spi_nor_remove(struct spi_nor *nor)
+ {
+ #ifdef CONFIG_SPI_FLASH_SOFT_RESET
+ if (nor->info->flags & SPI_NOR_OCTAL_DTR_READ &&
+ nor->flags & SNOR_F_SOFT_RESET)
+ return spi_nor_soft_reset(nor);
+ #endif
+
+ return 0;
+ }
+
+ void spi_nor_set_fixups(struct spi_nor *nor)
+ {
+ #ifdef CONFIG_SPI_FLASH_SPANSION
+ if (JEDEC_MFR(nor->info) == SNOR_MFR_CYPRESS) {
+ switch (nor->info->id[1]) {
+ case 0x2a: /* S25HL (QSPI, 3.3V) */
+ case 0x2b: /* S25HS (QSPI, 1.8V) */
+ nor->fixups = &s25hx_t_fixups;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (CONFIG_IS_ENABLED(SPI_FLASH_BAR) &&
+ !strcmp(nor->info->name, "s25fl256l"))
+ nor->fixups = &s25fl256l_fixups;
+ #endif
+
+ #ifdef CONFIG_SPI_FLASH_S28HS512T
+ if (!strcmp(nor->info->name, "s28hs512t"))
+ nor->fixups = &s28hs512t_fixups;
+ #endif
+
+ #ifdef CONFIG_SPI_FLASH_MT35XU
+ if (!strcmp(nor->info->name, "mt35xu512aba"))
+ nor->fixups = &mt35xu512aba_fixups;
+ #endif
+ }
+
int spi_nor_scan(struct spi_nor *nor)
{
struct spi_nor_flash_parameter params;
nor->write = spi_nor_write_data;
nor->read_reg = spi_nor_read_reg;
nor->write_reg = spi_nor_write_reg;
-
+ nor->setup = spi_nor_default_setup;
- if (spi->mode & SPI_RX_OCTAL) {
- hwcaps.mask |= SNOR_HWCAPS_READ_MASK;
-
- if (spi->mode & SPI_TX_OCTAL)
- hwcaps.mask |= SNOR_HWCAPS_PP_MASK;
- } else if (spi->mode & SPI_RX_QUAD) {
- hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
-
- if (spi->mode & SPI_TX_QUAD)
- hwcaps.mask |= (SNOR_HWCAPS_READ_1_4_4 |
- SNOR_HWCAPS_PP_1_1_4 |
- SNOR_HWCAPS_PP_1_4_4);
- } else if (spi->mode & SPI_RX_DUAL) {
- hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
-
- if (spi->mode & SPI_TX_DUAL)
- hwcaps.mask |= SNOR_HWCAPS_READ_1_2_2;
- }
+ #ifdef CONFIG_SPI_FLASH_SOFT_RESET_ON_BOOT
+ /*
+ * When the flash is handed to us in a stateful mode like 8D-8D-8D, it
+ * is difficult to detect the mode the flash is in. One option is to
+ * read SFDP in all modes and see which one gives the correct "SFDP"
+ * signature, but not all flashes support SFDP in 8D-8D-8D mode.
+ *
+ * Further, even if you detect the mode of the flash via SFDP, you
+ * still have the problem of actually reading the ID. The Read ID
+ * command is not standardized across flash vendors. Flashes can have
+ * different dummy cycles needed for reading the ID. Some flashes even
+ * expect a 4-byte dummy address with the Read ID command. All this
+ * information cannot be obtained from the SFDP table.
+ *
+ * So, perform a Software Reset sequence before reading the ID and
+ * initializing the flash. A Soft Reset will bring back the flash in
+ * its default protocol mode assuming no non-volatile configuration was
+ * set. This will let us detect the flash even if ROM hands it to us in
+ * Octal DTR mode.
+ *
+ * To accommodate cases where there is more than one flash on a board,
+ * and only one of them needs a soft reset, failure to reset is not
+ * made fatal, and we still try to read ID if possible.
+ */
+ spi_nor_soft_reset(nor);
+ #endif /* CONFIG_SPI_FLASH_SOFT_RESET_ON_BOOT */
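+	/*
+	 * Xilinx dual-flash handling: in dual-parallel mode every access is
+	 * split across both devices, so nor->shift (1 for parallel, else 0)
+	 * is used throughout this file to convert between combined and
+	 * per-device sizes and offsets.
+	 */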
+ nor->isparallel = (spi->option == SF_DUAL_PARALLEL_FLASH) ? 1 : 0;
+ nor->isstacked = (spi->option == SF_DUAL_STACKED_FLASH) ? 1 : 0;
+ nor->shift = nor->isparallel;
+
info = spi_nor_read_id(nor);
if (IS_ERR_OR_NULL(info))
return -ENOENT;
SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
+ {
+ INFO("gd25lq256d", 0xc86019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ INFO("gd25lx256e", 0xc86819, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_OCTAL_READ | SPI_NOR_4B_OPCODES)
+ },
#endif
#ifdef CONFIG_SPI_FLASH_ISSI /* ISSI */
/* ISSI */
{ INFO("is25lp128", 0x9d6018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ) },
{ INFO("is25lp256", 0x9d6019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ SECT_4K | SPI_NOR_DUAL_READ) },
{ INFO("is25lp512", 0x9d601a, 0, 64 * 1024, 1024,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { INFO("is25wp032d", 0x9d7016, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
- { INFO("is25wp064d", 0x9d7017, 0, 64 * 1024, 128, SPI_NOR_QUAD_READ) },
+ { INFO("is25lp01g", 0x9d601b, 0, 64 * 1024, 2048,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("is25wp008d", 0x9d7014, 0, 64 * 1024, 16, SPI_NOR_QUAD_READ) },
+ { INFO("is25wp016d", 0x9d7015, 0, 64 * 1024, 32, SPI_NOR_QUAD_READ) },
- { INFO("is25wp064", 0x9d7017, 0, 64 * 1024, 128,
+ { INFO("is25wp032", 0x9d7016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
++ { INFO("is25wp064d", 0x9d7017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ INFO("is25wp128", 0x9d7018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ INFO("is25wp256", 0x9d7019, 0, 64 * 1024, 512,
{ INFO("mx25u2033e", 0xc22532, 0, 64 * 1024, 4, SECT_4K) },
{ INFO("mx25u1635e", 0xc22535, 0, 64 * 1024, 32, SECT_4K) },
{ INFO("mx25u3235f", 0xc22536, 0, 4 * 1024, 1024, SECT_4K) },
- { INFO("mx25u6435f", 0xc22537, 0, 64 * 1024, 128, SECT_4K) },
- { INFO("mx25l12805d", 0xc22018, 0, 64 * 1024, 256, SECT_4K) },
- { INFO("mx25u12835f", 0xc22538, 0, 64 * 1024, 256, SECT_4K) },
- { INFO("mx25l12855e", 0xc22618, 0, 64 * 1024, 256, 0) },
+ { INFO("mx25u6435f", 0xc22537, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("mx25u12835f", 0xc22538, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("mx25l12805d", 0xc22018, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
+ { INFO("mx25l12855e", 0xc22618, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
{ INFO("mx25l25635e", 0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { INFO("mx25u25635f", 0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
+ { INFO("mx25u25635f", 0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { INFO("mx25l25655e", 0xc22619, 0, 64 * 1024, 512, SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { INFO("mx25v8035f", 0xc22314, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("mx25r1635f", 0xc22815, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { INFO("mx25l25655e", 0xc22619, 0, 64 * 1024, 512, 0) },
{ INFO("mx66l51235l", 0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { INFO("mx25u51245f", 0xc2953a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ INFO("mx66u51235f", 0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { INFO("mx66u1g45g", 0xc2253b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ INFO("mx66u2g45g", 0xc2253c, 0, 64 * 1024, 4096, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { INFO("mx66l1g45g", 0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("mx66l1g45g", 0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { INFO("mx66l2g45g", 0xc2201c, 0, 64 * 1024, 4096, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ INFO("mx25l1633e", 0xc22415, 0, 64 * 1024, 32, SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES | SECT_4K) },
{ INFO("mx25r6435f", 0xc22817, 0, 64 * 1024, 128, SECT_4K) },
+ { INFO("mx66uw2g345g", 0xc2943c, 0, 64 * 1024, 4096, SECT_4K | SPI_NOR_OCTAL_READ | SPI_NOR_4B_OPCODES) },
#endif
#ifdef CONFIG_SPI_FLASH_STMICRO /* STMICRO */
{ INFO("n25q064a", 0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
{ INFO("n25q128a11", 0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
{ INFO("n25q128a13", 0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
- { INFO6("mt25ql256a", 0x20ba19, 0x104400, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES | USE_FSR) },
{ INFO("n25q256a", 0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_FSR) },
- { INFO6("mt25qu256a", 0x20bb19, 0x104400, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES | USE_FSR) },
{ INFO("n25q256ax1", 0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ | USE_FSR) },
+ { INFO6("mt25qu512a", 0x20bb20, 0x104400, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES |
+ USE_FSR) },
{ INFO("n25q512a", 0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
- { INFO6("mt25ql512a", 0x20ba20, 0x104400, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ INFO("n25q512ax3", 0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
{ INFO("n25q00", 0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
{ INFO("n25q00a", 0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
- { INFO6("mt25qu512a", 0x20bb20, 0x104400, 64 * 1024, 1024,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES |
- USE_FSR) },
+ { INFO6("mt25ql256a", 0x20ba19, 0x104400, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES | USE_FSR) },
+ { INFO6("mt25ql512a", 0x20ba20, 0x104400, 64 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { INFO6("mt25qu256a", 0x20bb19, 0x104400, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES | USE_FSR) },
{ INFO("mt25ql01g", 0x21ba20, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
{ INFO("mt25qu02g", 0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
- { INFO("mt25ql02g", 0x20ba22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
- { INFO("mt35xu512aba", 0x2c5b1a, 0, 128 * 1024, 512, SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ | SPI_NOR_4B_OPCODES) },
- { INFO("mt35xl512aba", 0x2c5a1a, 0, 128 * 1024, 512, SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ | SPI_NOR_4B_OPCODES) },
- { INFO6("mt35xu01g", 0x2c5b1b, 0x104100, 128 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ | SPI_NOR_4B_OPCODES) },
- { INFO("mt35xu02g", 0x2c5b1c, 0, 128 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ | SPI_NOR_4B_OPCODES) },
+ { INFO("mt25ql02g", 0x20ba22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE | SPI_NOR_4B_OPCODES) },
+ #ifdef CONFIG_SPI_FLASH_MT35XU
++ { INFO("mt35xl512aba", 0x2c5a1a, 0, 128 * 1024, 512, USE_FSR | SPI_NOR_OCTAL_READ | SPI_NOR_4B_OPCODES | SPI_NOR_OCTAL_DTR_READ) },
+ { INFO("mt35xu512aba", 0x2c5b1a, 0, 128 * 1024, 512, USE_FSR | SPI_NOR_OCTAL_READ | SPI_NOR_4B_OPCODES | SPI_NOR_OCTAL_DTR_READ) },
+ #endif /* CONFIG_SPI_FLASH_MT35XU */
++ { INFO6("mt35xu01g", 0x2c5b1b, 0x104100, 128 * 1024, 1024, USE_FSR | SPI_NOR_OCTAL_READ | SPI_NOR_4B_OPCODES) },
+ { INFO("mt35xu02g", 0x2c5b1c, 0, 128 * 1024, 2048, USE_FSR | SPI_NOR_OCTAL_READ | SPI_NOR_4B_OPCODES) },
#endif
#ifdef CONFIG_SPI_FLASH_SPANSION /* SPANSION */
/* Spansion/Cypress -- single (large) sector size only, at least
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
- { INFO("w25q80", 0xef5014, 0, 64 * 1024, 16, SECT_4K) },
+ {
+ INFO("w25q512jv", 0xef7119, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ { INFO("w25q80", 0xef5014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ INFO("w25q80bl", 0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ INFO("w25q16cl", 0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("w25q32bv", 0xef4016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ INFO("w25q64cv", 0xef4017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { INFO("w25q128", 0xef4018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("w25q128", 0xef4018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
{ INFO("w25q256", 0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ INFO("w25m512jw", 0xef6119, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ INFO("w25m512jv", 0xef7119, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
--- /dev/null
- int cadence_qspi_apb_dma_read(struct cadence_spi_platdata *plat,
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) Copyright 2018 Xilinx
+ *
+ * Cadence QSPI controller DMA operations
+ */
+
+#include <clk.h>
+#include <common.h>
+#include <memalign.h>
+#include <wait_bit.h>
+#include <asm/io.h>
+#include <asm/gpio.h>
+#include <asm/cache.h>
+#include <cpu_func.h>
+#include <zynqmp_firmware.h>
+#include <asm/arch/hardware.h>
+#include "cadence_qspi.h"
+
+#define CMD_4BYTE_READ 0x13
+#define CMD_4BYTE_FAST_READ 0x0C
+
+void cadence_qspi_apb_enable_linear_mode(bool enable)
+{
+ if (CONFIG_IS_ENABLED(ZYNQMP_FIRMWARE)) {
+ if (enable)
+ /* ahb read mode */
+ xilinx_pm_request(PM_IOCTL, DEV_OSPI,
+ IOCTL_OSPI_MUX_SELECT,
+ PM_OSPI_MUX_SEL_LINEAR, 0, NULL);
+ else
+ /* DMA mode */
+ xilinx_pm_request(PM_IOCTL, DEV_OSPI,
+ IOCTL_OSPI_MUX_SELECT,
+ PM_OSPI_MUX_SEL_DMA, 0, NULL);
+ } else {
+ if (enable)
+ writel(readl(VERSAL_AXI_MUX_SEL) |
+ VERSAL_OSPI_LINEAR_MODE, VERSAL_AXI_MUX_SEL);
+ else
+ writel(readl(VERSAL_AXI_MUX_SEL) &
+ ~VERSAL_OSPI_LINEAR_MODE, VERSAL_AXI_MUX_SEL);
+ }
+}
+
- int cadence_qspi_apb_wait_for_dma_cmplt(struct cadence_spi_platdata *plat)
++int cadence_qspi_apb_dma_read(struct cadence_spi_plat *plat,
+ unsigned int n_rx, u8 *rxbuf)
+{
+ u32 reg, ret, rx_rem, bytes_to_dma, data;
+ u8 opcode, addr_bytes, dummy_cycles;
+
+ rx_rem = n_rx % 4;
+ bytes_to_dma = n_rx - rx_rem;
+
+ if (bytes_to_dma) {
+ cadence_qspi_apb_enable_linear_mode(false);
+ reg = readl(plat->regbase + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_ENBL_DMA;
+ writel(reg, plat->regbase + CQSPI_REG_CONFIG);
+
+ writel(bytes_to_dma, plat->regbase + CQSPI_REG_INDIRECTRDBYTES);
+
+ writel(CQSPI_DFLT_INDIR_TRIG_ADDR_RANGE,
+ plat->regbase + CQSPI_REG_INDIR_TRIG_ADDR_RANGE);
+ writel(CQSPI_DFLT_DMA_PERIPH_CFG,
+ plat->regbase + CQSPI_REG_DMA_PERIPH_CFG);
+ writel((unsigned long)rxbuf, plat->regbase +
+ CQSPI_DMA_DST_ADDR_REG);
+ writel(plat->trigger_address, plat->regbase +
+ CQSPI_DMA_SRC_RD_ADDR_REG);
+ writel(bytes_to_dma, plat->regbase +
+ CQSPI_DMA_DST_SIZE_REG);
+ flush_dcache_range((unsigned long)rxbuf,
+ (unsigned long)rxbuf + bytes_to_dma);
+ writel(CQSPI_DFLT_DST_CTRL_REG_VAL,
+ plat->regbase + CQSPI_DMA_DST_CTRL_REG);
+
+ /* Start the indirect read transfer */
+ writel(CQSPI_REG_INDIRECTRD_START, plat->regbase +
+ CQSPI_REG_INDIRECTRD);
+ /* Wait for dma to complete transfer */
+ ret = cadence_qspi_apb_wait_for_dma_cmplt(plat);
+ if (ret)
+ return ret;
+
+ /* Clear indirect completion status */
+ writel(CQSPI_REG_INDIRECTRD_DONE, plat->regbase +
+ CQSPI_REG_INDIRECTRD);
+ rxbuf += bytes_to_dma;
+ }
+
+ if (rx_rem) {
+ reg = readl(plat->regbase + CQSPI_REG_CONFIG);
+ reg &= ~CQSPI_REG_CONFIG_ENBL_DMA;
+ writel(reg, plat->regbase + CQSPI_REG_CONFIG);
+
+ reg = readl(plat->regbase + CQSPI_REG_INDIRECTRDSTARTADDR);
+ reg += bytes_to_dma;
+ writel(reg, plat->regbase + CQSPI_REG_CMDADDRESS);
+
+ addr_bytes = readl(plat->regbase + CQSPI_REG_SIZE) &
+ CQSPI_REG_SIZE_ADDRESS_MASK;
+
+ opcode = CMD_4BYTE_FAST_READ;
+ dummy_cycles = 8;
+ writel((dummy_cycles << CQSPI_REG_RD_INSTR_DUMMY_LSB) | opcode,
+ plat->regbase + CQSPI_REG_RD_INSTR);
+
+ reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
+ reg |= (addr_bytes & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) <<
+ CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
+ dummy_cycles = (readl(plat->regbase + CQSPI_REG_RD_INSTR) >>
+ CQSPI_REG_RD_INSTR_DUMMY_LSB) &
+ CQSPI_REG_RD_INSTR_DUMMY_MASK;
+ reg |= (dummy_cycles & CQSPI_REG_CMDCTRL_DUMMY_MASK) <<
+ CQSPI_REG_CMDCTRL_DUMMY_LSB;
+ reg |= (((rx_rem - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK) <<
+ CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
+ ret = cadence_qspi_apb_exec_flash_cmd(plat->regbase, reg);
+ if (ret)
+ return ret;
+
+ data = readl(plat->regbase + CQSPI_REG_CMDREADDATALOWER);
+ memcpy(rxbuf, &data, rx_rem);
+ }
+
+ return 0;
+}
+
++int cadence_qspi_apb_wait_for_dma_cmplt(struct cadence_spi_plat *plat)
+{
+ u32 timeout = CQSPI_DMA_TIMEOUT;
+
+ while (!(readl(plat->regbase + CQSPI_DMA_DST_I_STS_REG) &
+ CQSPI_DMA_DST_I_STS_DONE) && timeout--)
+ udelay(1);
+
+ if (!timeout) {
+ printf("DMA timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ writel(readl(plat->regbase + CQSPI_DMA_DST_I_STS_REG),
+ plat->regbase + CQSPI_DMA_DST_I_STS_REG);
+ return 0;
+}
+
+#if defined(CONFIG_DM_GPIO)
+int cadence_spi_versal_flash_reset(struct udevice *dev)
+{
+ struct gpio_desc gpio;
+ u32 reset_gpio;
+ int ret;
+
+	/* Request the reset GPIO and configure it as an output driven active (1) */
+ ret = gpio_request_by_name(dev, "reset-gpios", 0, &gpio,
+ GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
+ if (ret) {
+ printf("%s: unable to reset ospi flash device", __func__);
+ return ret;
+ }
+
+ reset_gpio = PMIO_NODE_ID_BASE + gpio.offset;
+
+ /* Request for pin */
+ xilinx_pm_request(PM_PINCTRL_REQUEST, reset_gpio, 0, 0, 0, NULL);
+
+ /* Enable hysteresis in cmos receiver */
+ xilinx_pm_request(PM_PINCTRL_CONFIG_PARAM_SET, reset_gpio,
+ PM_PINCTRL_CONFIG_SCHMITT_CMOS,
+ PM_PINCTRL_INPUT_TYPE_SCHMITT, 0, NULL);
+
+ /* Disable Tri-state */
+ xilinx_pm_request(PM_PINCTRL_CONFIG_PARAM_SET, reset_gpio,
+ PM_PINCTRL_CONFIG_TRI_STATE,
+ PM_PINCTRL_TRI_STATE_DISABLE, 0, NULL);
+ udelay(1);
+
+ /* Set value 0 to pin */
+ dm_gpio_set_value(&gpio, 0);
+ udelay(1);
+
+ /* Set value 1 to pin */
+ dm_gpio_set_value(&gpio, 1);
+ udelay(1);
+
+ return 0;
+}
+#else
+#define FLASH_RESET_GPIO 0xC
+int cadence_spi_versal_flash_reset(struct udevice *dev)
+{
+ /* CRP WPROT */
+ writel(0, 0xf126001c);
+ /* GPIO Reset */
+ writel(0, 0xf1260318);
+
+ /* disable IOU write protection */
+ writel(0, 0xff080728);
+
+ /* set direction as output */
+ writel((readl(0xf1020204) | BIT(FLASH_RESET_GPIO)), 0xf1020204);
+
+ /* Data output enable */
+ writel((readl(0xf1020208) | BIT(FLASH_RESET_GPIO)), 0xf1020208);
+
+ /* IOU SLCR write enable */
+ writel(0, 0xf1060828);
+
+ /* set MIO as GPIO */
+ writel(0x60, 0xf1060030);
+
+ /* Set value 1 to pin */
+ writel((readl(0xf1020040) | BIT(FLASH_RESET_GPIO)), 0xf1020040);
+ udelay(10);
+
+ /* Disable Tri-state */
+ writel((readl(0xf1060200) & ~BIT(FLASH_RESET_GPIO)), 0xf1060200);
+ udelay(1);
+
+ /* Set value 0 to pin */
+ writel((readl(0xf1020040) & ~BIT(FLASH_RESET_GPIO)), 0xf1020040);
+ udelay(10);
+
+ /* Set value 1 to pin */
+ writel((readl(0xf1020040) | BIT(FLASH_RESET_GPIO)), 0xf1020040);
+ udelay(10);
+
+ return 0;
+}
+#endif
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/sizes.h>
+#include <zynqmp_firmware.h>
#include "cadence_qspi.h"
+ #define NSEC_PER_SEC 1000000000L
+
#define CQSPI_STIG_READ 0
#define CQSPI_STIG_WRITE 1
#define CQSPI_READ 2
cadence_qspi_apb_controller_disable(priv->regbase);
/*
- * Calibration required for different current SCLK speed, requested
- * SCLK speed or chip select
+ * If the device tree already provides a read delay value, use that
+ * instead of calibrating.
*/
- if (priv->previous_hz != hz ||
- priv->qspi_calibrated_hz != hz ||
- priv->qspi_calibrated_cs != priv->cs) {
+ if (plat->read_delay >= 0) {
+ cadence_spi_write_speed(bus, hz);
+ cadence_qspi_apb_readdata_capture(priv->regbase, 1,
+ plat->read_delay);
+ } else if (priv->previous_hz != hz ||
+ priv->qspi_calibrated_hz != hz ||
- priv->qspi_calibrated_cs != spi_chip_select(bus)) {
++		   priv->qspi_calibrated_cs != spi_chip_select(bus)) {
+ /*
+ * Calibration required for different current SCLK speed,
+ * requested SCLK speed or chip select
+ */
err = spi_calibration(bus, hz);
if (err)
return err;
return 0;
}
+static int cadence_spi_child_pre_probe(struct udevice *bus)
+{
+ struct spi_slave *slave = dev_get_parent_priv(bus);
+ struct cadence_spi_priv *priv = dev_get_priv(bus->parent);
+
+ slave->bytemode = SPI_4BYTE_MODE;
+ slave->option = priv->is_dual;
+
+ return 0;
+}
+
+__weak int cadence_spi_versal_flash_reset(struct udevice *dev)
+{
+ return 0;
+}
+
static int cadence_spi_probe(struct udevice *bus)
{
- struct cadence_spi_platdata *plat = bus->platdata;
+ struct cadence_spi_plat *plat = dev_get_plat(bus);
struct cadence_spi_priv *priv = dev_get_priv(bus);
struct clk clk;
int ret;
priv->qspi_is_init = 1;
}
- return 0;
+ plat->wr_delay = 50 * DIV_ROUND_UP(NSEC_PER_SEC, plat->ref_clk_hz);
+
+ /* Reset ospi flash device */
+ return cadence_spi_versal_flash_reset(bus);
}
static int cadence_spi_remove(struct udevice *dev)
.id = UCLASS_SPI,
.of_match = cadence_spi_ids,
.ops = &cadence_spi_ops,
- .ofdata_to_platdata = cadence_spi_ofdata_to_platdata,
- .platdata_auto_alloc_size = sizeof(struct cadence_spi_platdata),
- .priv_auto_alloc_size = sizeof(struct cadence_spi_priv),
+ .of_to_plat = cadence_spi_of_to_plat,
+ .plat_auto = sizeof(struct cadence_spi_plat),
+ .priv_auto = sizeof(struct cadence_spi_priv),
.probe = cadence_spi_probe,
+ .child_pre_probe = cadence_spi_child_pre_probe,
.remove = cadence_spi_remove,
.flags = DM_FLAG_OS_PREPARE,
};
#define CQSPI_DECODER_MAX_CS 16
#define CQSPI_READ_CAPTURE_MAX_DELAY 16
- #define CQSPI_DUMMY_CLKS_PER_BYTE 8
+#define CQSPI_SINGLE_FLASH 0
+#define CQSPI_DUAL_STACKED_FLASH 1
+
+#define CQSPI_CS0 0
+#define CQSPI_CS1 1
+
+#define CQSPI_REG_POLL_US 1 /* 1us */
+#define CQSPI_REG_RETRY 10000
+#define CQSPI_POLL_IDLE_RETRY 3
+
+/* Transfer mode */
+#define CQSPI_INST_TYPE_SINGLE 0
+#define CQSPI_INST_TYPE_DUAL 1
+#define CQSPI_INST_TYPE_QUAD 2
+#define CQSPI_INST_TYPE_OCTAL 3
+
+#define CQSPI_STIG_DATA_LEN_MAX 8
+
- struct cadence_spi_platdata {
++#define CQSPI_DUMMY_CLKS_PER_BYTE 8
+#define CQSPI_DUMMY_BYTES_MAX 4
++#define CQSPI_DUMMY_CLKS_MAX 31
+
+/****************************************************************************
+ * Controller's configuration and status register (offset from QSPI_BASE)
+ ****************************************************************************/
+#define CQSPI_REG_CONFIG 0x00
+#define CQSPI_REG_CONFIG_ENABLE BIT(0)
+#define CQSPI_REG_CONFIG_CLK_POL BIT(1)
+#define CQSPI_REG_CONFIG_CLK_PHA BIT(2)
+#define CQSPI_REG_CONFIG_PHY_ENABLE_MASK BIT(3)
+#define CQSPI_REG_CONFIG_DIRECT BIT(7)
+#define CQSPI_REG_CONFIG_DECODE BIT(9)
+#define CQSPI_REG_CONFIG_ENBL_DMA BIT(15)
+#define CQSPI_REG_CONFIG_XIP_IMM BIT(18)
+#define CQSPI_REG_CONFIG_DTR_PROT_EN_MASK BIT(24)
+#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
+#define CQSPI_REG_CONFIG_BAUD_LSB 19
++#define CQSPI_REG_CONFIG_DTR_PROTO BIT(24)
++#define CQSPI_REG_CONFIG_DUAL_OPCODE BIT(30)
+#define CQSPI_REG_CONFIG_IDLE_LSB 31
+#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
+#define CQSPI_REG_CONFIG_BAUD_MASK 0xF
+
+#define CQSPI_REG_RD_INSTR 0x04
+#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0
+#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8
+#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12
+#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16
+#define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20
+#define CQSPI_REG_RD_INSTR_DUMMY_LSB 24
+#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3
+#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3
+#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3
+#define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F
+
+#define CQSPI_REG_WR_INSTR 0x08
+#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0
++#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12
+#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16
+
+#define CQSPI_REG_DELAY 0x0C
+#define CQSPI_REG_DELAY_TSLCH_LSB 0
+#define CQSPI_REG_DELAY_TCHSH_LSB 8
+#define CQSPI_REG_DELAY_TSD2D_LSB 16
+#define CQSPI_REG_DELAY_TSHSL_LSB 24
+#define CQSPI_REG_DELAY_TSLCH_MASK 0xFF
+#define CQSPI_REG_DELAY_TCHSH_MASK 0xFF
+#define CQSPI_REG_DELAY_TSD2D_MASK 0xFF
+#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF
+
+#define CQSPI_REG_RD_DATA_CAPTURE 0x10
+#define CQSPI_REG_RD_DATA_CAPTURE_BYPASS BIT(0)
+#define CQSPI_REG_READCAPTURE_DQS_ENABLE BIT(8)
+#define CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB 1
+#define CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK 0xF
+
+#define CQSPI_REG_SIZE 0x14
+#define CQSPI_REG_SIZE_ADDRESS_LSB 0
+#define CQSPI_REG_SIZE_PAGE_LSB 4
+#define CQSPI_REG_SIZE_BLOCK_LSB 16
+#define CQSPI_REG_SIZE_ADDRESS_MASK 0xF
+#define CQSPI_REG_SIZE_PAGE_MASK 0xFFF
+#define CQSPI_REG_SIZE_BLOCK_MASK 0x3F
+
+#define CQSPI_REG_SRAMPARTITION 0x18
+#define CQSPI_REG_INDIRECTTRIGGER 0x1C
+
+#define CQSPI_REG_REMAP 0x24
+#define CQSPI_REG_MODE_BIT 0x28
+
+#define CQSPI_REG_SDRAMLEVEL 0x2C
+#define CQSPI_REG_SDRAMLEVEL_RD_LSB 0
+#define CQSPI_REG_SDRAMLEVEL_WR_LSB 16
+#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF
+#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF
+
++#define CQSPI_REG_WR_COMPLETION_CTRL 0x38
++#define CQSPI_REG_WR_DISABLE_AUTO_POLL BIT(14)
++
+#define CQSPI_REG_IRQSTATUS 0x40
+#define CQSPI_REG_IRQMASK 0x44
+
+#define CQSPI_REG_INDIRECTRD 0x60
+#define CQSPI_REG_INDIRECTRD_START BIT(0)
+#define CQSPI_REG_INDIRECTRD_CANCEL BIT(1)
+#define CQSPI_REG_INDIRECTRD_INPROGRESS BIT(2)
+#define CQSPI_REG_INDIRECTRD_DONE BIT(5)
+
+#define CQSPI_REG_INDIRECTRDWATERMARK 0x64
+#define CQSPI_REG_INDIRECTRDSTARTADDR 0x68
+#define CQSPI_REG_INDIRECTRDBYTES 0x6C
+
+#define CQSPI_REG_CMDCTRL 0x90
+#define CQSPI_REG_CMDCTRL_EXECUTE BIT(0)
+#define CQSPI_REG_CMDCTRL_INPROGRESS BIT(1)
+#define CQSPI_REG_CMDCTRL_DUMMY_LSB 7
+#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12
+#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15
+#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16
+#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19
+#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20
+#define CQSPI_REG_CMDCTRL_RD_EN_LSB 23
+#define CQSPI_REG_CMDCTRL_OPCODE_LSB 24
+#define CQSPI_REG_CMDCTRL_DUMMY_MASK 0x1F
+#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7
+#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3
+#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7
+#define CQSPI_REG_CMDCTRL_OPCODE_MASK 0xFF
+
+#define CQSPI_REG_INDIRECTWR 0x70
+#define CQSPI_REG_INDIRECTWR_START BIT(0)
+#define CQSPI_REG_INDIRECTWR_CANCEL BIT(1)
+#define CQSPI_REG_INDIRECTWR_INPROGRESS BIT(2)
+#define CQSPI_REG_INDIRECTWR_DONE BIT(5)
+
+#define CQSPI_REG_INDIRECTWRWATERMARK 0x74
+#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78
+#define CQSPI_REG_INDIRECTWRBYTES 0x7C
+
+#define CQSPI_REG_CMDADDRESS 0x94
+#define CQSPI_REG_CMDREADDATALOWER 0xA0
+#define CQSPI_REG_CMDREADDATAUPPER 0xA4
+#define CQSPI_REG_CMDWRITEDATALOWER 0xA8
+#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC
+
++#define CQSPI_REG_OP_EXT_LOWER 0xE0
++#define CQSPI_REG_OP_EXT_READ_LSB 24
++#define CQSPI_REG_OP_EXT_WRITE_LSB 16
++#define CQSPI_REG_OP_EXT_STIG_LSB 0
++
+#define CQSPI_REG_PHY_CONFIG 0xB4
+#define CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK 0x40000000
+
+#define CQSPI_DMA_DST_ADDR_REG 0x1800
+#define CQSPI_DMA_DST_SIZE_REG 0x1804
+#define CQSPI_DMA_DST_STS_REG 0x1808
+#define CQSPI_DMA_DST_CTRL_REG 0x180C
+#define CQSPI_DMA_DST_I_STS_REG 0x1814
+#define CQSPI_DMA_DST_I_ENBL_REG 0x1818
+#define CQSPI_DMA_DST_I_DISBL_REG 0x181C
+#define CQSPI_DMA_DST_CTRL2_REG 0x1824
+#define CQSPI_DMA_DST_ADDR_MSB_REG 0x1828
+
+#define CQSPI_DMA_SRC_RD_ADDR_REG 0x1000
+
+#define CQSPI_REG_DMA_PERIPH_CFG 0x20
+#define CQSPI_REG_INDIR_TRIG_ADDR_RANGE 0x80
+#define CQSPI_DFLT_INDIR_TRIG_ADDR_RANGE 6
+#define CQSPI_DFLT_DMA_PERIPH_CFG 0x602
+#define CQSPI_DFLT_DST_CTRL_REG_VAL 0xF43FFA00
+
+#define CQSPI_DMA_DST_I_STS_DONE BIT(1)
+#define CQSPI_DMA_TIMEOUT 10000000
+
+#define CQSPI_REG_IS_IDLE(base) \
+ ((readl(base + CQSPI_REG_CONFIG) >> \
+ CQSPI_REG_CONFIG_IDLE_LSB) & 0x1)
+
+#define CQSPI_GET_RD_SRAM_LEVEL(reg_base) \
+ (((readl(reg_base + CQSPI_REG_SDRAMLEVEL)) >> \
+ CQSPI_REG_SDRAMLEVEL_RD_LSB) & CQSPI_REG_SDRAMLEVEL_RD_MASK)
+
+#define CQSPI_GET_WR_SRAM_LEVEL(reg_base) \
+ (((readl(reg_base + CQSPI_REG_SDRAMLEVEL)) >> \
+ CQSPI_REG_SDRAMLEVEL_WR_LSB) & CQSPI_REG_SDRAMLEVEL_WR_MASK)
+
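A minimal polling sketch built only on the macros defined above (CQSPI_REG_IS_IDLE, CQSPI_REG_POLL_US, CQSPI_REG_RETRY, CQSPI_POLL_IDLE_RETRY); it illustrates how the idle bit is typically consumed and follows the same "truthy on idle" convention as the driver's cadence_qspi_wait_idle(), but it is not a copy of that function:

#include <asm/io.h>		/* readl() */
#include <linux/delay.h>	/* udelay() */

/* Illustrative idle poll: require the idle bit to stay set for a few reads. */
static int example_wait_idle(void *reg_base)
{
	unsigned int stable = 0, retries = 0;

	while (retries < CQSPI_REG_RETRY) {
		if (CQSPI_REG_IS_IDLE(reg_base))
			stable++;
		else
			stable = 0;

		if (stable >= CQSPI_POLL_IDLE_RETRY)
			return 1;	/* controller is idle */

		udelay(CQSPI_REG_POLL_US);
		retries++;
	}

	return 0;			/* timed out */
}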
+ struct cadence_spi_plat {
unsigned int ref_clk_hz;
unsigned int max_hz;
void *regbase;
u32 tsd2d_ns;
u32 tchsh_ns;
u32 tslch_ns;
+ bool is_dma;
+ int is_dual;
+
+ /* Transaction protocol parameters. */
+ u8 inst_width;
+ u8 addr_width;
+ u8 data_width;
+ bool dtr;
};
struct cadence_spi_priv {
void cadence_qspi_apb_enter_xip(void *reg_base, char xip_dummy);
void cadence_qspi_apb_readdata_capture(void *reg_base,
unsigned int bypass, unsigned int delay);
- int cadence_qspi_apb_dma_read(struct cadence_spi_platdata *plat,
++int cadence_qspi_apb_dma_read(struct cadence_spi_plat *plat,
+ unsigned int n_rx, u8 *rxbuf);
+int cadence_qspi_apb_exec_flash_cmd(void *reg_base, unsigned int reg);
- int cadence_qspi_apb_wait_for_dma_cmplt(struct cadence_spi_platdata *plat);
++int cadence_qspi_apb_wait_for_dma_cmplt(struct cadence_spi_plat *plat);
+int cadence_spi_versal_flash_reset(struct udevice *dev);
+void cadence_qspi_apb_enable_linear_mode(bool enable);
#endif /* __CADENCE_QSPI_H__ */
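The flash-reset hook above is declared __weak in the core driver, so a platform glue file can provide a strong definition; a hedged sketch of what such an override could look like (the body is a placeholder for illustration, not the actual cadence_ospi_versal.c implementation):

#include <dm.h>
#include "cadence_qspi.h"

/* Strong definition overriding the core driver's __weak stub. */
int cadence_spi_versal_flash_reset(struct udevice *dev)
{
	/* e.g. toggle a board-specific reset GPIO or firmware-controlled pin */
	return 0;
}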
#include <malloc.h>
#include "cadence_qspi.h"
-#define CQSPI_REG_POLL_US 1 /* 1us */
-#define CQSPI_REG_RETRY 10000
-#define CQSPI_POLL_IDLE_RETRY 3
-
-/* Transfer mode */
-#define CQSPI_INST_TYPE_SINGLE 0
-#define CQSPI_INST_TYPE_DUAL 1
-#define CQSPI_INST_TYPE_QUAD 2
-#define CQSPI_INST_TYPE_OCTAL 3
-
-#define CQSPI_STIG_DATA_LEN_MAX 8
-
-#define CQSPI_DUMMY_CLKS_PER_BYTE 8
-#define CQSPI_DUMMY_CLKS_MAX 31
-
-/****************************************************************************
- * Controller's configuration and status register (offset from QSPI_BASE)
- ****************************************************************************/
-#define CQSPI_REG_CONFIG 0x00
-#define CQSPI_REG_CONFIG_ENABLE BIT(0)
-#define CQSPI_REG_CONFIG_CLK_POL BIT(1)
-#define CQSPI_REG_CONFIG_CLK_PHA BIT(2)
-#define CQSPI_REG_CONFIG_DIRECT BIT(7)
-#define CQSPI_REG_CONFIG_DECODE BIT(9)
-#define CQSPI_REG_CONFIG_XIP_IMM BIT(18)
-#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
-#define CQSPI_REG_CONFIG_BAUD_LSB 19
-#define CQSPI_REG_CONFIG_DTR_PROTO BIT(24)
-#define CQSPI_REG_CONFIG_DUAL_OPCODE BIT(30)
-#define CQSPI_REG_CONFIG_IDLE_LSB 31
-#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
-#define CQSPI_REG_CONFIG_BAUD_MASK 0xF
-
-#define CQSPI_REG_RD_INSTR 0x04
-#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0
-#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8
-#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12
-#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16
-#define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20
-#define CQSPI_REG_RD_INSTR_DUMMY_LSB 24
-#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3
-#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3
-#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3
-#define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F
-
-#define CQSPI_REG_WR_INSTR 0x08
-#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0
-#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12
-#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16
-
-#define CQSPI_REG_DELAY 0x0C
-#define CQSPI_REG_DELAY_TSLCH_LSB 0
-#define CQSPI_REG_DELAY_TCHSH_LSB 8
-#define CQSPI_REG_DELAY_TSD2D_LSB 16
-#define CQSPI_REG_DELAY_TSHSL_LSB 24
-#define CQSPI_REG_DELAY_TSLCH_MASK 0xFF
-#define CQSPI_REG_DELAY_TCHSH_MASK 0xFF
-#define CQSPI_REG_DELAY_TSD2D_MASK 0xFF
-#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF
-
-#define CQSPI_REG_RD_DATA_CAPTURE 0x10
-#define CQSPI_REG_RD_DATA_CAPTURE_BYPASS BIT(0)
-#define CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB 1
-#define CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK 0xF
-
-#define CQSPI_REG_SIZE 0x14
-#define CQSPI_REG_SIZE_ADDRESS_LSB 0
-#define CQSPI_REG_SIZE_PAGE_LSB 4
-#define CQSPI_REG_SIZE_BLOCK_LSB 16
-#define CQSPI_REG_SIZE_ADDRESS_MASK 0xF
-#define CQSPI_REG_SIZE_PAGE_MASK 0xFFF
-#define CQSPI_REG_SIZE_BLOCK_MASK 0x3F
-
-#define CQSPI_REG_SRAMPARTITION 0x18
-#define CQSPI_REG_INDIRECTTRIGGER 0x1C
-
-#define CQSPI_REG_REMAP 0x24
-#define CQSPI_REG_MODE_BIT 0x28
-
-#define CQSPI_REG_SDRAMLEVEL 0x2C
-#define CQSPI_REG_SDRAMLEVEL_RD_LSB 0
-#define CQSPI_REG_SDRAMLEVEL_WR_LSB 16
-#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF
-#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF
-
-#define CQSPI_REG_WR_COMPLETION_CTRL 0x38
-#define CQSPI_REG_WR_DISABLE_AUTO_POLL BIT(14)
-
-#define CQSPI_REG_IRQSTATUS 0x40
-#define CQSPI_REG_IRQMASK 0x44
-
-#define CQSPI_REG_INDIRECTRD 0x60
-#define CQSPI_REG_INDIRECTRD_START BIT(0)
-#define CQSPI_REG_INDIRECTRD_CANCEL BIT(1)
-#define CQSPI_REG_INDIRECTRD_INPROGRESS BIT(2)
-#define CQSPI_REG_INDIRECTRD_DONE BIT(5)
-
-#define CQSPI_REG_INDIRECTRDWATERMARK 0x64
-#define CQSPI_REG_INDIRECTRDSTARTADDR 0x68
-#define CQSPI_REG_INDIRECTRDBYTES 0x6C
-
-#define CQSPI_REG_CMDCTRL 0x90
-#define CQSPI_REG_CMDCTRL_EXECUTE BIT(0)
-#define CQSPI_REG_CMDCTRL_INPROGRESS BIT(1)
-#define CQSPI_REG_CMDCTRL_DUMMY_LSB 7
-#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12
-#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15
-#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16
-#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19
-#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20
-#define CQSPI_REG_CMDCTRL_RD_EN_LSB 23
-#define CQSPI_REG_CMDCTRL_OPCODE_LSB 24
-#define CQSPI_REG_CMDCTRL_DUMMY_MASK 0x1F
-#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7
-#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3
-#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7
-#define CQSPI_REG_CMDCTRL_OPCODE_MASK 0xFF
-
-#define CQSPI_REG_INDIRECTWR 0x70
-#define CQSPI_REG_INDIRECTWR_START BIT(0)
-#define CQSPI_REG_INDIRECTWR_CANCEL BIT(1)
-#define CQSPI_REG_INDIRECTWR_INPROGRESS BIT(2)
-#define CQSPI_REG_INDIRECTWR_DONE BIT(5)
-
-#define CQSPI_REG_INDIRECTWRWATERMARK 0x74
-#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78
-#define CQSPI_REG_INDIRECTWRBYTES 0x7C
-
-#define CQSPI_REG_CMDADDRESS 0x94
-#define CQSPI_REG_CMDREADDATALOWER 0xA0
-#define CQSPI_REG_CMDREADDATAUPPER 0xA4
-#define CQSPI_REG_CMDWRITEDATALOWER 0xA8
-#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC
-
-#define CQSPI_REG_OP_EXT_LOWER 0xE0
-#define CQSPI_REG_OP_EXT_READ_LSB 24
-#define CQSPI_REG_OP_EXT_WRITE_LSB 16
-#define CQSPI_REG_OP_EXT_STIG_LSB 0
-
-#define CQSPI_REG_IS_IDLE(base) \
- ((readl(base + CQSPI_REG_CONFIG) >> \
- CQSPI_REG_CONFIG_IDLE_LSB) & 0x1)
-
-#define CQSPI_GET_RD_SRAM_LEVEL(reg_base) \
- (((readl(reg_base + CQSPI_REG_SDRAMLEVEL)) >> \
- CQSPI_REG_SDRAMLEVEL_RD_LSB) & CQSPI_REG_SDRAMLEVEL_RD_MASK)
-
-#define CQSPI_GET_WR_SRAM_LEVEL(reg_base) \
- (((readl(reg_base + CQSPI_REG_SDRAMLEVEL)) >> \
- CQSPI_REG_SDRAMLEVEL_WR_LSB) & CQSPI_REG_SDRAMLEVEL_WR_MASK)
+__weak int spi_nor_wait_till_ready(struct spi_nor *nor)
+{
+ return 0;
+}
+
- __weak int cadence_qspi_apb_dma_read(struct cadence_spi_platdata *plat,
++__weak int cadence_qspi_apb_dma_read(struct cadence_spi_plat *plat,
+ unsigned int n_rx, u8 *rxbuf)
+{
+ return 0;
+}
+
+__weak
- int cadence_qspi_apb_wait_for_dma_cmplt(struct cadence_spi_platdata *plat)
++int cadence_qspi_apb_wait_for_dma_cmplt(struct cadence_spi_plat *plat)
+{
+ return 0;
+}
void cadence_qspi_apb_controller_enable(void *reg_base)
{
const void *buf = op->data.buf.out;
size_t len = op->data.nbytes;
- cadence_qspi_apb_enable_linear_mode(true);
- if (plat->use_dac_mode && (to + len < plat->ahbsize)) {
+ /*
+ * Some flashes like the Cypress Semper flash expect a dummy 4-byte
+ * address (all 0s) with the read status register command in DTR mode.
+ * But this controller does not support sending dummy address bytes to
+ * the flash when it is polling the write completion register in DTR
+ * mode. So, we can not use direct mode when in DTR mode for writing
+ * data.
+ */
++ cadence_qspi_apb_enable_linear_mode(true);
+ if (!plat->dtr && plat->use_dac_mode && (to + len < plat->ahbsize)) {
memcpy_toio(plat->ahbbase + to, buf, len);
if (!cadence_qspi_wait_idle(plat->regbase))
return -EIO;
return 0;
}
- struct dm_spi_slave_platdata *slave_plat =
- dev_get_parent_platdata(spi->dev);
+static void xilinx_spi_startup_block(struct spi_slave *spi)
+{
- struct dm_spi_slave_platdata *slave_plat =
- dev_get_parent_platdata(spi->dev);
++ struct dm_spi_slave_plat *slave_plat =
++ dev_get_parent_plat(spi->dev);
+ unsigned char txp;
+ unsigned char rxp[8];
+
+ /*
+ * Perform a dummy read as a workaround for
+ * the startup block issue.
+ */
+ spi_cs_activate(spi->dev, slave_plat->cs);
+ txp = 0x9f;
+ start_transfer(spi, (void *)&txp, NULL, 1);
+
+ start_transfer(spi, NULL, (void *)rxp, 6);
+
+ spi_cs_deactivate(spi->dev);
+}
+
+static int xilinx_spi_mem_exec_op(struct spi_slave *spi,
+ const struct spi_mem_op *op)
+{
++ struct dm_spi_slave_plat *slave_plat =
++ dev_get_parent_plat(spi->dev);
+ static u32 startup;
+ u32 dummy_len, ret;
+
+ /*
+ * This is the workaround for the startup block issue in
+ * the SPI controller. The SPI clock is routed to the flash
+ * through the STARTUP block, which does not provide the
+ * clock as soon as the QSPI issues a command, so the first
+ * command fails.
+ */
+ if (!startup) {
+ xilinx_spi_startup_block(spi);
+ startup++;
+ }
+
+ spi_cs_activate(spi->dev, slave_plat->cs);
+
+ if (op->cmd.opcode) {
+ ret = start_transfer(spi, (void *)&op->cmd.opcode, NULL, 1);
+ if (ret)
+ goto done;
+ }
+ if (op->addr.nbytes) {
+ int i;
+ u8 addr_buf[4];
+
+ for (i = 0; i < op->addr.nbytes; i++)
+ addr_buf[i] = op->addr.val >>
+ (8 * (op->addr.nbytes - i - 1));
+
+ ret = start_transfer(spi, (void *)addr_buf, NULL,
+ op->addr.nbytes);
+ if (ret)
+ goto done;
+ }
+ if (op->dummy.nbytes) {
+ dummy_len = op->dummy.nbytes * op->data.buswidth;
+ ret = start_transfer(spi, NULL, NULL, dummy_len);
+ if (ret)
+ goto done;
+ }
+ if (op->data.nbytes) {
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ ret = start_transfer(spi, NULL,
+ op->data.buf.in, op->data.nbytes);
+ } else {
+ ret = start_transfer(spi, op->data.buf.out,
+ NULL, op->data.nbytes);
+ }
+ if (ret)
+ goto done;
+ }
+done:
+ spi_cs_deactivate(spi->dev);
+
+ return ret;
+}
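For context, the exec_op hook above is driven through the generic spi-mem layer; a minimal caller sketch, assuming an already-probed spi_slave, reading the JEDEC ID with the same 0x9f opcode the startup workaround uses:

#include <spi.h>
#include <spi-mem.h>

/* Illustrative spi-mem usage; error handling trimmed for brevity. */
static int example_read_jedec_id(struct spi_slave *slave, u8 id[3])
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(3, id, 1));

	return spi_mem_exec_op(slave, &op);
}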
+
static int xilinx_spi_set_speed(struct udevice *bus, uint speed)
{
struct xilinx_spi_priv *priv = dev_get_priv(bus);
.id = UCLASS_SPI,
.of_match = xilinx_spi_ids,
.ops = &xilinx_spi_ops,
- .priv_auto_alloc_size = sizeof(struct xilinx_spi_priv),
+ .priv_auto = sizeof(struct xilinx_spi_priv),
.probe = xilinx_spi_probe,
+ .child_pre_probe = xilinx_spi_child_pre_probe,
};
#include <log.h>
#include <malloc.h>
#include <spi.h>
+#include <spi_flash.h>
+ #include <asm/global_data.h>
#include <asm/io.h>
-#include <linux/bitops.h>
+#include <clk.h>
+#include <spi-mem.h>
+#include "../mtd/spi/sf_internal.h"
DECLARE_GLOBAL_DATA_PTR;
int bytes_to_transfer;
int bytes_to_receive;
unsigned int is_inst;
+ unsigned int is_dual;
+ unsigned int is_dio;
+ unsigned int u_page;
unsigned cs_change:1;
+ unsigned is_strip:1;
};
- static int zynq_qspi_ofdata_to_platdata(struct udevice *bus)
+ static int zynq_qspi_of_to_plat(struct udevice *bus)
{
- struct zynq_qspi_platdata *plat = bus->platdata;
+ struct zynq_qspi_plat *plat = dev_get_plat(bus);
const void *blob = gd->fdt_blob;
int node = dev_of_offset(bus);
+ int is_dual;
+ u32 mode = 0;
+ int offset;
+ u32 value;
plat->regs = (struct zynq_qspi_regs *)fdtdec_get_addr(blob,
node, "reg");
writel(ZYNQ_QSPI_ENR_SPI_EN_MASK, &regs->enr);
}
- struct zynq_qspi_platdata *plat = dev_get_platdata(bus->parent);
+static int zynq_qspi_child_pre_probe(struct udevice *bus)
+{
+ struct spi_slave *slave = dev_get_parent_priv(bus);
+ struct zynq_qspi_priv *priv = dev_get_priv(bus->parent);
++ struct zynq_qspi_plat *plat = dev_get_plat(bus->parent);
+
+ slave->option = priv->is_dual;
+ slave->dio = priv->is_dio;
+ slave->mode = plat->tx_rx_mode;
+
+ return 0;
+}
+
static int zynq_qspi_probe(struct udevice *bus)
{
- struct zynq_qspi_platdata *plat = dev_get_platdata(bus);
+ struct zynq_qspi_plat *plat = dev_get_plat(bus);
struct zynq_qspi_priv *priv = dev_get_priv(bus);
struct clk clk;
unsigned long clock;
return 0;
}
- op_len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
+bool update_stripe(const struct spi_mem_op *op)
+{
+ if (op->cmd.opcode == SPINOR_OP_BE_4K ||
+ op->cmd.opcode == SPINOR_OP_CHIP_ERASE ||
+ op->cmd.opcode == SPINOR_OP_SE ||
+ op->cmd.opcode == SPINOR_OP_WREAR
+ )
+ return false;
+
+ return true;
+}
+
+static int zynq_qspi_exec_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ struct udevice *bus = slave->dev->parent;
+ struct zynq_qspi_priv *priv = dev_get_priv(bus);
+ int op_len, pos = 0, ret, i;
+ unsigned int flag = 0;
+ const u8 *tx_buf = NULL;
+ u8 *rx_buf = NULL;
+
+ if (op->data.nbytes) {
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ rx_buf = op->data.buf.in;
+ else
+ tx_buf = op->data.buf.out;
+ }
+
++ op_len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
+
+ u8 op_buf[op_len];
+
+ op_buf[pos++] = op->cmd.opcode;
+
+ if (op->addr.nbytes) {
+ for (i = 0; i < op->addr.nbytes; i++)
+ op_buf[pos + i] = op->addr.val >>
+ (8 * (op->addr.nbytes - i - 1));
+
+ pos += op->addr.nbytes;
+ }
+
+ if (op->dummy.nbytes) {
+ memset(op_buf + pos, 0xff, op->dummy.nbytes);
+ slave->dummy_bytes = op->dummy.nbytes;
+ }
+
+ if (slave->flags & SPI_XFER_U_PAGE)
+ flag |= SPI_XFER_U_PAGE;
+
+ /* 1st transfer: opcode + address + dummy cycles */
+ /* Make sure to set END bit if no tx or rx data messages follow */
+ if (!tx_buf && !rx_buf)
+ flag |= SPI_XFER_END;
+
+ ret = zynq_qspi_xfer(slave->dev, op_len * 8, op_buf, NULL,
+ flag | SPI_XFER_BEGIN);
+ if (ret)
+ return ret;
+
+ slave->dummy_bytes = 0;
+ priv->is_strip = update_stripe(op);
+
+ /* 2nd transfer: rx or tx data path */
+ if (tx_buf || rx_buf) {
+ ret = zynq_qspi_xfer(slave->dev, op->data.nbytes * 8, tx_buf,
+ rx_buf, flag | SPI_XFER_END);
+ if (ret)
+ return ret;
+ }
+
+ slave->flags &= ~SPI_XFER_MASK;
+ spi_release_bus(slave);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops zynq_qspi_mem_ops = {
+ .exec_op = zynq_qspi_exec_op,
+};
+
static const struct dm_spi_ops zynq_qspi_ops = {
.claim_bus = zynq_qspi_claim_bus,
.release_bus = zynq_qspi_release_bus,
.id = UCLASS_SPI,
.of_match = zynq_qspi_ids,
.ops = &zynq_qspi_ops,
- .ofdata_to_platdata = zynq_qspi_ofdata_to_platdata,
- .platdata_auto_alloc_size = sizeof(struct zynq_qspi_platdata),
- .priv_auto_alloc_size = sizeof(struct zynq_qspi_priv),
+ .of_to_plat = zynq_qspi_of_to_plat,
+ .plat_auto = sizeof(struct zynq_qspi_plat),
+ .priv_auto = sizeof(struct zynq_qspi_priv),
.probe = zynq_qspi_probe,
+ .child_pre_probe = zynq_qspi_child_pre_probe,
};
unsigned int len;
int bytes_to_transfer;
int bytes_to_receive;
- unsigned int is_inst;
+ unsigned int is_dual;
+ unsigned int u_page;
+ unsigned int bus;
+ unsigned int stripe;
- unsigned int cs_change:1;
- unsigned int dummy_bytes;
- unsigned int tx_rx_mode;
+ unsigned int io_mode;
++ unsigned int flags;
+ const struct spi_mem_op *op;
};
- static u8 last_cmd;
-
- static int zynqmp_qspi_ofdata_to_platdata(struct udevice *bus)
+ static int zynqmp_qspi_of_to_plat(struct udevice *bus)
{
- struct zynqmp_qspi_platdata *plat = bus->platdata;
+ struct zynqmp_qspi_plat *plat = dev_get_plat(bus);
+ int is_dual;
debug("%s\n", __func__);
writel(GQSPI_FIFO_THRESHOLD, &regs->rxftr);
writel(GQSPI_GENFIFO_THRESHOLD, &regs->gqfthr);
writel(GQSPI_GFIFO_ALL_INT_MASK, &regs->isr);
- writel(0x0, &regs->enbr);
+ writel(~GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);
config_reg = readl(&regs->confr);
- config_reg &= ~(GQSPI_GFIFO_STRT_MODE_MASK |
- GQSPI_CONFIG_MODE_EN_MASK);
- config_reg |= GQSPI_CONFIG_DMA_MODE | GQSPI_GFIFO_WP_HOLD |
- GQSPI_DFLT_BAUD_RATE_DIV | GQSPI_GFIFO_STRT_MODE_MASK;
+ config_reg &= ~(GQSPI_CONFIG_MODE_EN_MASK);
+ config_reg |= GQSPI_GFIFO_WP_HOLD | GQSPI_DFLT_BAUD_RATE_DIV;
+ config_reg |= GQSPI_GFIFO_STRT_MODE_MASK;
+ if (!priv->io_mode)
+ config_reg |= GQSPI_CONFIG_DMA_MODE;
+
writel(config_reg, &regs->confr);
writel(GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);
return 0;
}
+static int zynqmp_qspi_child_pre_probe(struct udevice *bus)
+{
+ struct spi_slave *slave = dev_get_parent_priv(bus);
+ struct zynqmp_qspi_priv *priv = dev_get_priv(bus->parent);
+
+ slave->option = priv->is_dual;
+ slave->bytemode = SPI_4BYTE_MODE;
+
+ return 0;
+}
+
static int zynqmp_qspi_probe(struct udevice *bus)
{
- struct zynqmp_qspi_platdata *plat = dev_get_platdata(bus);
+ struct zynqmp_qspi_plat *plat = dev_get_plat(bus);
struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
struct clk clk;
unsigned long clock;
debug("%s\n", __func__);
/* Set the SPI Clock phase and polarities */
confr = readl(&regs->confr);
-- confr &= ~(GQSPI_CONFIG_CPHA_MASK |
-- GQSPI_CONFIG_CPOL_MASK);
++ confr &= ~(GQSPI_CONFIG_CPHA_MASK | GQSPI_CONFIG_CPOL_MASK);
if (mode & SPI_CPHA)
confr |= GQSPI_CONFIG_CPHA_MASK;
static void zynqmp_qspi_genfifo_cmd(struct zynqmp_qspi_priv *priv)
{
- u8 command = 1;
- const struct spi_mem_op *op = priv->op;
u32 gen_fifo_cmd;
- u32 bytecount = 0;
+ u8 i, dummy_cycles, addr;
++ const struct spi_mem_op *op = priv->op;
- if (priv->dummy_bytes)
- priv->len -= priv->dummy_bytes;
+ /* Send opcode */
+ gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
+ gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(op->cmd.buswidth);
+ gen_fifo_cmd |= GQSPI_GFIFO_TX;
+ gen_fifo_cmd |= op->cmd.opcode;
+ zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);
+
+ /* Send address */
+ for (i = 0; i < op->addr.nbytes; i++) {
+ addr = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
- while (priv->len) {
gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
+ gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(op->addr.buswidth);
gen_fifo_cmd |= GQSPI_GFIFO_TX;
-
- if (command) {
- command = 0;
- last_cmd = *(u8 *)priv->tx_buf;
- }
-
- gen_fifo_cmd |= GQSPI_SPI_MODE_SPI;
- gen_fifo_cmd |= *(u8 *)priv->tx_buf;
- bytecount++;
- priv->len--;
- priv->tx_buf = (u8 *)priv->tx_buf + 1;
+ gen_fifo_cmd |= addr;
debug("GFIFO_CMD_Cmd = 0x%x\n", gen_fifo_cmd);
int ret = 0;
gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
- gen_fifo_cmd |= GQSPI_GFIFO_TX |
- GQSPI_GFIFO_DATA_XFR_MASK;
+ gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(priv->op->data.buswidth);
- gen_fifo_cmd |= GQSPI_GFIFO_TX |
- GQSPI_GFIFO_DATA_XFR_MASK;
++ gen_fifo_cmd |= GQSPI_GFIFO_TX | GQSPI_GFIFO_DATA_XFR_MASK;
+
+ if (priv->stripe)
+ gen_fifo_cmd |= GQSPI_GFIFO_STRIPE_MASK;
- if (priv->tx_nbits == GQSPI_SELECT_MODE_QUADSPI)
- gen_fifo_cmd |= GQSPI_SPI_MODE_QSPI;
- else if (priv->tx_nbits == GQSPI_SELECT_MODE_DUALSPI)
- gen_fifo_cmd |= GQSPI_SPI_MODE_DUAL_SPI;
- else
- gen_fifo_cmd |= GQSPI_SPI_MODE_SPI;
-
while (priv->len) {
len = zynqmp_qspi_calc_exp(priv, &gen_fifo_cmd);
zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);
debug("GFIFO_CMD_TX:0x%x\n", gen_fifo_cmd);
if (gen_fifo_cmd & GQSPI_GFIFO_EXP_MASK)
-- ret = zynqmp_qspi_fill_tx_fifo(priv,
-- 1 << len);
++ ret = zynqmp_qspi_fill_tx_fifo(priv, 1 << len);
else
-- ret = zynqmp_qspi_fill_tx_fifo(priv,
-- len);
++ ret = zynqmp_qspi_fill_tx_fifo(priv, len);
if (ret)
return ret;
u32 actuallen = priv->len;
gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
- gen_fifo_cmd |= GQSPI_GFIFO_RX |
- GQSPI_GFIFO_DATA_XFR_MASK;
-
- if (priv->rx_nbits == GQSPI_SELECT_MODE_QUADSPI)
- gen_fifo_cmd |= GQSPI_SPI_MODE_QSPI;
- else if (priv->rx_nbits == GQSPI_SELECT_MODE_DUALSPI)
- gen_fifo_cmd |= GQSPI_SPI_MODE_DUAL_SPI;
- else
- gen_fifo_cmd |= GQSPI_SPI_MODE_SPI;
+ gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(priv->op->data.buswidth);
- gen_fifo_cmd |= GQSPI_GFIFO_RX |
- GQSPI_GFIFO_DATA_XFR_MASK;
++ gen_fifo_cmd |= GQSPI_GFIFO_RX | GQSPI_GFIFO_DATA_XFR_MASK;
+
+ if (priv->stripe)
+ gen_fifo_cmd |= GQSPI_GFIFO_STRIPE_MASK;
/*
* Check if the receive buffer is 4-byte aligned and the length
* is a multiple of four bytes, as we use DMA to receive.
*/
- if (!((unsigned long)priv->rx_buf & (GQSPI_DMA_ALIGN - 1)) &&
- !(actuallen % GQSPI_DMA_ALIGN)) {
+ if ((!((unsigned long)priv->rx_buf & (GQSPI_DMA_ALIGN - 1)) &&
+ !(actuallen % GQSPI_DMA_ALIGN)) || priv->io_mode) {
buf = (u32 *)priv->rx_buf;
- return zynqmp_qspi_start_dma(priv, gen_fifo_cmd, buf);
+ if (priv->io_mode)
+ return zynqmp_qspi_start_io(priv, gen_fifo_cmd, buf);
+ else
+ return zynqmp_qspi_start_dma(priv, gen_fifo_cmd, buf);
}
-- ALLOC_CACHE_ALIGN_BUFFER(u8, tmp, roundup(priv->len,
-- GQSPI_DMA_ALIGN));
++ ALLOC_CACHE_ALIGN_BUFFER(u8, tmp, roundup(priv->len, GQSPI_DMA_ALIGN));
buf = (u32 *)tmp;
return zynqmp_qspi_start_dma(priv, gen_fifo_cmd, buf);
}
return 0;
}
- int zynqmp_qspi_xfer(struct udevice *dev, unsigned int len,
- const void *dout, void *din, unsigned long flags)
- {
- struct udevice *bus = dev->parent;
- struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
- struct spi_slave *slave = dev_get_parent_priv(dev);
-
- debug("%s: priv: 0x%08lx len: %d dout: 0x%08lx ", __func__,
- (unsigned long)priv, len, (unsigned long)dout);
- debug("din: 0x%08lx flags: 0x%lx\n", (unsigned long)din, flags);
-
- priv->tx_buf = dout;
- priv->rx_buf = din;
- priv->len = len;
-
- /*
- * Festering sore.
- * Assume that the beginning of a transfer with bits to
- * transmit must contain a device command.
- */
- if (dout && flags & SPI_XFER_BEGIN)
- priv->is_inst = 1;
- else
- priv->is_inst = 0;
-
- if (flags & SPI_XFER_END)
- priv->cs_change = 1;
- else
- priv->cs_change = 0;
-
- if (flags & SPI_XFER_U_PAGE)
- priv->u_page = 1;
- else
- priv->u_page = 0;
-
- priv->stripe = 0;
- priv->bus = 0;
-
- if (priv->is_dual == SF_DUAL_PARALLEL_FLASH) {
- if (flags & SPI_XFER_MASK)
- priv->bus = (flags & SPI_XFER_MASK) >> 8;
- if (flags & SPI_XFER_STRIPE)
- priv->stripe = 1;
- }
-
- priv->dummy_bytes = slave->dummy_bytes;
- zynqmp_qspi_transfer(priv);
-
- return 0;
- }
-
+static bool zynqmp_qspi_update_stripe(const struct spi_mem_op *op)
+{
+ /*
+ * This is a list of opcodes for which we must not use striped access
+ * even in dual parallel mode, but instead broadcast the same data to
+ * both chips. This is primarily erase commands and writing some
+ * registers.
+ */
+ switch (op->cmd.opcode) {
+ case SPINOR_OP_BE_4K:
+ case SPINOR_OP_BE_32K:
+ case SPINOR_OP_CHIP_ERASE:
+ case SPINOR_OP_SE:
+ case SPINOR_OP_BE_32K_4B:
+ case SPINOR_OP_SE_4B:
+ case SPINOR_OP_BE_4K_4B:
+ case SPINOR_OP_WRSR:
+ case SPINOR_OP_WREAR:
+ case SPINOR_OP_BRWR:
+ return false;
+ case SPINOR_OP_WRSR2:
+ return op->addr.nbytes != 0;
+ default:
+ return true;
+ }
+}
+
static int zynqmp_qspi_exec_op(struct spi_slave *slave,
const struct spi_mem_op *op)
{
- struct udevice *bus = slave->dev->parent;
- struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
- int op_len, pos = 0, ret, i;
- unsigned int flag = 0;
- const u8 *tx_buf = NULL;
- u8 *rx_buf = NULL;
-
- if (op->data.nbytes) {
- if (op->data.dir == SPI_MEM_DATA_IN) {
- rx_buf = op->data.buf.in;
- priv->rx_nbits = op->data.buswidth;
- } else {
- tx_buf = op->data.buf.out;
- priv->tx_nbits = op->data.buswidth;
- }
- }
+ struct zynqmp_qspi_priv *priv = dev_get_priv(slave->dev->parent);
+ int ret = 0;
- op_len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
+ priv->op = op;
+ priv->tx_buf = op->data.buf.out;
+ priv->rx_buf = op->data.buf.in;
+ priv->len = op->data.nbytes;
- u8 op_buf[op_len];
++ if (slave->flags & SPI_XFER_U_PAGE)
++ priv->u_page = 1;
++ else
++ priv->u_page = 0;
+
- op_buf[pos++] = op->cmd.opcode;
++ priv->stripe = 0;
++ priv->bus = 0;
+
- if (op->addr.nbytes) {
- for (i = 0; i < op->addr.nbytes; i++)
- op_buf[pos + i] = op->addr.val >>
- (8 * (op->addr.nbytes - i - 1));
++ if (priv->is_dual == SF_DUAL_PARALLEL_FLASH) {
++ if (slave->flags & SPI_XFER_MASK)
++ priv->bus = (slave->flags & SPI_XFER_MASK) >> 8;
++ if (slave->flags & SPI_XFER_STRIPE && zynqmp_qspi_update_stripe(op))
++ priv->stripe = 1;
++ }
+
- pos += op->addr.nbytes;
- }
+ zynqmp_qspi_chipselect(priv, 1);
- if (op->dummy.nbytes) {
- memset(op_buf + pos, 0xff, op->dummy.nbytes);
- slave->dummy_bytes = op->dummy.nbytes;
- }
-
- if (slave->flags & SPI_XFER_U_PAGE)
- flag |= SPI_XFER_U_PAGE;
- if (slave->flags & SPI_XFER_LOWER)
- flag |= SPI_XFER_LOWER;
- if (slave->flags & SPI_XFER_UPPER)
- flag |= SPI_XFER_UPPER;
- if (slave->flags & SPI_XFER_STRIPE && zynqmp_qspi_update_stripe(op))
- flag |= SPI_XFER_STRIPE;
-
- /* 1st transfer: opcode + address + dummy cycles */
- /* Make sure to set END bit if no tx or rx data messages follow */
- if (!tx_buf && !rx_buf)
- flag |= SPI_XFER_END;
-
- ret = zynqmp_qspi_xfer(slave->dev, op_len, op_buf, NULL,
- flag | SPI_XFER_BEGIN);
- if (ret)
- return ret;
+ /* Send opcode, addr, dummy */
+ zynqmp_qspi_genfifo_cmd(priv);
- slave->dummy_bytes = 0;
+ /* Request the transfer */
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ ret = zynqmp_qspi_genfifo_fill_rx(priv);
+ else if (op->data.dir == SPI_MEM_DATA_OUT)
+ ret = zynqmp_qspi_genfifo_fill_tx(priv);
- if (op->data.nbytes) {
- if (op->data.dir == SPI_MEM_DATA_IN)
- priv->rx_nbits = op->data.buswidth;
- else
- priv->tx_nbits = op->data.buswidth;
- }
- /* 2nd transfer: rx or tx data path */
- if (tx_buf || rx_buf) {
- ret = zynqmp_qspi_xfer(slave->dev, op->data.nbytes, tx_buf,
- rx_buf, flag | SPI_XFER_END);
- if (ret)
- return ret;
- }
+ zynqmp_qspi_chipselect(priv, 0);
- spi_release_bus(slave);
+ slave->flags &= ~SPI_XFER_MASK;
- return 0;
+
+ return ret;
}
static const struct spi_controller_mem_ops zynqmp_qspi_mem_ops = {
.id = UCLASS_SPI,
.of_match = zynqmp_qspi_ids,
.ops = &zynqmp_qspi_ops,
- .ofdata_to_platdata = zynqmp_qspi_ofdata_to_platdata,
- .platdata_auto_alloc_size = sizeof(struct zynqmp_qspi_platdata),
- .priv_auto_alloc_size = sizeof(struct zynqmp_qspi_priv),
+ .of_to_plat = zynqmp_qspi_of_to_plat,
+ .plat_auto = sizeof(struct zynqmp_qspi_plat),
+ .priv_auto = sizeof(struct zynqmp_qspi_priv),
.probe = zynqmp_qspi_probe,
+ .child_pre_probe = zynqmp_qspi_child_pre_probe,
};
static int dwc3_generic_probe(struct udevice *dev,
struct dwc3_generic_priv *priv)
{
- int rc;
+ int rc, ret;
- struct dwc3_generic_plat *plat = dev_get_platdata(dev);
+ struct dwc3_generic_plat *plat = dev_get_plat(dev);
struct dwc3 *dwc3 = &priv->dwc3;
- struct dwc3_glue_data *glue = dev_get_platdata(dev->parent);
+ struct dwc3_glue_data *glue = dev_get_plat(dev->parent);
+ struct gpio_desc reset_gpio;
dwc3->dev = dev;
dwc3->maximum_speed = plat->maximum_speed;
enum spi_nor_protocol write_proto;
enum spi_nor_protocol reg_proto;
bool sst_write_second;
+ bool shift;
+ bool isparallel;
+ bool isstacked;
u32 flags;
u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
+ enum spi_nor_cmd_ext cmd_ext_type;
+ struct spi_nor_fixups *fixups;
+ int (*setup)(struct spi_nor *nor, const struct flash_info *info,
+ const struct spi_nor_flash_parameter *params);
int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
PM_QID_CLOCK_GET_MAX_DIVISOR = 13,
};
+enum pm_pinctrl_config_param {
+ PM_PINCTRL_CONFIG_SLEW_RATE,
+ PM_PINCTRL_CONFIG_BIAS_STATUS,
+ PM_PINCTRL_CONFIG_PULL_CTRL,
+ PM_PINCTRL_CONFIG_SCHMITT_CMOS,
+ PM_PINCTRL_CONFIG_DRIVE_STRENGTH,
+ PM_PINCTRL_CONFIG_VOLTAGE_STATUS,
+ PM_PINCTRL_CONFIG_TRI_STATE,
+ PM_PINCTRL_CONFIG_MAX,
+};
+
+enum pm_pinctrl_slew_rate {
+ PM_PINCTRL_SLEW_RATE_FAST,
+ PM_PINCTRL_SLEW_RATE_SLOW,
+};
+
+enum pm_pinctrl_bias_status {
+ PM_PINCTRL_BIAS_DISABLE,
+ PM_PINCTRL_BIAS_ENABLE,
+};
+
+enum pm_pinctrl_pull_ctrl {
+ PM_PINCTRL_BIAS_PULL_DOWN,
+ PM_PINCTRL_BIAS_PULL_UP,
+};
+
+enum pm_pinctrl_schmitt_cmos {
+ PM_PINCTRL_INPUT_TYPE_CMOS,
+ PM_PINCTRL_INPUT_TYPE_SCHMITT,
+};
+
+enum pm_pinctrl_drive_strength {
+ PM_PINCTRL_DRIVE_STRENGTH_2MA,
+ PM_PINCTRL_DRIVE_STRENGTH_4MA,
+ PM_PINCTRL_DRIVE_STRENGTH_8MA,
+ PM_PINCTRL_DRIVE_STRENGTH_12MA,
+};
+
+enum pm_pinctrl_tri_state {
+ PM_PINCTRL_TRI_STATE_DISABLE = 0,
+ PM_PINCTRL_TRI_STATE_ENABLE,
+};
+
+ enum zynqmp_pm_reset_action {
+ PM_RESET_ACTION_RELEASE = 0,
+ PM_RESET_ACTION_ASSERT = 1,
+ PM_RESET_ACTION_PULSE = 2,
+ };
+
+ enum zynqmp_pm_reset {
+ ZYNQMP_PM_RESET_START = 1000,
+ ZYNQMP_PM_RESET_PCIE_CFG = ZYNQMP_PM_RESET_START,
+ ZYNQMP_PM_RESET_PCIE_BRIDGE = 1001,
+ ZYNQMP_PM_RESET_PCIE_CTRL = 1002,
+ ZYNQMP_PM_RESET_DP = 1003,
+ ZYNQMP_PM_RESET_SWDT_CRF = 1004,
+ ZYNQMP_PM_RESET_AFI_FM5 = 1005,
+ ZYNQMP_PM_RESET_AFI_FM4 = 1006,
+ ZYNQMP_PM_RESET_AFI_FM3 = 1007,
+ ZYNQMP_PM_RESET_AFI_FM2 = 1008,
+ ZYNQMP_PM_RESET_AFI_FM1 = 1009,
+ ZYNQMP_PM_RESET_AFI_FM0 = 1010,
+ ZYNQMP_PM_RESET_GDMA = 1011,
+ ZYNQMP_PM_RESET_GPU_PP1 = 1012,
+ ZYNQMP_PM_RESET_GPU_PP0 = 1013,
+ ZYNQMP_PM_RESET_GPU = 1014,
+ ZYNQMP_PM_RESET_GT = 1015,
+ ZYNQMP_PM_RESET_SATA = 1016,
+ ZYNQMP_PM_RESET_ACPU3_PWRON = 1017,
+ ZYNQMP_PM_RESET_ACPU2_PWRON = 1018,
+ ZYNQMP_PM_RESET_ACPU1_PWRON = 1019,
+ ZYNQMP_PM_RESET_ACPU0_PWRON = 1020,
+ ZYNQMP_PM_RESET_APU_L2 = 1021,
+ ZYNQMP_PM_RESET_ACPU3 = 1022,
+ ZYNQMP_PM_RESET_ACPU2 = 1023,
+ ZYNQMP_PM_RESET_ACPU1 = 1024,
+ ZYNQMP_PM_RESET_ACPU0 = 1025,
+ ZYNQMP_PM_RESET_DDR = 1026,
+ ZYNQMP_PM_RESET_APM_FPD = 1027,
+ ZYNQMP_PM_RESET_SOFT = 1028,
+ ZYNQMP_PM_RESET_GEM0 = 1029,
+ ZYNQMP_PM_RESET_GEM1 = 1030,
+ ZYNQMP_PM_RESET_GEM2 = 1031,
+ ZYNQMP_PM_RESET_GEM3 = 1032,
+ ZYNQMP_PM_RESET_QSPI = 1033,
+ ZYNQMP_PM_RESET_UART0 = 1034,
+ ZYNQMP_PM_RESET_UART1 = 1035,
+ ZYNQMP_PM_RESET_SPI0 = 1036,
+ ZYNQMP_PM_RESET_SPI1 = 1037,
+ ZYNQMP_PM_RESET_SDIO0 = 1038,
+ ZYNQMP_PM_RESET_SDIO1 = 1039,
+ ZYNQMP_PM_RESET_CAN0 = 1040,
+ ZYNQMP_PM_RESET_CAN1 = 1041,
+ ZYNQMP_PM_RESET_I2C0 = 1042,
+ ZYNQMP_PM_RESET_I2C1 = 1043,
+ ZYNQMP_PM_RESET_TTC0 = 1044,
+ ZYNQMP_PM_RESET_TTC1 = 1045,
+ ZYNQMP_PM_RESET_TTC2 = 1046,
+ ZYNQMP_PM_RESET_TTC3 = 1047,
+ ZYNQMP_PM_RESET_SWDT_CRL = 1048,
+ ZYNQMP_PM_RESET_NAND = 1049,
+ ZYNQMP_PM_RESET_ADMA = 1050,
+ ZYNQMP_PM_RESET_GPIO = 1051,
+ ZYNQMP_PM_RESET_IOU_CC = 1052,
+ ZYNQMP_PM_RESET_TIMESTAMP = 1053,
+ ZYNQMP_PM_RESET_RPU_R50 = 1054,
+ ZYNQMP_PM_RESET_RPU_R51 = 1055,
+ ZYNQMP_PM_RESET_RPU_AMBA = 1056,
+ ZYNQMP_PM_RESET_OCM = 1057,
+ ZYNQMP_PM_RESET_RPU_PGE = 1058,
+ ZYNQMP_PM_RESET_USB0_CORERESET = 1059,
+ ZYNQMP_PM_RESET_USB1_CORERESET = 1060,
+ ZYNQMP_PM_RESET_USB0_HIBERRESET = 1061,
+ ZYNQMP_PM_RESET_USB1_HIBERRESET = 1062,
+ ZYNQMP_PM_RESET_USB0_APB = 1063,
+ ZYNQMP_PM_RESET_USB1_APB = 1064,
+ ZYNQMP_PM_RESET_IPI = 1065,
+ ZYNQMP_PM_RESET_APM_LPD = 1066,
+ ZYNQMP_PM_RESET_RTC = 1067,
+ ZYNQMP_PM_RESET_SYSMON = 1068,
+ ZYNQMP_PM_RESET_AFI_FM6 = 1069,
+ ZYNQMP_PM_RESET_LPD_SWDT = 1070,
+ ZYNQMP_PM_RESET_FPD = 1071,
+ ZYNQMP_PM_RESET_RPU_DBG1 = 1072,
+ ZYNQMP_PM_RESET_RPU_DBG0 = 1073,
+ ZYNQMP_PM_RESET_DBG_LPD = 1074,
+ ZYNQMP_PM_RESET_DBG_FPD = 1075,
+ ZYNQMP_PM_RESET_APLL = 1076,
+ ZYNQMP_PM_RESET_DPLL = 1077,
+ ZYNQMP_PM_RESET_VPLL = 1078,
+ ZYNQMP_PM_RESET_IOPLL = 1079,
+ ZYNQMP_PM_RESET_RPLL = 1080,
+ ZYNQMP_PM_RESET_GPO3_PL_0 = 1081,
+ ZYNQMP_PM_RESET_GPO3_PL_1 = 1082,
+ ZYNQMP_PM_RESET_GPO3_PL_2 = 1083,
+ ZYNQMP_PM_RESET_GPO3_PL_3 = 1084,
+ ZYNQMP_PM_RESET_GPO3_PL_4 = 1085,
+ ZYNQMP_PM_RESET_GPO3_PL_5 = 1086,
+ ZYNQMP_PM_RESET_GPO3_PL_6 = 1087,
+ ZYNQMP_PM_RESET_GPO3_PL_7 = 1088,
+ ZYNQMP_PM_RESET_GPO3_PL_8 = 1089,
+ ZYNQMP_PM_RESET_GPO3_PL_9 = 1090,
+ ZYNQMP_PM_RESET_GPO3_PL_10 = 1091,
+ ZYNQMP_PM_RESET_GPO3_PL_11 = 1092,
+ ZYNQMP_PM_RESET_GPO3_PL_12 = 1093,
+ ZYNQMP_PM_RESET_GPO3_PL_13 = 1094,
+ ZYNQMP_PM_RESET_GPO3_PL_14 = 1095,
+ ZYNQMP_PM_RESET_GPO3_PL_15 = 1096,
+ ZYNQMP_PM_RESET_GPO3_PL_16 = 1097,
+ ZYNQMP_PM_RESET_GPO3_PL_17 = 1098,
+ ZYNQMP_PM_RESET_GPO3_PL_18 = 1099,
+ ZYNQMP_PM_RESET_GPO3_PL_19 = 1100,
+ ZYNQMP_PM_RESET_GPO3_PL_20 = 1101,
+ ZYNQMP_PM_RESET_GPO3_PL_21 = 1102,
+ ZYNQMP_PM_RESET_GPO3_PL_22 = 1103,
+ ZYNQMP_PM_RESET_GPO3_PL_23 = 1104,
+ ZYNQMP_PM_RESET_GPO3_PL_24 = 1105,
+ ZYNQMP_PM_RESET_GPO3_PL_25 = 1106,
+ ZYNQMP_PM_RESET_GPO3_PL_26 = 1107,
+ ZYNQMP_PM_RESET_GPO3_PL_27 = 1108,
+ ZYNQMP_PM_RESET_GPO3_PL_28 = 1109,
+ ZYNQMP_PM_RESET_GPO3_PL_29 = 1110,
+ ZYNQMP_PM_RESET_GPO3_PL_30 = 1111,
+ ZYNQMP_PM_RESET_GPO3_PL_31 = 1112,
+ ZYNQMP_PM_RESET_RPU_LS = 1113,
+ ZYNQMP_PM_RESET_PS_ONLY = 1114,
+ ZYNQMP_PM_RESET_PL = 1115,
+ ZYNQMP_PM_RESET_PS_PL0 = 1116,
+ ZYNQMP_PM_RESET_PS_PL1 = 1117,
+ ZYNQMP_PM_RESET_PS_PL2 = 1118,
+ ZYNQMP_PM_RESET_PS_PL3 = 1119,
+ ZYNQMP_PM_RESET_END = ZYNQMP_PM_RESET_PS_PL3
+ };
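These reset identifiers and actions are consumed through the EEMI interface; a hedged sketch of pulsing the QSPI reset via xilinx_pm_request(), assuming the PM_RESET_ASSERT call id from zynqmp_firmware.h:

#include <zynqmp_firmware.h>

/* Illustrative only: pulse the QSPI controller reset through PMU firmware. */
static int example_pulse_qspi_reset(void)
{
	return xilinx_pm_request(PM_RESET_ASSERT, ZYNQMP_PM_RESET_QSPI,
				 PM_RESET_ACTION_PULSE, 0, 0, NULL);
}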
+
enum pm_ioctl_id {
- IOCTL_GET_RPU_OPER_MODE,
- IOCTL_SET_RPU_OPER_MODE,
- IOCTL_RPU_BOOT_ADDR_CONFIG,
- IOCTL_TCM_COMB_CONFIG,
- IOCTL_SET_TAPDELAY_BYPASS,
- IOCTL_SET_SGMII_MODE,
- IOCTL_SD_DLL_RESET,
- IOCTL_SET_SD_TAPDELAY,
- IOCTL_SET_PLL_FRAC_MODE,
- IOCTL_GET_PLL_FRAC_MODE,
- IOCTL_SET_PLL_FRAC_DATA,
- IOCTL_GET_PLL_FRAC_DATA,
- IOCTL_WRITE_GGS,
- IOCTL_READ_GGS,
- IOCTL_WRITE_PGGS,
- IOCTL_READ_PGGS,
+ IOCTL_GET_RPU_OPER_MODE = 0,
+ IOCTL_SET_RPU_OPER_MODE = 1,
+ IOCTL_RPU_BOOT_ADDR_CONFIG = 2,
+ IOCTL_TCM_COMB_CONFIG = 3,
+ IOCTL_SET_TAPDELAY_BYPASS = 4,
+ IOCTL_SET_SGMII_MODE = 5,
+ IOCTL_SD_DLL_RESET = 6,
+ IOCTL_SET_SD_TAPDELAY = 7,
+ IOCTL_SET_PLL_FRAC_MODE = 8,
+ IOCTL_GET_PLL_FRAC_MODE = 9,
+ IOCTL_SET_PLL_FRAC_DATA = 10,
+ IOCTL_GET_PLL_FRAC_DATA = 11,
+ IOCTL_WRITE_GGS = 12,
+ IOCTL_READ_GGS = 13,
+ IOCTL_WRITE_PGGS = 14,
+ IOCTL_READ_PGGS = 15,
/* IOCTL for ULPI reset */
- IOCTL_ULPI_RESET,
+ IOCTL_ULPI_RESET = 16,
/* Set healthy bit value*/
- IOCTL_SET_BOOT_HEALTH_STATUS,
- IOCTL_AFI,
+ IOCTL_SET_BOOT_HEALTH_STATUS = 17,
+ IOCTL_AFI = 18,
/* Probe counter read/write */
- IOCTL_PROBE_COUNTER_READ,
- IOCTL_PROBE_COUNTER_WRITE,
- IOCTL_OSPI_MUX_SELECT,
+ IOCTL_PROBE_COUNTER_READ = 19,
+ IOCTL_PROBE_COUNTER_WRITE = 20,
+ IOCTL_OSPI_MUX_SELECT = 21,
/* IOCTL for USB power request */
- IOCTL_USB_SET_STATE,
+ IOCTL_USB_SET_STATE = 22,
/* IOCTL to get last reset reason */
- IOCTL_GET_LAST_RESET_REASON,
+ IOCTL_GET_LAST_RESET_REASON = 23,
/* AIE ISR Clear */
- IOCTL_AIE_ISR_CLEAR,
+ IOCTL_AIE_ISR_CLEAR = 24,
};
+enum ospi_mux_select_type {
+ PM_OSPI_MUX_SEL_DMA,
+ PM_OSPI_MUX_SEL_LINEAR,
+ PM_OSPI_MUX_GET_MODE,
+};
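Similarly, the OSPI mux selection added above is requested through the IOCTL path; a minimal sketch, assuming IOCTL_OSPI_MUX_SELECT is issued with the PM_IOCTL call id and a hypothetical ospi_node_id for the device argument:

#include <zynqmp_firmware.h>

/* Illustrative only: ask the firmware to route the OSPI to its DMA port. */
static int example_ospi_select_dma(u32 ospi_node_id)
{
	return xilinx_pm_request(PM_IOCTL, ospi_node_id, IOCTL_OSPI_MUX_SELECT,
				 PM_OSPI_MUX_SEL_DMA, 0, NULL);
}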
+
+enum zynqmp_pm_request_ack {
+ PM_REQUEST_ACK_NO = 1,
+ PM_REQUEST_ACK_BLOCKING,
+ PM_REQUEST_ACK_NON_BLOCKING,
+};
+
#define PM_SIP_SVC 0xc2000000
#define ZYNQMP_PM_VERSION_MAJOR 1