{
	struct spi_mem_op op = spinand->op_templates->set_feature;
+	u16 addr = reg;
+
+	/* In octal DTR mode, the one-byte address is transferred on both clock edges */
+	if (op.cmd.dtr && op.cmd.buswidth == 8)
+		addr |= addr << 8;
+
-	op.addr.val = reg;
+	op.addr.val = addr;
op.data.buf.out = valptr;
{
	struct spi_mem_op op = spinand->op_templates->get_feature;
+	u16 addr = reg;
+
+	/* In octal DTR mode, the one-byte address is transferred on both clock edges */
+	if (op.cmd.dtr && op.cmd.buswidth == 8)
+		addr |= addr << 8;
+
-	op.addr.val = reg;
+	op.addr.val = addr;
op.data.buf.in = valptr;
return spinand->manufacturer->ops->cleanup(spinand);
}
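+/*
+ * An operation is considered octal DTR (8D-8D-8D) when its command phase is
+ * sent in DTR mode over all eight I/O lines.
+ */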
+static bool spinand_op_is_odtr(const struct spi_mem_op *op)
+{
+ return op->cmd.dtr && op->cmd.buswidth == 8;
+}
+
static void spinand_init_ssdr_templates(struct spinand_device *spinand)
{
struct spinand_mem_ops *tmpl = &spinand->ssdr_op_templates;
for (i = 0; i < info->vendor_ops->nops; i++) {
const struct spi_mem_op *op = &info->vendor_ops->ops[i];
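+		/* Skip the operations that do not match the requested interface */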
+ if ((iface == SSDR && spinand_op_is_odtr(op)) ||
+ (iface == ODTR && !spinand_op_is_odtr(op)))
+ continue;
+
if (!spi_mem_supports_op(spinand->spimem, op))
return -EOPNOTSUPP;
}
return 0;
}
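+/*
+ * Checking the octal DTR instruction set is all or nothing: if the controller
+ * cannot handle a single one of the core operations, 8D-8D-8D mode is not
+ * usable.
+ */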
+static int spinand_init_odtr_instruction_set(struct spinand_device *spinand)
+{
+ struct spinand_mem_ops *tmpl = &spinand->odtr_op_templates;
+
+ tmpl->reset = (struct spi_mem_op)SPINAND_RESET_8D_0_0_OP;
+ if (!spi_mem_supports_op(spinand->spimem, &tmpl->reset))
+ return -EOPNOTSUPP;
+
+ tmpl->readid = (struct spi_mem_op)SPINAND_READID_8D_8D_8D_OP(0, 0, NULL, 0);
+ if (!spi_mem_supports_op(spinand->spimem, &tmpl->readid))
+ return -EOPNOTSUPP;
+
+ tmpl->wr_en = (struct spi_mem_op)SPINAND_WR_EN_8D_0_0_OP;
+ if (!spi_mem_supports_op(spinand->spimem, &tmpl->wr_en))
+ return -EOPNOTSUPP;
+
+ tmpl->wr_dis = (struct spi_mem_op)SPINAND_WR_DIS_8D_0_0_OP;
+ if (!spi_mem_supports_op(spinand->spimem, &tmpl->wr_dis))
+ return -EOPNOTSUPP;
+
+ tmpl->set_feature = (struct spi_mem_op)SPINAND_SET_FEATURE_8D_8D_8D_OP(0, NULL);
+ if (!spi_mem_supports_op(spinand->spimem, &tmpl->set_feature))
+ return -EOPNOTSUPP;
+
+ tmpl->get_feature = (struct spi_mem_op)SPINAND_GET_FEATURE_8D_8D_8D_OP(0, NULL);
+ if (!spi_mem_supports_op(spinand->spimem, &tmpl->get_feature))
+ return -EOPNOTSUPP;
+
+ tmpl->blk_erase = (struct spi_mem_op)SPINAND_BLK_ERASE_8D_8D_0_OP(0);
+ if (!spi_mem_supports_op(spinand->spimem, &tmpl->blk_erase))
+ return -EOPNOTSUPP;
+
+ tmpl->page_read = (struct spi_mem_op)SPINAND_PAGE_READ_8D_8D_0_OP(0);
+ if (!spi_mem_supports_op(spinand->spimem, &tmpl->page_read))
+ return -EOPNOTSUPP;
+
+ tmpl->prog_exec = (struct spi_mem_op)SPINAND_PROG_EXEC_8D_8D_0_OP(0);
+ if (!spi_mem_supports_op(spinand->spimem, &tmpl->prog_exec))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand, enum spinand_bus_interface iface,
const struct spinand_op_variants *variants)
unsigned int nbytes;
int ret;
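+		/* Skip the variants that do not match the requested interface */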
+ if ((iface == SSDR && spinand_op_is_odtr(&op)) ||
+ (iface == ODTR && !spinand_op_is_odtr(&op)))
+ continue;
+
nbytes = nanddev_per_page_oobsize(nand) +
nanddev_page_size(nand);
spinand->read_retries = table[i].read_retries;
spinand->set_read_retry = table[i].set_read_retry;
+	/* I/O variant selection with single SDR commands */
+
op = spinand_select_op_variant(spinand, SSDR,
info->op_variants.read_cache);
if (!op)
if (ret)
return ret;
+	/* I/O variant selection with octal DTR commands (optional) */
+
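+	/* Octal DTR support is optional: upon failure, simply stay in SSDR mode */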
+ ret = spinand_init_odtr_instruction_set(spinand);
+ if (ret)
+ return 0;
+
+ ret = spinand_support_vendor_ops(spinand, info, ODTR);
+ if (ret)
+ return 0;
+
+ op = spinand_select_op_variant(spinand, ODTR,
+ info->op_variants.read_cache);
+ spinand->odtr_op_templates.read_cache = op;
+
+ op = spinand_select_op_variant(spinand, ODTR,
+ info->op_variants.write_cache);
+ spinand->odtr_op_templates.write_cache = op;
+
+ op = spinand_select_op_variant(spinand, ODTR,
+ info->op_variants.update_cache);
+ spinand->odtr_op_templates.update_cache = op;
+
return 0;
}
static int spinand_configure_chip(struct spinand_device *spinand)
{
- bool quad_enable = false;
+ bool odtr = false, quad_enable = false;
int ret;
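+	/* Octal DTR mode is only usable if all three I/O variants have a match */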
+ if (spinand->odtr_op_templates.read_cache &&
+ spinand->odtr_op_templates.write_cache &&
+ spinand->odtr_op_templates.update_cache)
+ odtr = true;
+
+ if (odtr) {
+ if (!spinand->configure_chip)
+ goto try_ssdr;
+
+ /* ODTR bus interface configuration happens here */
+ ret = spinand->configure_chip(spinand, ODTR);
+ if (ret) {
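+			/* Wipe the ODTR templates so that only the SSDR path remains */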
+ spinand->odtr_op_templates.read_cache = NULL;
+ spinand->odtr_op_templates.write_cache = NULL;
+ spinand->odtr_op_templates.update_cache = NULL;
+ goto try_ssdr;
+ }
+
+ spinand->op_templates = &spinand->odtr_op_templates;
+ spinand->bus_iface = ODTR;
+
+ return 0;
+ }
+
+try_ssdr:
if (spinand->flags & SPINAND_HAS_QE_BIT) {
if (spinand->ssdr_op_templates.read_cache->data.buswidth == 4 ||
spinand->ssdr_op_templates.write_cache->data.buswidth == 4 ||
spinand_ecc_enable(spinand, false);
}
+static int spinand_mtd_suspend(struct mtd_info *mtd)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ int ret;
+
+	/*
+	 * Return to the SSDR interface in the suspend path so that the reset
+	 * operation is correctly processed upon resume.
+	 *
+	 * Note: once back in SSDR mode, every operation but the page helpers
+	 * (the dirmap based I/O accessors) keeps working. Supporting page
+	 * accesses as well would require destroying and recreating the dirmaps
+	 * twice, a needless cost given that this is only a transitional state.
+	 */
+ if (spinand->bus_iface == ODTR) {
+ ret = spinand->configure_chip(spinand, SSDR);
+ if (ret)
+ return ret;
+
+ spinand->op_templates = &spinand->ssdr_op_templates;
+ spinand->bus_iface = SSDR;
+ }
+
+ return 0;
+}
+
static int spinand_init(struct spinand_device *spinand)
{
struct device *dev = &spinand->spimem->spi->dev;
mtd->_block_isreserved = spinand_mtd_block_isreserved;
mtd->_erase = spinand_mtd_erase;
mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
+ mtd->_suspend = spinand_mtd_suspend;
mtd->_resume = spinand_mtd_resume;
if (spinand_user_otp_size(spinand) || spinand_fact_otp_size(spinand)) {
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(len, buf, 8))
+/*
+ * Octal DTR SPI NAND flash operations
+ */
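+/*
+ * In 8D-8D-8D mode, every phase is transferred on both clock edges over
+ * eight I/O lines, i.e. two bytes per clock cycle, so single bytes (opcodes,
+ * register addresses, feature values) are always transferred twice: hence
+ * the _RPT_ command/address variants and the two-byte feature data phases
+ * below.
+ */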
+
+#define SPINAND_RESET_8D_0_0_OP \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0xff, 8), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_READID_8D_8D_8D_OP(naddr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0x9f, 8), \
+ SPI_MEM_DTR_OP_ADDR(naddr, 0, 8), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 8), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 8))
+
+#define SPINAND_WR_EN_8D_0_0_OP \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0x06, 8), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_WR_DIS_8D_0_0_OP \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0x04, 8), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_SET_FEATURE_8D_8D_8D_OP(reg, valptr) \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0x1f, 8), \
+ SPI_MEM_DTR_OP_RPT_ADDR(reg, 8), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_DTR_OP_DATA_OUT(2, valptr, 8))
+
+#define SPINAND_GET_FEATURE_8D_8D_8D_OP(reg, valptr) \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0x0f, 8), \
+ SPI_MEM_DTR_OP_RPT_ADDR(reg, 8), \
+ SPI_MEM_DTR_OP_DUMMY(14, 8), \
+ SPI_MEM_DTR_OP_DATA_IN(2, valptr, 8))
+
+#define SPINAND_BLK_ERASE_8D_8D_0_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0xd8, 8), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 8), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PAGE_READ_8D_8D_0_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0x13, 8), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 8), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PAGE_READ_FROM_CACHE_8D_8D_8D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0x9d, 8), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 8), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 8), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 8), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PROG_EXEC_8D_8D_0_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD(0x10, 8), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 8), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PROG_LOAD_8D_8D_8D_OP(reset, addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_DTR_OP_RPT_CMD((reset ? 0xc2 : 0xc4), 8), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 8), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_DTR_OP_DATA_OUT(len, buf, 8))
+
/* feature register */
#define REG_BLOCK_LOCK 0xa0
#define BL_ALL_UNLOCKED 0x00
struct spinand_op;
struct spinand_device;
-#define SPINAND_MAX_ID_LEN 5
+#define SPINAND_MAX_ID_LEN 6
/*
* For erase, write and read operation, we got the following timings :
* tBERS (erase) 1ms to 4ms
/**
* struct spinand_id - SPI NAND id structure
- * @data: buffer containing the id bytes. Currently 5 bytes large, but can
+ * @data: buffer containing the id bytes. Currently 6 bytes large, but can
* be extended if required
* @len: ID length
*/
/**
* enum spinand_bus_interface - SPI NAND bus interface types
* @SSDR: Bus configuration supporting all 1S-XX-XX operations, including dual and quad
+ * @ODTR: Bus configuration supporting only 8D-8D-8D operations
*/
enum spinand_bus_interface {
SSDR,
+ ODTR,
};
/**
* @id: NAND ID as returned by READ_ID
* @flags: NAND flags
* @ssdr_op_templates: Templates for all single SDR SPI mem operations
+ * @odtr_op_templates: Templates for all octal DTR SPI mem operations
* @op_templates: Templates for all SPI mem operations
* @bus_iface: Current bus interface
* @select_target: select a specific target/die. Usually called before sending
u32 flags;
struct spinand_mem_ops ssdr_op_templates;
+ struct spinand_mem_ops odtr_op_templates;
struct spinand_mem_ops *op_templates;
enum spinand_bus_interface bus_iface;