if (!(spinand->flags & SPINAND_HAS_QE_BIT))
return 0;
- if (spinand->op_templates.read_cache->data.buswidth == 4 ||
- spinand->op_templates.write_cache->data.buswidth == 4 ||
- spinand->op_templates.update_cache->data.buswidth == 4)
+ if (spinand->op_templates->read_cache->data.buswidth == 4 ||
+ spinand->op_templates->write_cache->data.buswidth == 4 ||
+ spinand->op_templates->update_cache->data.buswidth == 4)
enable = true;
return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
info.offset = plane << fls(nand->memorg.pagesize);
info.length = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
- info.op_tmpl = *spinand->op_templates.update_cache;
+ info.op_tmpl = *spinand->op_templates->update_cache;
desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
spinand->spimem, &info);
if (IS_ERR(desc))
spinand->dirmaps[plane].wdesc = desc;
- info.op_tmpl = *spinand->op_templates.read_cache;
+ info.op_tmpl = *spinand->op_templates->read_cache;
desc = spinand_create_rdesc(spinand, &info);
if (IS_ERR(desc))
return PTR_ERR(desc);
}
info.length = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
- info.op_tmpl = *spinand->op_templates.update_cache;
+ info.op_tmpl = *spinand->op_templates->update_cache;
info.op_tmpl.data.ecc = true;
desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
spinand->spimem, &info);
spinand->dirmaps[plane].wdesc_ecc = desc;
- info.op_tmpl = *spinand->op_templates.read_cache;
+ info.op_tmpl = *spinand->op_templates->read_cache;
info.op_tmpl.data.ecc = true;
desc = spinand_create_rdesc(spinand, &info);
if (IS_ERR(desc))
return spinand->manufacturer->ops->cleanup(spinand);
}
+/*
+ * spinand_init_ssdr_templates() - Fill in the single-SPI SDR op templates
+ * @spinand: SPI NAND device
+ *
+ * Populates @spinand->ssdr_op_templates with the plain single-wire (1S)
+ * SDR variant of each core SPI NAND operation and points
+ * @spinand->op_templates at that set, so later code accesses the
+ * templates through the pointer rather than the embedded struct.
+ */
+static void spinand_init_ssdr_templates(struct spinand_device *spinand)
+{
+ struct spinand_mem_ops *tmpl = &spinand->ssdr_op_templates;
+
+ tmpl->reset = (struct spi_mem_op)SPINAND_RESET_1S_0_0_OP;
+ tmpl->readid = (struct spi_mem_op)SPINAND_READID_1S_1S_1S_OP(0, 0, NULL, 0);
+ tmpl->wr_en = (struct spi_mem_op)SPINAND_WR_EN_1S_0_0_OP;
+ tmpl->wr_dis = (struct spi_mem_op)SPINAND_WR_DIS_1S_0_0_OP;
+ tmpl->set_feature = (struct spi_mem_op)SPINAND_SET_FEATURE_1S_1S_1S_OP(0, NULL);
+ tmpl->get_feature = (struct spi_mem_op)SPINAND_GET_FEATURE_1S_1S_1S_OP(0, NULL);
+ tmpl->blk_erase = (struct spi_mem_op)SPINAND_BLK_ERASE_1S_1S_0_OP(0);
+ tmpl->page_read = (struct spi_mem_op)SPINAND_PAGE_READ_1S_1S_0_OP(0);
+ tmpl->prog_exec = (struct spi_mem_op)SPINAND_PROG_EXEC_1S_1S_0_OP(0);
+ /*
+  * NOTE(review): indirection presumably allows swapping in an alternate
+  * template set (e.g. a faster protocol) later — confirm with follow-ups.
+  */
+ spinand->op_templates = &spinand->ssdr_op_templates;
+}
+
static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
const struct spinand_op_variants *variants)
if (!op)
return -EOPNOTSUPP;
- spinand->op_templates.read_cache = op;
+ spinand->ssdr_op_templates.read_cache = op;
op = spinand_select_op_variant(spinand,
info->op_variants.write_cache);
if (!op)
return -EOPNOTSUPP;
- spinand->op_templates.write_cache = op;
+ spinand->ssdr_op_templates.write_cache = op;
op = spinand_select_op_variant(spinand,
info->op_variants.update_cache);
if (!op)
return -EOPNOTSUPP;
- spinand->op_templates.update_cache = op;
+ spinand->ssdr_op_templates.update_cache = op;
return 0;
}
if (!spinand->scratchbuf)
return -ENOMEM;
+ spinand_init_ssdr_templates(spinand);
+
ret = spinand_detect(spinand);
if (ret)
goto err_free_bufs;
struct spi_mem_dirmap_desc *rdesc_ecc;
};
+/**
+ * struct spinand_mem_ops - Set of SPI mem op templates used to drive a chip
+ * @reset: reset op template
+ * @readid: read ID op template
+ * @wr_en: write enable op template
+ * @wr_dis: write disable op template
+ * @set_feature: set feature op template
+ * @get_feature: get feature op template
+ * @blk_erase: block erase op template
+ * @page_read: page read op template
+ * @prog_exec: program execute op template
+ * @read_cache: read cache op template
+ * @write_cache: write cache op template
+ * @update_cache: update cache op template
+ *
+ * The first nine ops are stored by value, while the cache ops are const
+ * pointers to the per-chip variant chosen by spinand_select_op_variant().
+ */
+struct spinand_mem_ops {
+ struct spi_mem_op reset;
+ struct spi_mem_op readid;
+ struct spi_mem_op wr_en;
+ struct spi_mem_op wr_dis;
+ struct spi_mem_op set_feature;
+ struct spi_mem_op get_feature;
+ struct spi_mem_op blk_erase;
+ struct spi_mem_op page_read;
+ struct spi_mem_op prog_exec;
+ const struct spi_mem_op *read_cache;
+ const struct spi_mem_op *write_cache;
+ const struct spi_mem_op *update_cache;
+};
+
/**
* struct spinand_device - SPI NAND device instance
* @base: NAND device instance
* @lock: lock used to serialize accesses to the NAND
* @id: NAND ID as returned by READ_ID
* @flags: NAND flags
- * @op_templates: various SPI mem op templates
- * @op_templates.read_cache: read cache op template
- * @op_templates.write_cache: write cache op template
- * @op_templates.update_cache: update cache op template
+ * @ssdr_op_templates: Templates for all single SDR SPI mem operations
+ * @op_templates: Pointer to the set of SPI mem op templates currently in
+ *                use (initially &ssdr_op_templates)
* @select_target: select a specific target/die. Usually called before sending
* a command addressing a page or an eraseblock embedded in
* this die. Only required if your chip exposes several dies
struct spinand_id id;
u32 flags;
- struct {
- const struct spi_mem_op *read_cache;
- const struct spi_mem_op *write_cache;
- const struct spi_mem_op *update_cache;
- } op_templates;
+ struct spinand_mem_ops ssdr_op_templates;
+ struct spinand_mem_ops *op_templates;
struct spinand_dirmap *dirmaps;