struct mtd_partition *parts;
#endif
void __iomem *nand_base;
+ u32 page;
};
struct nand_regs {
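
The new page field caches the page address handed to cmdfunc(), because the
program path that later writes the spare area (write_oob) is invoked without
one. A minimal sketch of how the write side recovers it; the helper name is
illustrative, not part of the patch:

/* Sketch only: recover the page cached by cmdfunc() on NAND_CMD_SEQIN.
 * arasan_nand_cached_page() is a hypothetical helper, not in the patch. */
static u32 arasan_nand_cached_page(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;
	struct arasan_nand_info *xnand = chip->priv;

	return xnand->page;
}
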
status = arasan_nand_read_page(mtd, buf, mtd->writesize);
+ if (oob_required)
+ chip->ecc.read_oob(mtd, chip, page);
+
return status;
}
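
With oob_required honored on the read side, a page-plus-spare read can go
through the generic MTD layer, which dispatches down to the driver's
read_page()/read_oob() callbacks. A caller-side sketch, assuming the standard
mtd_read_oob() API; the wrapper itself is illustrative:

/* Sketch: read one page and its OOB area in a single MTD operation. */
static int read_page_with_oob(struct mtd_info *mtd, loff_t offs,
			      u8 *data, u8 *oob)
{
	struct mtd_oob_ops ops = {
		.mode	= MTD_OPS_RAW,		/* bypass ECC correction */
		.len	= mtd->writesize,	/* whole main area */
		.ooblen	= mtd->oobsize,		/* whole spare area */
		.datbuf	= data,
		.oobbuf	= oob,
	};

	return mtd_read_oob(mtd, offs, &ops);
}
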
u32 size = mtd->writesize;
u32 rdcount = 0;
u8 column_addr_cycles;
+ struct arasan_nand_info *xnand = chip->priv;
if (chip->ecc_step_ds >= ARASAN_NAND_PKTSIZE_1K)
pktsize = ARASAN_NAND_PKTSIZE_1K;
writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
       &arasan_nand_base->intsts_reg);
+ if (oob_required)
+ chip->ecc.write_oob(mtd, chip, xnand->page);
+
return 0;
}
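
The write side mirrors this: once the data packets have been transferred and
the transfer-complete interrupt acknowledged, the spare area is programmed
through write_oob() for the cached page. A matching caller-side sketch, again
assuming the generic mtd_write_oob() API:

/* Sketch: program one page and its OOB area in a single MTD operation. */
static int write_page_with_oob(struct mtd_info *mtd, loff_t offs,
			       const u8 *data, u8 *oob)
{
	struct mtd_oob_ops ops = {
		.mode	= MTD_OPS_RAW,		/* write raw, no ECC */
		.len	= mtd->writesize,
		.ooblen	= mtd->oobsize,
		.datbuf	= (u8 *)data,		/* ops buffers are non-const */
		.oobbuf	= oob,
	};

	return mtd_write_oob(mtd, offs, &ops);
}
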
int column, int page_addr)
{
u32 i;
+ struct nand_chip *chip = mtd->priv;
+ struct arasan_nand_info *xnand = chip->priv;
curr_cmd = NULL;
writel(0x4, &arasan_nand_base->intsts_enr);
arasan_nand_send_rdcmd(curr_cmd, column, page_addr, mtd);
if ((curr_cmd->cmd1 == NAND_CMD_SET_FEATURES) ||
- (curr_cmd->cmd1 == NAND_CMD_SEQIN))
+ (curr_cmd->cmd1 == NAND_CMD_SEQIN)) {
+ xnand->page = page_addr;
arasan_nand_send_wrcmd(curr_cmd, column, page_addr, mtd);
+ }
if (curr_cmd->cmd1 == NAND_CMD_ERASE1)
arasan_nand_erase(curr_cmd, column, page_addr, mtd);
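
For reference, the sequence that makes the caching in cmdfunc() necessary:
the NAND core issues NAND_CMD_SEQIN with the page address, streams the data,
then finishes with NAND_CMD_PAGEPROG, which carries no address. Condensed
from the core's program-page flow (simplified, not the patch itself):

/* Simplified program flow as driven by the NAND core: the page address
 * is only visible at SEQIN time, so the driver must remember it. */
static int program_page_sketch(struct mtd_info *mtd, struct nand_chip *chip,
			       const u8 *buf, int oob_required, int page)
{
	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);	/* caches xnand->page */
	chip->ecc.write_page(mtd, chip, buf, oob_required);
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);	/* no page here */

	return chip->waitfunc(mtd, chip);
}
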