// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt) "spi-nand: " fmt

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}
static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->spimem, &op);
}
static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}
static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}
static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}
/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}
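/*
 * Illustrative sketch (not called from here): callers do a masked
 * read-modify-write through spinand_upd_cfg(). For instance,
 * spinand_init_quad_enable() below sets or clears CFG_QUAD_ENABLE with:
 *
 *	ret = spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
 *			      enable ? CFG_QUAD_ENABLE : 0);
 *
 * Passing val == 0 with a non-zero mask clears the masked bits.
 */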
/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If chip only has one die, this function is a NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}
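/*
 * Illustrative sketch: callers select the die before issuing per-target
 * operations. The unlock loop in spinand_init() below follows this pattern:
 *
 *	for (i = 0; i < nand->memorg.ntargets; i++) {
 *		ret = spinand_select_target(spinand, i);
 *		if (ret)
 *			return ret;
 *
 *		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
 *		if (ret)
 *			return ret;
 *	}
 */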
static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct device *dev = &spinand->spimem->spi->dev;
	unsigned int target;
	int ret;

	spinand->cfg_cache = devm_kcalloc(dev,
					  nand->memorg.ntargets,
					  sizeof(*spinand->cfg_cache),
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}
static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}
static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}
static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->spimem, &op);
}
static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}
static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spi_mem_dirmap_desc *rdesc;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	ssize_t ret;

	if (req->datalen) {
		buf = spinand->databuf;
		nbytes = nanddev_page_size(nand);
		column = 0;
	}

	if (req->ooblen) {
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	rdesc = spinand->dirmaps[req->pos.plane].rdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}
static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct spi_mem_dirmap_desc *wdesc;
	unsigned int nbytes, column = 0;
	void *buf = spinand->databuf;
	ssize_t ret;

	/*
	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
	 * the cache content to 0xFF (depends on vendor implementation), so we
	 * must fill the page cache entirely even if we only want to program
	 * the data portion of the page, otherwise we might corrupt the BBM or
	 * user data previously programmed in OOB area.
	 */
	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
	memset(spinand->databuf, 0xff, nbytes);

	if (req->datalen)
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
	}

	wdesc = spinand->dirmaps[req->pos.plane].wdesc;

	while (nbytes) {
		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;

		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	return 0;
}
static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}
static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->spimem, &op);
}
static int spinand_wait(struct spinand_device *spinand, u8 *s)
{
	unsigned long timeo = jiffies + msecs_to_jiffies(400);
	u8 status;
	int ret;

	do {
		ret = spinand_read_status(spinand, &status);
		if (ret)
			return ret;

		if (!(status & STATUS_BUSY))
			goto out;
	} while (time_before(jiffies, timeo));

	/*
	 * Extra read, just in case the STATUS_READY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}
static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
			      u8 ndummy, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(
		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}
static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand, NULL);
}
static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}
static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nand->eccreq.strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}
static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req,
			     bool ecc_enabled)
{
	u8 status;
	int ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (ret < 0)
		return ret;

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	if (!ecc_enabled)
		return 0;

	return spinand_check_ecc_status(spinand, status);
}
static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	u8 status;
	int ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		ret = -EIO;

	return ret;
}
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool enable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
		enable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG) {
			ecc_failed = true;
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}

		ret = 0;
		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}
static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool enable_ecc = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
		enable_ecc = true;

	mutex_lock(&spinand->lock);

	nanddev_io_for_each_page(nand, to, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	mutex_unlock(&spinand->lock);

	return ret;
}
static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = sizeof(marker),
		.ooboffs = 0,
		.oobbuf.in = marker,
		.mode = MTD_OPS_RAW,
	};

	spinand_select_target(spinand, pos->target);
	spinand_read_page(spinand, &req, false);
	if (marker[0] != 0xff || marker[1] != 0xff)
		return true;

	return false;
}
static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}
static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = sizeof(marker),
		.oobbuf.out = marker,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	return spinand_write_page(spinand, &req);
}
static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_markbad(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}
static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}
static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	int ret;

	mutex_lock(&spinand->lock);
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&spinand->lock);

	return ret;
}
static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
	mutex_lock(&spinand->lock);
	ret = nanddev_isreserved(nand, &pos);
	mutex_unlock(&spinand->lock);

	return ret;
}
static int spinand_create_dirmap(struct spinand_device *spinand,
				 unsigned int plane)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct spi_mem_dirmap_info info = {
		.length = nanddev_page_size(nand) +
			  nanddev_per_page_oobsize(nand),
	};
	struct spi_mem_dirmap_desc *desc;

	/* The plane number is passed in MSB just above the column address */
	info.offset = plane << fls(nand->memorg.pagesize);

	info.op_tmpl = *spinand->op_templates.update_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].wdesc = desc;

	info.op_tmpl = *spinand->op_templates.read_cache;
	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
					  spinand->spimem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	spinand->dirmaps[plane].rdesc = desc;

	return 0;
}
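/*
 * Worked example: for a 2048-byte page, fls(2048) == 12, so plane 1 yields
 * info.offset = 1 << 12 = 0x1000, placing the plane select bit just above
 * the column address bits.
 */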
static int spinand_create_dirmaps(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int i, ret;

	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
					sizeof(*spinand->dirmaps) *
					nand->memorg.planes_per_lun,
					GFP_KERNEL);
	if (!spinand->dirmaps)
		return -ENOMEM;

	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
		ret = spinand_create_dirmap(spinand, i);
		if (ret)
			return ret;
	}

	return 0;
}
static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};
static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&paragon_spinand_manufacturer,
	&toshiba_spinand_manufacturer,
	&winbond_spinand_manufacturer,
};
static int spinand_manufacturer_match(struct spinand_device *spinand,
				      enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		const struct spinand_manufacturer *manufacturer =
			spinand_manufacturers[i];

		if (id[0] != manufacturer->id)
			continue;

		ret = spinand_match_and_init(spinand,
					     manufacturer->chips,
					     manufacturer->nchips,
					     rdid_method);
		if (ret < 0)
			continue;

		spinand->manufacturer = manufacturer;
		return 0;
	}
	return -ENOTSUPP;
}
static int spinand_id_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	int ret;

	ret = spinand_read_id_op(spinand, 0, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 1, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_ADDR);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 0, 1, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_DUMMY);

	return ret;
}
static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}
static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}
static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->spimem, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}
/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @rdid_method: read id method to match
 *
 * Match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size,
			   enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (rdid_method != info->devid.method)
			continue;

		if (memcmp(id + 1, info->devid.id, info->devid.len))
			continue;

		nand->memorg = table[i].memorg;
		nand->eccreq = table[i].eccreq;
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->id.len = 1 + table[i].devid.len;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}
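/*
 * Illustrative sketch, assuming a hypothetical manufacturer driver: vendor
 * code typically calls this helper with its own spinand_info table, e.g.:
 *
 *	static const struct spinand_info foo_spinand_table[] = {
 *		SPINAND_INFO(...),
 *	};
 *
 *	ret = spinand_match_and_init(spinand, foo_spinand_table,
 *				     ARRAY_SIZE(foo_spinand_table),
 *				     rdid_method);
 *
 * spinand_manufacturer_match() above does exactly this with
 * manufacturer->chips and manufacturer->nchips.
 */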
static int spinand_detect(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_id_detect(spinand);
	if (ret) {
		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
			spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(&spinand->spimem->spi->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(&spinand->spimem->spi->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}
static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.free = spinand_noecc_ooblayout_free,
};
static int spinand_init(struct spinand_device *spinand)
{
	struct device *dev = &spinand->spimem->spi->dev;
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret, i;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		goto err_free_bufs;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		goto err_free_bufs;
	}

	ret = spinand_create_dirmaps(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to create direct mappings for read/write operations (err = %d)\n",
			ret);
		goto err_manuf_cleanup;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			goto err_manuf_cleanup;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			goto err_manuf_cleanup;
	}

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/*
	 * Right now, we don't support ECC, so let the whole OOB
	 * area be available to the user.
	 */
	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		goto err_cleanup_nanddev;

	mtd->oobavail = ret;

	return 0;

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}
static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}
static int spinand_probe(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);
	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;

	ret = spinand_init(spinand);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}
static int spinand_remove(struct spi_mem *mem)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(mem);
	mtd = spinand_to_mtd(spinand);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}
static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spinand_ids);

#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spinand_of_ids);
#endif

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan <peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");