e9f3a2bc 1// SPDX-License-Identifier: GPL-2.0
b199489d 2/*
3 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
4 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
5 *
6 * Copyright (C) 2005, Intec Automation Inc.
7 * Copyright (C) 2014, Freescale Semiconductor, Inc.
8 */
9
10#include <linux/err.h>
11#include <linux/errno.h>
12#include <linux/module.h>
13#include <linux/device.h>
14#include <linux/mutex.h>
15#include <linux/math64.h>
09b6a377 16#include <linux/sizes.h>
f384b352 17#include <linux/slab.h>
5390a8df 18#include <linux/sort.h>
b199489d 19
20#include <linux/mtd/mtd.h>
21#include <linux/of_platform.h>
22#include <linux/spi/flash.h>
23#include <linux/mtd/spi-nor.h>
24
25/* Define max times to check status register before we give up. */
26
27/*
28 * For everything but full-chip erase; probably could be much smaller, but kept
29 * around for safety for now
30 */
31#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
32
33/*
34 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
35 * for larger flash
36 */
37#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
b199489d 38
d928a259 39#define SPI_NOR_MAX_ID_LEN 6
c67cbb83 40#define SPI_NOR_MAX_ADDR_WIDTH 4
d928a259 41
42struct spi_nor_read_command {
43 u8 num_mode_clocks;
44 u8 num_wait_states;
45 u8 opcode;
46 enum spi_nor_protocol proto;
47};
48
49struct spi_nor_pp_command {
50 u8 opcode;
51 enum spi_nor_protocol proto;
52};
53
54enum spi_nor_read_command_index {
55 SNOR_CMD_READ,
56 SNOR_CMD_READ_FAST,
57 SNOR_CMD_READ_1_1_1_DTR,
58
59 /* Dual SPI */
60 SNOR_CMD_READ_1_1_2,
61 SNOR_CMD_READ_1_2_2,
62 SNOR_CMD_READ_2_2_2,
63 SNOR_CMD_READ_1_2_2_DTR,
64
65 /* Quad SPI */
66 SNOR_CMD_READ_1_1_4,
67 SNOR_CMD_READ_1_4_4,
68 SNOR_CMD_READ_4_4_4,
69 SNOR_CMD_READ_1_4_4_DTR,
70
fcd44b64 71 /* Octal SPI */
72 SNOR_CMD_READ_1_1_8,
73 SNOR_CMD_READ_1_8_8,
74 SNOR_CMD_READ_8_8_8,
75 SNOR_CMD_READ_1_8_8_DTR,
76
77 SNOR_CMD_READ_MAX
78};
79
80enum spi_nor_pp_command_index {
81 SNOR_CMD_PP,
82
83 /* Quad SPI */
84 SNOR_CMD_PP_1_1_4,
85 SNOR_CMD_PP_1_4_4,
86 SNOR_CMD_PP_4_4_4,
87
fcd44b64 88 /* Octal SPI */
89 SNOR_CMD_PP_1_1_8,
90 SNOR_CMD_PP_1_8_8,
91 SNOR_CMD_PP_8_8_8,
92
93 SNOR_CMD_PP_MAX
94};
95
96struct spi_nor_flash_parameter {
97 u64 size;
98 u32 page_size;
99
100 struct spi_nor_hwcaps hwcaps;
101 struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
102 struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX];
103
104 int (*quad_enable)(struct spi_nor *nor);
105};
106
107struct sfdp_parameter_header {
108 u8 id_lsb;
109 u8 minor;
110 u8 major;
111 u8 length; /* in double words */
112 u8 parameter_table_pointer[3]; /* byte address */
113 u8 id_msb;
114};
115
116#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
117#define SFDP_PARAM_HEADER_PTP(p) \
118 (((p)->parameter_table_pointer[2] << 16) | \
119 ((p)->parameter_table_pointer[1] << 8) | \
120 ((p)->parameter_table_pointer[0] << 0))
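/*
 * Worked example (illustrative): a parameter header whose
 * parameter_table_pointer bytes are { 0x30, 0x01, 0x00 } points at byte
 * address 0x000130, since the pointer is stored least significant byte first:
 * (0x00 << 16) | (0x01 << 8) | (0x30 << 0) = 0x000130.
 */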
121
122#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
123#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
816873ea 124#define SFDP_4BAIT_ID 0xff84 /* 4-byte Address Instruction Table */
125
126#define SFDP_SIGNATURE 0x50444653U
127#define SFDP_JESD216_MAJOR 1
128#define SFDP_JESD216_MINOR 0
129#define SFDP_JESD216A_MINOR 5
130#define SFDP_JESD216B_MINOR 6
131
132struct sfdp_header {
 133 u32 signature; /* 0x50444653U <=> "SFDP" */
134 u8 minor;
135 u8 major;
 136 u8 nph; /* 0-based number of parameter headers */
137 u8 unused;
138
139 /* Basic Flash Parameter Table. */
140 struct sfdp_parameter_header bfpt_header;
141};
142
143/* Basic Flash Parameter Table */
144
145/*
146 * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
147 * They are indexed from 1 but C arrays are indexed from 0.
148 */
149#define BFPT_DWORD(i) ((i) - 1)
150#define BFPT_DWORD_MAX 16
151
 152/* The first version of JESD216 defined only 9 DWORDs. */
153#define BFPT_DWORD_MAX_JESD216 9
154
155/* 1st DWORD. */
156#define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16)
157#define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17)
158#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17)
159#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17)
160#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17)
161#define BFPT_DWORD1_DTR BIT(19)
162#define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20)
163#define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21)
164#define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22)
165
166/* 5th DWORD. */
167#define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0)
168#define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4)
169
170/* 11th DWORD. */
171#define BFPT_DWORD11_PAGE_SIZE_SHIFT 4
172#define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4)
173
174/* 15th DWORD. */
175
176/*
177 * (from JESD216 rev B)
178 * Quad Enable Requirements (QER):
179 * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
 180 * reads based on the instruction. DQ3/HOLD# functions as hold during
 181 * the instruction phase.
182 * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
183 * two data bytes where bit 1 of the second byte is one.
184 * [...]
185 * Writing only one byte to the status register has the side-effect of
186 * clearing status register 2, including the QE bit. The 100b code is
187 * used if writing one byte to the status register does not modify
188 * status register 2.
189 * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
190 * one data byte where bit 6 is one.
191 * [...]
192 * - 011b: QE is bit 7 of status register 2. It is set via Write status
193 * register 2 instruction 3Eh with one data byte where bit 7 is one.
194 * [...]
195 * The status register 2 is read using instruction 3Fh.
196 * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
197 * two data bytes where bit 1 of the second byte is one.
198 * [...]
199 * In contrast to the 001b code, writing one byte to the status
200 * register does not modify status register 2.
201 * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
 202 * Read Status instruction 05h. Status register 2 is read using
2d60d1f6 203 * instruction 35h. QE is set via Write Status instruction 01h with
204 * two data bytes where bit 1 of the second byte is one.
205 * [...]
206 */
207#define BFPT_DWORD15_QER_MASK GENMASK(22, 20)
208#define BFPT_DWORD15_QER_NONE (0x0UL << 20) /* Micron */
209#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20)
210#define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20) /* Macronix */
211#define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20)
212#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
213#define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */
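/*
 * Illustrative sketch (not part of the original driver): the 3-bit QER code
 * can be extracted from BFPT DWORD15 as shown below. The helper name is an
 * assumption made for illustration; the driver itself simply compares the
 * masked value against the BFPT_DWORD15_QER_* constants when choosing a
 * quad_enable() method.
 */
static inline u32 bfpt_dword15_qer_code(u32 dword15)
{
	/* e.g. 0x2 (010b) means the QE bit is bit 6 of Status Register 1. */
	return (dword15 & BFPT_DWORD15_QER_MASK) >> 20;
}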
214
215struct sfdp_bfpt {
216 u32 dwords[BFPT_DWORD_MAX];
217};
218
219/**
220 * struct spi_nor_fixups - SPI NOR fixup hooks
221 * @post_bfpt: called after the BFPT table has been parsed
222 *
223 * Those hooks can be used to tweak the SPI NOR configuration when the SFDP
224 * table is broken or not available.
225 */
226struct spi_nor_fixups {
227 int (*post_bfpt)(struct spi_nor *nor,
228 const struct sfdp_parameter_header *bfpt_header,
229 const struct sfdp_bfpt *bfpt,
230 struct spi_nor_flash_parameter *params);
231};
232
d928a259 233struct flash_info {
234 char *name;
235
236 /*
237 * This array stores the ID bytes.
 238 * The first three bytes are the JEDEC ID.
239 * JEDEC ID zero means "no ID" (mostly older chips).
240 */
241 u8 id[SPI_NOR_MAX_ID_LEN];
242 u8 id_len;
243
244 /* The size listed here is what works with SPINOR_OP_SE, which isn't
245 * necessarily called a "sector" by the vendor.
246 */
247 unsigned sector_size;
248 u16 n_sectors;
249
250 u16 page_size;
251 u16 addr_width;
252
253 u16 flags;
254#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
255#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
256#define SST_WRITE BIT(2) /* use SST byte programming */
257#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
258#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
259#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
260#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
261#define USE_FSR BIT(7) /* use flag status register */
76a4707d 262#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
263#define SPI_NOR_HAS_TB BIT(9) /*
264 * Flash SR has Top/Bottom (TB) protect
265 * bit. Must be used with
266 * SPI_NOR_HAS_LOCK.
267 */
268#define SPI_S3AN BIT(10) /*
269 * Xilinx Spartan 3AN In-System Flash
270 * (MFR cannot be used for probing
271 * because it has the same value as
272 * ATMEL flashes)
273 */
274#define SPI_NOR_4B_OPCODES BIT(11) /*
275 * Use dedicated 4byte address op codes
276 * to support memory size above 128Mib.
277 */
2f5ad7f0 278#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
f384b352 279#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
c4b3eacc 280#define USE_CLSR BIT(14) /* use CLSR command */
fcd44b64 281#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */
e2707285 282
283 /* Part specific fixup hooks. */
284 const struct spi_nor_fixups *fixups;
285
e2707285 286 int (*quad_enable)(struct spi_nor *nor);
287};
288
289#define JEDEC_MFR(info) ((info)->id[0])
290
291/*
 292 * Read the status register.
 293 * Return the status register value.
 294 * Returns negative if error occurred.
295 */
296static int read_sr(struct spi_nor *nor)
297{
298 int ret;
299 u8 val;
300
b02e7f3e 301 ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
302 if (ret < 0) {
303 pr_err("error %d reading SR\n", (int) ret);
304 return ret;
305 }
306
307 return val;
308}
309
c14dedde 310/*
 311 * Read the flag status register.
 312 * Return the flag status register value.
 313 * Returns negative if error occurred.
314 */
315static int read_fsr(struct spi_nor *nor)
316{
317 int ret;
318 u8 val;
319
320 ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
321 if (ret < 0) {
322 pr_err("error %d reading FSR\n", ret);
323 return ret;
324 }
325
326 return val;
327}
328
329/*
 330 * Read the configuration register.
 331 * Return the configuration register value.
5d708ecc 332 * Returns negative if error occurred.
333 */
334static int read_cr(struct spi_nor *nor)
335{
336 int ret;
337 u8 val;
338
b02e7f3e 339 ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
340 if (ret < 0) {
341 dev_err(nor->dev, "error %d reading CR\n", ret);
342 return ret;
343 }
344
345 return val;
346}
347
348/*
 349 * Write the status register with 1 byte.
350 * Returns negative if error occurred.
351 */
87f3ed18 352static int write_sr(struct spi_nor *nor, u8 val)
353{
354 nor->cmd_buf[0] = val;
f9f3ce83 355 return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
356}
357
358/*
359 * Set write enable latch with Write Enable command.
360 * Returns negative if error occurred.
361 */
87f3ed18 362static int write_enable(struct spi_nor *nor)
b199489d 363{
f9f3ce83 364 return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
365}
366
367/*
8a1115ff 368 * Send write disable instruction to the chip.
b199489d 369 */
87f3ed18 370static int write_disable(struct spi_nor *nor)
b199489d 371{
f9f3ce83 372 return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
373}
374
87f3ed18 375static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
376{
377 return mtd->priv;
378}
379
380
381static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
382{
383 size_t i;
384
385 for (i = 0; i < size; i++)
386 if (table[i][0] == opcode)
387 return table[i][1];
388
389 /* No conversion found, keep input op code. */
390 return opcode;
391}
392
87f3ed18 393static u8 spi_nor_convert_3to4_read(u8 opcode)
394{
395 static const u8 spi_nor_3to4_read[][2] = {
396 { SPINOR_OP_READ, SPINOR_OP_READ_4B },
397 { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
398 { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
399 { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
400 { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
401 { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
402 { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
403 { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
404
405 { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
406 { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
407 { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
408 };
409
410 return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
411 ARRAY_SIZE(spi_nor_3to4_read));
412}
413
87f3ed18 414static u8 spi_nor_convert_3to4_program(u8 opcode)
415{
416 static const u8 spi_nor_3to4_program[][2] = {
417 { SPINOR_OP_PP, SPINOR_OP_PP_4B },
418 { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
419 { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
420 { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
421 { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
422 };
423
424 return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
425 ARRAY_SIZE(spi_nor_3to4_program));
426}
427
87f3ed18 428static u8 spi_nor_convert_3to4_erase(u8 opcode)
429{
430 static const u8 spi_nor_3to4_erase[][2] = {
431 { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
432 { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
433 { SPINOR_OP_SE, SPINOR_OP_SE_4B },
434 };
435
436 return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
437 ARRAY_SIZE(spi_nor_3to4_erase));
438}
439
b296379f 440static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
441{
442 /* Do some manufacturer fixups first */
b296379f 443 switch (JEDEC_MFR(nor->info)) {
444 case SNOR_MFR_SPANSION:
445 /* No small sector erase for 4-byte command set */
446 nor->erase_opcode = SPINOR_OP_SE;
b296379f 447 nor->mtd.erasesize = nor->info->sector_size;
448 break;
449
450 default:
451 break;
452 }
453
454 nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
455 nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
456 nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
457
458 if (!spi_nor_has_uniform_erase(nor)) {
459 struct spi_nor_erase_map *map = &nor->erase_map;
460 struct spi_nor_erase_type *erase;
461 int i;
462
463 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
464 erase = &map->erase_type[i];
465 erase->opcode =
466 spi_nor_convert_3to4_erase(erase->opcode);
467 }
468 }
469}
470
b199489d 471/* Enable/disable 4-byte addressing mode. */
eb6ec1d7 472static int set_4byte(struct spi_nor *nor, bool enable)
473{
474 int status;
475 bool need_wren = false;
476 u8 cmd;
477
b296379f 478 switch (JEDEC_MFR(nor->info)) {
0005aad0 479 case SNOR_MFR_ST:
f0d2448e 480 case SNOR_MFR_MICRON:
481 /* Some Micron need WREN command; all will accept it */
482 need_wren = true;
f66734ae 483 /* fall through */
484 case SNOR_MFR_MACRONIX:
485 case SNOR_MFR_WINBOND:
486 if (need_wren)
487 write_enable(nor);
488
b02e7f3e 489 cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
f9f3ce83 490 status = nor->write_reg(nor, cmd, NULL, 0);
491 if (need_wren)
492 write_disable(nor);
493
f134fbbb 494 if (!status && !enable &&
b296379f 495 JEDEC_MFR(nor->info) == SNOR_MFR_WINBOND) {
496 /*
497 * On Winbond W25Q256FV, leaving 4byte mode causes
498 * the Extended Address Register to be set to 1, so all
499 * 3-byte-address reads come from the second 16M.
500 * We must clear the register to enable normal behavior.
501 */
502 write_enable(nor);
503 nor->cmd_buf[0] = 0;
504 nor->write_reg(nor, SPINOR_OP_WREAR, nor->cmd_buf, 1);
505 write_disable(nor);
506 }
507
508 return status;
509 default:
510 /* Spansion style */
511 nor->cmd_buf[0] = enable << 7;
f9f3ce83 512 return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
513 }
514}
515
516static int s3an_sr_ready(struct spi_nor *nor)
517{
518 int ret;
519 u8 val;
520
521 ret = nor->read_reg(nor, SPINOR_OP_XRDSR, &val, 1);
522 if (ret < 0) {
523 dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
524 return ret;
525 }
526
527 return !!(val & XSR_RDY);
528}
529
87f3ed18 530static int spi_nor_sr_ready(struct spi_nor *nor)
b199489d 531{
532 int sr = read_sr(nor);
533 if (sr < 0)
534 return sr;
535
536 if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) {
537 if (sr & SR_E_ERR)
538 dev_err(nor->dev, "Erase Error occurred\n");
539 else
540 dev_err(nor->dev, "Programming Error occurred\n");
541
542 nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0);
543 return -EIO;
544 }
545
546 return !(sr & SR_WIP);
51983b7d 547}
b199489d 548
87f3ed18 549static int spi_nor_fsr_ready(struct spi_nor *nor)
550{
551 int fsr = read_fsr(nor);
552 if (fsr < 0)
553 return fsr;
554
555 if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
556 if (fsr & FSR_E_ERR)
557 dev_err(nor->dev, "Erase operation failed.\n");
558 else
559 dev_err(nor->dev, "Program operation failed.\n");
560
561 if (fsr & FSR_PT_ERR)
562 dev_err(nor->dev,
563 "Attempted to modify a protected sector.\n");
564
565 nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
566 return -EIO;
567 }
568
569 return fsr & FSR_READY;
51983b7d 570}
b199489d 571
572static int spi_nor_ready(struct spi_nor *nor)
573{
574 int sr, fsr;
575
576 if (nor->flags & SNOR_F_READY_XSR_RDY)
577 sr = s3an_sr_ready(nor);
578 else
579 sr = spi_nor_sr_ready(nor);
580 if (sr < 0)
581 return sr;
582 fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
583 if (fsr < 0)
584 return fsr;
585 return sr && fsr;
586}
587
588/*
589 * Service routine to read status register until ready, or timeout occurs.
 590 * Returns non-zero if an error occurs.
591 */
592static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
593 unsigned long timeout_jiffies)
c14dedde 594{
595 unsigned long deadline;
a95ce92e 596 int timeout = 0, ret;
c14dedde 597
09b6a377 598 deadline = jiffies + timeout_jiffies;
c14dedde 599
600 while (!timeout) {
601 if (time_after_eq(jiffies, deadline))
602 timeout = 1;
c14dedde 603
604 ret = spi_nor_ready(nor);
605 if (ret < 0)
606 return ret;
607 if (ret)
608 return 0;
609
610 cond_resched();
611 }
612
613 dev_err(nor->dev, "flash operation timed out\n");
c14dedde 614
615 return -ETIMEDOUT;
616}
617
618static int spi_nor_wait_till_ready(struct spi_nor *nor)
619{
620 return spi_nor_wait_till_ready_with_timeout(nor,
621 DEFAULT_READY_WAIT_JIFFIES);
622}
623
624/*
625 * Erase the whole flash memory
626 *
627 * Returns 0 if successful, non-zero otherwise.
628 */
629static int erase_chip(struct spi_nor *nor)
630{
19763671 631 dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
b199489d 632
f9f3ce83 633 return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
634}
635
636static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
637{
638 int ret = 0;
639
640 mutex_lock(&nor->lock);
641
642 if (nor->prepare) {
643 ret = nor->prepare(nor, ops);
644 if (ret) {
645 dev_err(nor->dev, "failed in the preparation.\n");
646 mutex_unlock(&nor->lock);
647 return ret;
648 }
649 }
650 return ret;
651}
652
653static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
654{
655 if (nor->unprepare)
656 nor->unprepare(nor, ops);
657 mutex_unlock(&nor->lock);
658}
659
660/*
 661 * This code converts an address to the Default Address Mode, which has
 662 * non-power-of-two page sizes. We must support this mode because it is the
 663 * default mode supported by Xilinx tools, it can access the whole flash area,
 664 * and changing over to the Power-of-two mode is irreversible and corrupts the
 665 * original data.
666 * Addr can safely be unsigned int, the biggest S3AN device is smaller than
667 * 4 MiB.
668 */
669static loff_t spi_nor_s3an_addr_convert(struct spi_nor *nor, unsigned int addr)
670{
671 unsigned int offset;
672 unsigned int page;
e99ca98f 673
674 offset = addr % nor->page_size;
675 page = addr / nor->page_size;
676 page <<= (nor->page_size > 512) ? 10 : 9;
e99ca98f 677
56c5c328 678 return page | offset;
679}
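/*
 * Worked example (illustrative): with a 264-byte page, address 1000 maps to
 * page 3, offset 208 (1000 = 3 * 264 + 208). Since the page size is not
 * greater than 512, the page index is shifted left by 9, giving
 * (3 << 9) | 208 = 0x6d0.
 */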
680
681/*
682 * Initiate the erasure of a single sector
683 */
684static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
685{
686 u8 buf[SPI_NOR_MAX_ADDR_WIDTH];
687 int i;
688
689 if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
690 addr = spi_nor_s3an_addr_convert(nor, addr);
691
692 if (nor->erase)
693 return nor->erase(nor, addr);
694
695 /*
696 * Default implementation, if driver doesn't have a specialized HW
697 * control
698 */
699 for (i = nor->addr_width - 1; i >= 0; i--) {
700 buf[i] = addr & 0xff;
701 addr >>= 8;
702 }
703
704 return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width);
705}
706
707/**
708 * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
709 * @erase: pointer to a structure that describes a SPI NOR erase type
710 * @dividend: dividend value
711 * @remainder: pointer to u32 remainder (will be updated)
712 *
713 * Return: the result of the division
714 */
715static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
716 u64 dividend, u32 *remainder)
717{
718 /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
719 *remainder = (u32)dividend & erase->size_mask;
720 return dividend >> erase->size_shift;
721}
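/*
 * Worked example (illustrative): a 64 KiB erase type has size_shift = 16 and
 * size_mask = 0xffff, so dividing offset 0x30000 yields 3 with remainder 0,
 * while 0x31000 yields 3 with remainder 0x1000.
 */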
722
723/**
724 * spi_nor_find_best_erase_type() - find the best erase type for the given
725 * offset in the serial flash memory and the
726 * number of bytes to erase. The region in
727 * which the address fits is expected to be
728 * provided.
729 * @map: the erase map of the SPI NOR
730 * @region: pointer to a structure that describes a SPI NOR erase region
731 * @addr: offset in the serial flash memory
732 * @len: number of bytes to erase
733 *
734 * Return: a pointer to the best fitted erase type, NULL otherwise.
735 */
736static const struct spi_nor_erase_type *
737spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
738 const struct spi_nor_erase_region *region,
739 u64 addr, u32 len)
740{
741 const struct spi_nor_erase_type *erase;
742 u32 rem;
743 int i;
744 u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
745
746 /*
70670734 747 * Erase types are ordered by size, with the smallest erase type at
748 * index 0.
749 */
750 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
751 /* Does the erase region support the tested erase type? */
752 if (!(erase_mask & BIT(i)))
753 continue;
754
755 erase = &map->erase_type[i];
756
757 /* Don't erase more than what the user has asked for. */
758 if (erase->size > len)
759 continue;
760
761 /* Alignment is not mandatory for overlaid regions */
762 if (region->offset & SNOR_OVERLAID_REGION)
763 return erase;
764
765 spi_nor_div_by_erase_size(erase, addr, &rem);
766 if (rem)
767 continue;
768 else
769 return erase;
770 }
771
772 return NULL;
773}
774
775/**
776 * spi_nor_region_next() - get the next spi nor region
777 * @region: pointer to a structure that describes a SPI NOR erase region
778 *
779 * Return: the next spi nor region or NULL if last region.
780 */
781static struct spi_nor_erase_region *
782spi_nor_region_next(struct spi_nor_erase_region *region)
783{
784 if (spi_nor_region_is_last(region))
785 return NULL;
786 region++;
787 return region;
788}
789
790/**
791 * spi_nor_find_erase_region() - find the region of the serial flash memory in
792 * which the offset fits
793 * @map: the erase map of the SPI NOR
794 * @addr: offset in the serial flash memory
795 *
796 * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
797 * otherwise.
798 */
799static struct spi_nor_erase_region *
800spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
801{
802 struct spi_nor_erase_region *region = map->regions;
803 u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
804 u64 region_end = region_start + region->size;
805
806 while (addr < region_start || addr >= region_end) {
807 region = spi_nor_region_next(region);
808 if (!region)
809 return ERR_PTR(-EINVAL);
810
811 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
812 region_end = region_start + region->size;
813 }
814
815 return region;
816}
817
818/**
819 * spi_nor_init_erase_cmd() - initialize an erase command
820 * @region: pointer to a structure that describes a SPI NOR erase region
821 * @erase: pointer to a structure that describes a SPI NOR erase type
822 *
823 * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
824 * otherwise.
825 */
826static struct spi_nor_erase_command *
827spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
828 const struct spi_nor_erase_type *erase)
829{
830 struct spi_nor_erase_command *cmd;
831
832 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
833 if (!cmd)
834 return ERR_PTR(-ENOMEM);
835
836 INIT_LIST_HEAD(&cmd->list);
837 cmd->opcode = erase->opcode;
838 cmd->count = 1;
839
840 if (region->offset & SNOR_OVERLAID_REGION)
841 cmd->size = region->size;
842 else
843 cmd->size = erase->size;
844
845 return cmd;
846}
847
848/**
849 * spi_nor_destroy_erase_cmd_list() - destroy erase command list
850 * @erase_list: list of erase commands
851 */
852static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
853{
854 struct spi_nor_erase_command *cmd, *next;
855
856 list_for_each_entry_safe(cmd, next, erase_list, list) {
857 list_del(&cmd->list);
858 kfree(cmd);
859 }
860}
861
862/**
863 * spi_nor_init_erase_cmd_list() - initialize erase command list
864 * @nor: pointer to a 'struct spi_nor'
865 * @erase_list: list of erase commands to be executed once we validate that the
866 * erase can be performed
867 * @addr: offset in the serial flash memory
868 * @len: number of bytes to erase
869 *
870 * Builds the list of best fitted erase commands and verifies if the erase can
871 * be performed.
872 *
873 * Return: 0 on success, -errno otherwise.
874 */
875static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
876 struct list_head *erase_list,
877 u64 addr, u32 len)
878{
879 const struct spi_nor_erase_map *map = &nor->erase_map;
880 const struct spi_nor_erase_type *erase, *prev_erase = NULL;
881 struct spi_nor_erase_region *region;
882 struct spi_nor_erase_command *cmd = NULL;
883 u64 region_end;
884 int ret = -EINVAL;
885
886 region = spi_nor_find_erase_region(map, addr);
887 if (IS_ERR(region))
888 return PTR_ERR(region);
889
890 region_end = spi_nor_region_end(region);
891
892 while (len) {
893 erase = spi_nor_find_best_erase_type(map, region, addr, len);
894 if (!erase)
895 goto destroy_erase_cmd_list;
896
897 if (prev_erase != erase ||
898 region->offset & SNOR_OVERLAID_REGION) {
899 cmd = spi_nor_init_erase_cmd(region, erase);
900 if (IS_ERR(cmd)) {
901 ret = PTR_ERR(cmd);
902 goto destroy_erase_cmd_list;
903 }
904
905 list_add_tail(&cmd->list, erase_list);
906 } else {
907 cmd->count++;
908 }
909
910 addr += cmd->size;
911 len -= cmd->size;
912
913 if (len && addr >= region_end) {
914 region = spi_nor_region_next(region);
915 if (!region)
916 goto destroy_erase_cmd_list;
917 region_end = spi_nor_region_end(region);
918 }
919
920 prev_erase = erase;
921 }
922
923 return 0;
924
925destroy_erase_cmd_list:
926 spi_nor_destroy_erase_cmd_list(erase_list);
927 return ret;
928}
929
930/**
931 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
932 * @nor: pointer to a 'struct spi_nor'
933 * @addr: offset in the serial flash memory
934 * @len: number of bytes to erase
935 *
936 * Build a list of best fitted erase commands and execute it once we validate
937 * that the erase can be performed.
938 *
939 * Return: 0 on success, -errno otherwise.
940 */
941static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
942{
943 LIST_HEAD(erase_list);
944 struct spi_nor_erase_command *cmd, *next;
945 int ret;
946
947 ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
948 if (ret)
949 return ret;
950
951 list_for_each_entry_safe(cmd, next, &erase_list, list) {
952 nor->erase_opcode = cmd->opcode;
953 while (cmd->count) {
954 write_enable(nor);
955
956 ret = spi_nor_erase_sector(nor, addr);
957 if (ret)
958 goto destroy_erase_cmd_list;
959
960 addr += cmd->size;
961 cmd->count--;
962
963 ret = spi_nor_wait_till_ready(nor);
964 if (ret)
965 goto destroy_erase_cmd_list;
966 }
967 list_del(&cmd->list);
968 kfree(cmd);
969 }
970
971 return 0;
972
973destroy_erase_cmd_list:
974 spi_nor_destroy_erase_cmd_list(&erase_list);
975 return ret;
976}
977
978/*
 979 * Erase an address range on the nor chip. The address range may span
 980 * one or more erase sectors. Return an error if there is a problem erasing.
981 */
982static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
983{
984 struct spi_nor *nor = mtd_to_spi_nor(mtd);
985 u32 addr, len;
986 uint32_t rem;
987 int ret;
988
989 dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
990 (long long)instr->len);
991
992 if (spi_nor_has_uniform_erase(nor)) {
993 div_u64_rem(instr->len, mtd->erasesize, &rem);
994 if (rem)
995 return -EINVAL;
996 }
997
998 addr = instr->addr;
999 len = instr->len;
1000
1001 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
1002 if (ret)
1003 return ret;
1004
1005 /* whole-chip erase? */
e99ca98f 1006 if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
1007 unsigned long timeout;
1008
1009 write_enable(nor);
1010
1011 if (erase_chip(nor)) {
1012 ret = -EIO;
1013 goto erase_err;
1014 }
1015
1016 /*
1017 * Scale the timeout linearly with the size of the flash, with
1018 * a minimum calibrated to an old 2MB flash. We could try to
1019 * pull these from CFI/SFDP, but these values should be good
1020 * enough for now.
1021 */
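		/*
		 * Worked example (illustrative): a 16 MiB flash gives
		 * max(40 * HZ, 40 * HZ * 8) = 320 * HZ, i.e. eight times the
		 * timeout of the 2 MiB reference device.
		 */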
1022 timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
1023 CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
1024 (unsigned long)(mtd->size / SZ_2M));
1025 ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
1026 if (ret)
1027 goto erase_err;
1028
b199489d 1029 /* REVISIT in some cases we could speed up erasing large regions
b02e7f3e 1030 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
1031 * to use "small sector erase", but that's not always optimal.
1032 */
1033
1034 /* "sector"-at-a-time erase */
5390a8df 1035 } else if (spi_nor_has_uniform_erase(nor)) {
b199489d 1036 while (len) {
1037 write_enable(nor);
1038
1039 ret = spi_nor_erase_sector(nor, addr);
1040 if (ret)
b199489d 1041 goto erase_err;
1042
1043 addr += mtd->erasesize;
1044 len -= mtd->erasesize;
1045
1046 ret = spi_nor_wait_till_ready(nor);
1047 if (ret)
1048 goto erase_err;
b199489d 1049 }
1050
1051 /* erase multiple sectors */
1052 } else {
1053 ret = spi_nor_erase_multi_sectors(nor, addr, len);
1054 if (ret)
1055 goto erase_err;
1056 }
1057
1058 write_disable(nor);
1059
d6af2694 1060erase_err:
1061 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
1062
b199489d 1063 return ret;
1064}
1065
1066/* Write status register and ensure bits in mask match written values */
1067static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
1068{
1069 int ret;
1070
1071 write_enable(nor);
1072 ret = write_sr(nor, status_new);
1073 if (ret)
1074 return ret;
1075
1076 ret = spi_nor_wait_till_ready(nor);
1077 if (ret)
1078 return ret;
1079
1080 ret = read_sr(nor);
1081 if (ret < 0)
1082 return ret;
1083
1084 return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
1085}
1086
1087static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
1088 uint64_t *len)
1089{
1090 struct mtd_info *mtd = &nor->mtd;
1091 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1092 int shift = ffs(mask) - 1;
1093 int pow;
1094
1095 if (!(sr & mask)) {
1096 /* No protection */
1097 *ofs = 0;
1098 *len = 0;
1099 } else {
1100 pow = ((sr & mask) ^ mask) >> shift;
1101 *len = mtd->size >> pow;
1102 if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
1103 *ofs = 0;
1104 else
1105 *ofs = mtd->size - *len;
1106 }
1107}
1108
1109/*
1110 * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
1111 * @locked is false); 0 otherwise
62593cf4 1112 */
1113static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1114 u8 sr, bool locked)
1115{
1116 loff_t lock_offs;
1117 uint64_t lock_len;
1118
1119 if (!len)
1120 return 1;
1121
1122 stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
1123
1124 if (locked)
1125 /* Requested range is a sub-range of locked range */
1126 return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
1127 else
1128 /* Requested range does not overlap with locked range */
1129 return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
1130}
1131
1132static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1133 u8 sr)
1134{
1135 return stm_check_lock_status_sr(nor, ofs, len, sr, true);
1136}
1137
1138static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
1139 u8 sr)
1140{
1141 return stm_check_lock_status_sr(nor, ofs, len, sr, false);
1142}
1143
1144/*
1145 * Lock a region of the flash. Compatible with ST Micro and similar flash.
3dd8012a 1146 * Supports the block protection bits BP{0,1,2} in the status register
62593cf4 1147 * (SR). Does not support these features found in newer SR bitfields:
1148 * - SEC: sector/block protect - only handle SEC=0 (block protect)
1149 * - CMP: complement protect - only support CMP=0 (range is not complemented)
1150 *
1151 * Support for the following is provided conditionally for some flash:
1152 * - TB: top/bottom protect
1153 *
1154 * Sample table portion for 8MB flash (Winbond w25q64fw):
1155 *
1156 * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
1157 * --------------------------------------------------------------------------
1158 * X | X | 0 | 0 | 0 | NONE | NONE
1159 * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
1160 * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
1161 * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
1162 * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
1163 * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
1164 * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
1165 * X | X | 1 | 1 | 1 | 8 MB | ALL
1166 * ------|-------|-------|-------|-------|---------------|-------------------
1167 * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
1168 * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
1169 * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
1170 * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
1171 * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
1172 * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
1173 *
1174 * Returns negative on errors, 0 on success.
1175 */
8cc7f33a 1176static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
b199489d 1177{
19763671 1178 struct mtd_info *mtd = &nor->mtd;
f49289ce 1179 int status_old, status_new;
1180 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1181 u8 shift = ffs(mask) - 1, pow, val;
f8860802 1182 loff_t lock_len;
1183 bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
1184 bool use_top;
b199489d 1185
b199489d 1186 status_old = read_sr(nor);
1187 if (status_old < 0)
1188 return status_old;
b199489d 1189
1190 /* If nothing in our range is unlocked, we don't need to do anything */
1191 if (stm_is_locked_sr(nor, ofs, len, status_old))
1192 return 0;
1193
1194 /* If anything below us is unlocked, we can't use 'bottom' protection */
1195 if (!stm_is_locked_sr(nor, 0, ofs, status_old))
1196 can_be_bottom = false;
1197
1198 /* If anything above us is unlocked, we can't use 'top' protection */
1199 if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
1200 status_old))
1201 can_be_top = false;
1202
1203 if (!can_be_bottom && !can_be_top)
1204 return -EINVAL;
1205
1206 /* Prefer top, if both are valid */
1207 use_top = can_be_top;
1208
f8860802 1209 /* lock_len: length of region that should end up locked */
1210 if (use_top)
1211 lock_len = mtd->size - ofs;
1212 else
1213 lock_len = ofs + len;
1214
1215 /*
1216 * Need smallest pow such that:
1217 *
1218 * 1 / (2^pow) <= (len / size)
1219 *
1220 * so (assuming power-of-2 size) we do:
1221 *
1222 * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
1223 */
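	/*
	 * Worked example (illustrative): on the 8 MB device from the table
	 * above, locking the upper 1 MB gives pow = ilog2(8 MB) - ilog2(1 MB)
	 * = 23 - 20 = 3, so val sets BP2 and clears BP1/BP0, matching the
	 * "1 MB | Upper 1/8" row.
	 */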
f8860802 1224 pow = ilog2(mtd->size) - ilog2(lock_len);
1225 val = mask - (pow << shift);
1226 if (val & ~mask)
1227 return -EINVAL;
1228 /* Don't "lock" with no region! */
1229 if (!(val & mask))
1230 return -EINVAL;
1231
3dd8012a 1232 status_new = (status_old & ~mask & ~SR_TB) | val;
b199489d 1233
1234 /* Disallow further writes if WP pin is asserted */
1235 status_new |= SR_SRWD;
1236
1237 if (!use_top)
1238 status_new |= SR_TB;
1239
1240 /* Don't bother if they're the same */
1241 if (status_new == status_old)
1242 return 0;
1243
b199489d 1244 /* Only modify protection if it will not unlock other areas */
4c0dba44 1245 if ((status_new & mask) < (status_old & mask))
62593cf4 1246 return -EINVAL;
b199489d 1247
2666067f 1248 return write_sr_and_check(nor, status_new, mask);
1249}
1250
1251/*
1252 * Unlock a region of the flash. See stm_lock() for more info
1253 *
1254 * Returns negative on errors, 0 on success.
1255 */
8cc7f33a 1256static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
b199489d 1257{
19763671 1258 struct mtd_info *mtd = &nor->mtd;
f49289ce 1259 int status_old, status_new;
1260 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1261 u8 shift = ffs(mask) - 1, pow, val;
f8860802 1262 loff_t lock_len;
1263 bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
1264 bool use_top;
b199489d 1265
b199489d 1266 status_old = read_sr(nor);
1267 if (status_old < 0)
1268 return status_old;
b199489d 1269
1270 /* If nothing in our range is locked, we don't need to do anything */
1271 if (stm_is_unlocked_sr(nor, ofs, len, status_old))
1272 return 0;
1273
1274 /* If anything below us is locked, we can't use 'top' protection */
1275 if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
1276 can_be_top = false;
1277
1278 /* If anything above us is locked, we can't use 'bottom' protection */
1279 if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
1280 status_old))
1281 can_be_bottom = false;
1282
1283 if (!can_be_bottom && !can_be_top)
62593cf4 1284 return -EINVAL;
b199489d 1285
1286 /* Prefer top, if both are valid */
1287 use_top = can_be_top;
1288
f8860802 1289 /* lock_len: length of region that should remain locked */
1290 if (use_top)
1291 lock_len = mtd->size - (ofs + len);
1292 else
1293 lock_len = ofs;
f8860802 1294
1295 /*
1296 * Need largest pow such that:
1297 *
1298 * 1 / (2^pow) >= (len / size)
1299 *
1300 * so (assuming power-of-2 size) we do:
1301 *
1302 * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
1303 */
1304 pow = ilog2(mtd->size) - order_base_2(lock_len);
1305 if (lock_len == 0) {
1306 val = 0; /* fully unlocked */
1307 } else {
1308 val = mask - (pow << shift);
1309 /* Some power-of-two sizes are not supported */
1310 if (val & ~mask)
1311 return -EINVAL;
b199489d
HS
1312 }
1313
3dd8012a 1314 status_new = (status_old & ~mask & ~SR_TB) | val;
62593cf4 1315
47b8edbf 1316 /* Don't protect status register if we're fully unlocked */
06586204 1317 if (lock_len == 0)
47b8edbf
BN
1318 status_new &= ~SR_SRWD;
1319
3dd8012a
BN
1320 if (!use_top)
1321 status_new |= SR_TB;
1322
4c0dba44
BN
1323 /* Don't bother if they're the same */
1324 if (status_new == status_old)
1325 return 0;
1326
62593cf4 1327 /* Only modify protection if it will not lock other areas */
4c0dba44 1328 if ((status_new & mask) > (status_old & mask))
1329 return -EINVAL;
1330
2666067f 1331 return write_sr_and_check(nor, status_new, mask);
1332}
1333
1334/*
1335 * Check if a region of the flash is (completely) locked. See stm_lock() for
1336 * more info.
1337 *
1338 * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
1339 * negative on errors.
1340 */
1341static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
1342{
1343 int status;
1344
1345 status = read_sr(nor);
1346 if (status < 0)
1347 return status;
1348
1349 return stm_is_locked_sr(nor, ofs, len, status);
1350}
1351
1352static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1353{
1354 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1355 int ret;
1356
1357 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
1358 if (ret)
1359 return ret;
1360
1361 ret = nor->flash_lock(nor, ofs, len);
1362
1363 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
1364 return ret;
1365}
1366
1367static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1368{
1369 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1370 int ret;
1371
1372 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
1373 if (ret)
1374 return ret;
1375
1376 ret = nor->flash_unlock(nor, ofs, len);
1377
1378 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
1379 return ret;
1380}
1381
1382static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1383{
1384 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1385 int ret;
1386
1387 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
1388 if (ret)
1389 return ret;
1390
1391 ret = nor->flash_is_locked(nor, ofs, len);
1392
1393 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
1394 return ret;
1395}
1396
1397/*
 1398 * Write the Status Register and the Configuration Register with 2 bytes.
1399 * The first byte will be written to the status register, while the
1400 * second byte will be written to the configuration register.
1401 * Return negative if error occurred.
1402 */
1403static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
1404{
1405 int ret;
1406
1407 write_enable(nor);
1408
1409 ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
1410 if (ret < 0) {
1411 dev_err(nor->dev,
1412 "error while writing configuration register\n");
1413 return -EINVAL;
1414 }
1415
1416 ret = spi_nor_wait_till_ready(nor);
1417 if (ret) {
1418 dev_err(nor->dev,
1419 "timeout while writing configuration register\n");
1420 return ret;
1421 }
1422
1423 return 0;
1424}
1425
1426/**
1427 * macronix_quad_enable() - set QE bit in Status Register.
1428 * @nor: pointer to a 'struct spi_nor'
1429 *
1430 * Set the Quad Enable (QE) bit in the Status Register.
1431 *
1432 * bit 6 of the Status Register is the QE bit for Macronix like QSPI memories.
1433 *
1434 * Return: 0 on success, -errno otherwise.
1435 */
1436static int macronix_quad_enable(struct spi_nor *nor)
1437{
1438 int ret, val;
1439
1440 val = read_sr(nor);
1441 if (val < 0)
1442 return val;
1443 if (val & SR_QUAD_EN_MX)
1444 return 0;
1445
1446 write_enable(nor);
1447
1448 write_sr(nor, val | SR_QUAD_EN_MX);
1449
1450 ret = spi_nor_wait_till_ready(nor);
1451 if (ret)
1452 return ret;
1453
1454 ret = read_sr(nor);
1455 if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
1456 dev_err(nor->dev, "Macronix Quad bit not set\n");
1457 return -EINVAL;
1458 }
1459
1460 return 0;
1461}
1462
1463/**
 1464 * spansion_quad_enable() - set QE bit in Configuration Register.
1465 * @nor: pointer to a 'struct spi_nor'
1466 *
1467 * Set the Quad Enable (QE) bit in the Configuration Register.
 1468 * This function is kept for legacy purposes because it has been used for a
 1469 * long time without anybody complaining, but it should be considered
 1470 * deprecated and maybe buggy.
1471 * First, this function doesn't care about the previous values of the Status
1472 * and Configuration Registers when it sets the QE bit (bit 1) in the
1473 * Configuration Register: all other bits are cleared, which may have unwanted
1474 * side effects like removing some block protections.
 1475 * Secondly, it uses the Read Configuration Register (35h) instruction, though
 1476 * a few very old memories don't support it. If a pull-up
1477 * resistor is present on the MISO/IO1 line, we might still be able to pass the
1478 * "read back" test because the QSPI memory doesn't recognize the command,
1479 * so leaves the MISO/IO1 line state unchanged, hence read_cr() returns 0xFF.
1480 *
1481 * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
1482 * memories.
1483 *
1484 * Return: 0 on success, -errno otherwise.
1485 */
1486static int spansion_quad_enable(struct spi_nor *nor)
1487{
1488 u8 sr_cr[2] = {0, CR_QUAD_EN_SPAN};
1489 int ret;
1490
1491 ret = write_sr_cr(nor, sr_cr);
1492 if (ret)
1493 return ret;
1494
1495 /* read back and check it */
1496 ret = read_cr(nor);
1497 if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
1498 dev_err(nor->dev, "Spansion Quad bit not set\n");
1499 return -EINVAL;
1500 }
1501
1502 return 0;
1503}
1504
1505/**
1506 * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register.
1507 * @nor: pointer to a 'struct spi_nor'
1508 *
1509 * Set the Quad Enable (QE) bit in the Configuration Register.
1510 * This function should be used with QSPI memories not supporting the Read
1511 * Configuration Register (35h) instruction.
1512 *
1513 * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
1514 * memories.
1515 *
1516 * Return: 0 on success, -errno otherwise.
1517 */
1518static int spansion_no_read_cr_quad_enable(struct spi_nor *nor)
1519{
1520 u8 sr_cr[2];
1521 int ret;
1522
1523 /* Keep the current value of the Status Register. */
1524 ret = read_sr(nor);
1525 if (ret < 0) {
1526 dev_err(nor->dev, "error while reading status register\n");
1527 return -EINVAL;
1528 }
1529 sr_cr[0] = ret;
1530 sr_cr[1] = CR_QUAD_EN_SPAN;
1531
1532 return write_sr_cr(nor, sr_cr);
1533}
1534
1535/**
1536 * spansion_read_cr_quad_enable() - set QE bit in Configuration Register.
1537 * @nor: pointer to a 'struct spi_nor'
1538 *
1539 * Set the Quad Enable (QE) bit in the Configuration Register.
1540 * This function should be used with QSPI memories supporting the Read
1541 * Configuration Register (35h) instruction.
1542 *
1543 * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
1544 * memories.
1545 *
1546 * Return: 0 on success, -errno otherwise.
1547 */
1548static int spansion_read_cr_quad_enable(struct spi_nor *nor)
1549{
1550 struct device *dev = nor->dev;
1551 u8 sr_cr[2];
1552 int ret;
1553
1554 /* Check current Quad Enable bit value. */
1555 ret = read_cr(nor);
1556 if (ret < 0) {
1557 dev_err(dev, "error while reading configuration register\n");
1558 return -EINVAL;
1559 }
1560
1561 if (ret & CR_QUAD_EN_SPAN)
1562 return 0;
1563
1564 sr_cr[1] = ret | CR_QUAD_EN_SPAN;
1565
1566 /* Keep the current value of the Status Register. */
1567 ret = read_sr(nor);
1568 if (ret < 0) {
1569 dev_err(dev, "error while reading status register\n");
1570 return -EINVAL;
1571 }
1572 sr_cr[0] = ret;
1573
1574 ret = write_sr_cr(nor, sr_cr);
1575 if (ret)
1576 return ret;
1577
1578 /* Read back and check it. */
1579 ret = read_cr(nor);
1580 if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
1581 dev_err(nor->dev, "Spansion Quad bit not set\n");
1582 return -EINVAL;
1583 }
1584
1585 return 0;
1586}
1587
1588/**
1589 * sr2_bit7_quad_enable() - set QE bit in Status Register 2.
1590 * @nor: pointer to a 'struct spi_nor'
1591 *
1592 * Set the Quad Enable (QE) bit in the Status Register 2.
1593 *
1594 * This is one of the procedures to set the QE bit described in the SFDP
1595 * (JESD216 rev B) specification but no manufacturer using this procedure has
1596 * been identified yet, hence the name of the function.
1597 *
1598 * Return: 0 on success, -errno otherwise.
1599 */
1600static int sr2_bit7_quad_enable(struct spi_nor *nor)
1601{
1602 u8 sr2;
1603 int ret;
1604
1605 /* Check current Quad Enable bit value. */
1606 ret = nor->read_reg(nor, SPINOR_OP_RDSR2, &sr2, 1);
1607 if (ret)
1608 return ret;
1609 if (sr2 & SR2_QUAD_EN_BIT7)
1610 return 0;
1611
1612 /* Update the Quad Enable bit. */
1613 sr2 |= SR2_QUAD_EN_BIT7;
1614
1615 write_enable(nor);
1616
1617 ret = nor->write_reg(nor, SPINOR_OP_WRSR2, &sr2, 1);
1618 if (ret < 0) {
1619 dev_err(nor->dev, "error while writing status register 2\n");
1620 return -EINVAL;
1621 }
1622
1623 ret = spi_nor_wait_till_ready(nor);
1624 if (ret < 0) {
1625 dev_err(nor->dev, "timeout while writing status register 2\n");
1626 return ret;
1627 }
1628
1629 /* Read back and check it. */
1630 ret = nor->read_reg(nor, SPINOR_OP_RDSR2, &sr2, 1);
1631 if (!(ret > 0 && (sr2 & SR2_QUAD_EN_BIT7))) {
1632 dev_err(nor->dev, "SR2 Quad bit not set\n");
1633 return -EINVAL;
1634 }
1635
1636 return 0;
1637}
65153846 1638
09ffafb6 1639/* Used when the "_ext_id" is two bytes at most */
b199489d 1640#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
1641 .id = { \
1642 ((_jedec_id) >> 16) & 0xff, \
1643 ((_jedec_id) >> 8) & 0xff, \
1644 (_jedec_id) & 0xff, \
1645 ((_ext_id) >> 8) & 0xff, \
1646 (_ext_id) & 0xff, \
1647 }, \
1648 .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \
1649 .sector_size = (_sector_size), \
1650 .n_sectors = (_n_sectors), \
1651 .page_size = 256, \
06bb6f5a 1652 .flags = (_flags),
b199489d 1653
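/*
 * Example (illustrative): INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) expands to
 * .id = { 0xc2, 0x20, 0x16, 0x00, 0x00 }, .id_len = 3, 64 KiB sectors,
 * 64 sectors (4 MiB total) and 256-byte pages.
 */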
6d7604e5 1654#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
1655 .id = { \
1656 ((_jedec_id) >> 16) & 0xff, \
1657 ((_jedec_id) >> 8) & 0xff, \
1658 (_jedec_id) & 0xff, \
1659 ((_ext_id) >> 16) & 0xff, \
1660 ((_ext_id) >> 8) & 0xff, \
1661 (_ext_id) & 0xff, \
1662 }, \
1663 .id_len = 6, \
1664 .sector_size = (_sector_size), \
1665 .n_sectors = (_n_sectors), \
1666 .page_size = 256, \
06bb6f5a 1667 .flags = (_flags),
6d7604e5 1668
b199489d 1669#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
1670 .sector_size = (_sector_size), \
1671 .n_sectors = (_n_sectors), \
1672 .page_size = (_page_size), \
1673 .addr_width = (_addr_width), \
06bb6f5a 1674 .flags = (_flags),
b199489d 1675
1676#define S3AN_INFO(_jedec_id, _n_sectors, _page_size) \
1677 .id = { \
1678 ((_jedec_id) >> 16) & 0xff, \
1679 ((_jedec_id) >> 8) & 0xff, \
1680 (_jedec_id) & 0xff \
1681 }, \
1682 .id_len = 3, \
1683 .sector_size = (8*_page_size), \
1684 .n_sectors = (_n_sectors), \
1685 .page_size = _page_size, \
1686 .addr_width = 3, \
1687 .flags = SPI_NOR_NO_FR | SPI_S3AN,
1688
1689static int
1690mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
1691 const struct sfdp_parameter_header *bfpt_header,
1692 const struct sfdp_bfpt *bfpt,
1693 struct spi_nor_flash_parameter *params)
1694{
1695 /*
1696 * MX25L25635F supports 4B opcodes but MX25L25635E does not.
1697 * Unfortunately, Macronix has re-used the same JEDEC ID for both
1698 * variants which prevents us from defining a new entry in the parts
1699 * table.
1700 * We need a way to differentiate MX25L25635E and MX25L25635F, and it
1701 * seems that the F version advertises support for Fast Read 4-4-4 in
1702 * its BFPT table.
1703 */
1704 if (bfpt->dwords[BFPT_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4)
1705 nor->flags |= SNOR_F_4B_OPCODES;
1706
1707 return 0;
1708}
1709
1710static struct spi_nor_fixups mx25l25635_fixups = {
1711 .post_bfpt = mx25l25635_post_bfpt_fixups,
1712};
1713
1714/* NOTE: double check command sets and memory organization when you add
 1715 * more nor chips. This current list focuses on newer chips, which
 1716 * have been converging on command sets which include JEDEC ID.
1717 *
1718 * All newly added entries should describe *hardware* and should use SECT_4K
1719 * (or SECT_4K_PMC) if hardware supports erasing 4 KiB sectors. For usage
 1720 * scenarios excluding small sectors there is a config option that can be
 1721 * disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS.
 1722 * For historical (and compatibility) reasons (before we got the above config)
 1723 * some old entries may be missing the 4K flag.
b199489d 1724 */
06bb6f5a 1725static const struct flash_info spi_nor_ids[] = {
1726 /* Atmel -- some are (confusingly) marketed as "DataFlash" */
1727 { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
1728 { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
1729
1730 { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
b08618c9 1731 { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
1732 { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
1733 { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
1734
1735 { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
1736 { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
1737 { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
1738 { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
1739
1740 { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
1741
1742 /* EON -- en25xxx */
1743 { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
1744 { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
1745 { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
1746 { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
1747 { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
1748 { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16,
1749 SECT_4K | SPI_NOR_DUAL_READ) },
771ff17e 1750 { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) },
1751 { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128,
1752 SECT_4K | SPI_NOR_DUAL_READ) },
a41595b3 1753 { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
b199489d 1754 { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
c19900ed 1755 { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
1756
1757 /* ESMT */
fcf690a2 1758 { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
1759 { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
1760 { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },
1761
1762 /* Everspin */
282e45dc 1763 { "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
1764 { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
1765 { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
edd0c8f4 1766 { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
b199489d 1767
1768 /* Fujitsu */
1769 { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
1770
b199489d 1771 /* GigaDevice */
1772 {
1773 "gd25q16", INFO(0xc84015, 0, 64 * 1024, 32,
1774 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
1775 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
1776 },
1777 {
1778 "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
1779 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
1780 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
1781 },
1782 {
1783 "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
1784 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
1785 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
1786 },
1787 {
1788 "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
1789 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
1790 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
1791 },
1792 {
1793 "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
1794 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
1795 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
1796 },
1797 {
1798 "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
1799 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
1800 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
1801 },
1802 {
1803 "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
1804 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
1805 SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
1806 .quad_enable = macronix_quad_enable,
1807 },
1808
1809 /* Intel/Numonyx -- xxxs33b */
1810 { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
1811 { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
1812 { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
1813
b79c332f 1814 /* ISSI */
29d6b29f
SN
1815 { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
1816 { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
ded8a044 1817 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
b4228478
FC
1818 { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32,
1819 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
17407ec3
RP
1820 { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
1821 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
4cc106f8
UKK
1822 { "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64,
1823 SECT_4K | SPI_NOR_DUAL_READ) },
1824 { "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128,
1825 SECT_4K | SPI_NOR_DUAL_READ) },
29d6b29f 1826 { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
34354d4b 1827 SECT_4K | SPI_NOR_DUAL_READ) },
c7aa1b77 1828 { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512,
d05e21e3
LX
1829 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
1830 SPI_NOR_4B_OPCODES) },
d616f81c
KR
1831 { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
1832 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
1833 { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
1834 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
1835 { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
1836 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
b79c332f 1837
b199489d 1838 /* Macronix */
660b5b07 1839 { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
b199489d
HS
1840 { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
1841 { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
1842 { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
1843 { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
0501f2e5 1844 { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
b199489d 1845 { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
0501f2e5 1846 { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
9f3cd453 1847 { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) },
748df6d8
AV
1848 { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64,
1849 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
9f3cd453
AK
1850 { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
1851 { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
81a1209c 1852 { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
b199489d
HS
1853 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
1854 { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
81554171
AS
1855 { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
1856 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2bffa65d
BB
1857 { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
1858 SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
1859 .fixups = &mx25l25635_fixups },
b0fcb4b4 1860 { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
e36bb65e
FS
1861 { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16,
1862 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
b199489d 1863 { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
d342b6a9 1864 { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
af18ba48 1865 { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
ce398a81 1866 { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
b199489d
HS
1867 { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
1868
0005aad0 1869 /* Micron <--> ST Micro */
61e46118 1870 { "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_QUAD_READ) },
548cd3ab 1871 { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
f9bcb6dc 1872 { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
0db7fae2 1873 { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
2a06c7b1 1874 { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
4607777c
EG
1875 { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
1876 { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
d7c9ade2 1877 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
835ed7bf 1878 { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
548cd3ab
BH
1879 { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
1880 { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
193fb3c1 1881 { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
1882 { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
21ed90ac
AS
1883 { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096,
1884 SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
1885 NO_CHIP_ERASE) },
56c6855c 1886 { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
b199489d 1887
a98086e0
YNG
1888 /* Micron */
1889 {
1890 "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
2bda2f81
YNG
1891 SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
1892 SPI_NOR_4B_OPCODES)
a98086e0
YNG
1893 },
1894
b199489d
HS
1895 /* PMC */
1896 { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
1897 { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
1898 { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
1899
0074a8f3 1900 /* Spansion/Cypress -- single (large) sector size only, at least
b199489d
HS
1901 * for the chips listed here (without boot sectors).
1902 */
9ab86995 1903 { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
0f12a27b 1904 { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
50685024
AC
1905 { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
1906 SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
1907 { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
1908 SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
c4b3eacc
AS
1909 { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
1910 { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
dcb4b22e
JB
1911 { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
1912 SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
1913 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | USE_CLSR) },
dfd2b745 1914 { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
b199489d
HS
1915 { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
1916 { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
1917 { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
c4b3eacc
AS
1918 { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
1919 { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
b199489d
HS
1920 { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
1921 { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
1922 { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
1923 { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
1924 { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
7c748f57 1925 { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
adf508c3
JE
1926 { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
1927 { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
b199489d 1928 { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
c0826679 1929 { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
c19900ed 1930 { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
413780d7 1931 { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
aada20cd 1932 { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
022a400f 1933 { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) },
d8b494a3 1934 { "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
0074a8f3
RG
1935 { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
1936 { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
b199489d
HS
1937
1938 /* SST -- large erase sizes are "overlays", "sectors" are 4K */
1939 { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
1940 { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
1941 { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
1942 { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
1943 { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
1944 { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
1945 { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
1946 { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
a1d97ef9 1947 { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) },
c887be71 1948 { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) },
b199489d 1949 { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
f02985b7 1950 { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
18f7ce2f 1951 { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
b199489d
HS
1952
1953 /* ST Microelectronics -- newer production may have feature updates */
1954 { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
1955 { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
1956 { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
1957 { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
1958 { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
1959 { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
1960 { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
1961 { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
1962 { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
b199489d
HS
1963
1964 { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
1965 { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
1966 { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
1967 { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
1968 { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
1969 { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
1970 { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
1971 { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
1972 { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
1973
1974 { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
1975 { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
1976 { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
1977
1978 { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
1979 { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
1980 { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
1981
1982 { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
1983 { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
1984 { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
1985 { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
1986 { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
f2fabe16 1987 { "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
b199489d
HS
1988
1989 /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
40d19ab6 1990 { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
b199489d
HS
1991 { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
1992 { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
1993 { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
1994 { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
1995 { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
ace3cbdd
NA
1996 {
1997 "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
1998 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
1999 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2000 },
b199489d 2001 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
88489c29
RM
2002 {
2003 "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32,
2004 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2005 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2006 },
34fc99db
AK
2007 { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
2008 { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
2009 { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) },
b199489d 2010 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
9648388f
BN
2011 {
2012 "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
2013 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2014 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2015 },
7fccf56e
SD
2016 {
2017 "w25q32jv", INFO(0xef7016, 0, 64 * 1024, 64,
2018 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2019 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2020 },
b199489d
HS
2021 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
2022 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
9648388f
BN
2023 {
2024 "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
2025 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2026 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2027 },
2028 {
2029 "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
2030 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2031 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2032 },
d720a433
RM
2033 {
2034 "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
2035 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2036 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2037 },
b199489d
HS
2038 { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
2039 { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
2040 { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
d7c9ade2 2041 { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
0cbef932
BH
2042 { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
2043 SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
b199489d
HS
2044
2045 /* Catalyst / On Semiconductor -- non-JEDEC */
2046 { "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2047 { "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2048 { "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2049 { "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2050 { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
e99ca98f
RR
2051
2052 /* Xilinx S3AN Internal Flash */
2053 { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
2054 { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
2055 { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
2056 { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
2057 { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
ce5013ff
HM
2058
2059 /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
2060 { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2061 { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
b199489d
HS
2062 { },
2063};
2064
06bb6f5a 2065static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
b199489d
HS
2066{
2067 int tmp;
09ffafb6 2068 u8 id[SPI_NOR_MAX_ID_LEN];
06bb6f5a 2069 const struct flash_info *info;
b199489d 2070
09ffafb6 2071 tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
b199489d 2072 if (tmp < 0) {
92aae4ce 2073 dev_err(nor->dev, "error %d reading JEDEC ID\n", tmp);
b199489d
HS
2074 return ERR_PTR(tmp);
2075 }
b199489d
HS
2076
2077 for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
06bb6f5a 2078 info = &spi_nor_ids[tmp];
09ffafb6
HS
2079 if (info->id_len) {
2080 if (!memcmp(info->id, id, info->id_len))
b199489d
HS
2081 return &spi_nor_ids[tmp];
2082 }
2083 }
3ddc8adb
GU
2084 dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
2085 SPI_NOR_MAX_ID_LEN, id);
b199489d
HS
2086 return ERR_PTR(-ENODEV);
2087}
2088
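/*
 * Illustrative sketch (not part of the driver): how the ID matching in
 * spi_nor_read_id() above plays out for one entry of spi_nor_ids[]. A
 * Winbond w25q128 answers RDID (0x9f) with the bytes ef 40 18; the table
 * entry built by INFO(0xef4018, ...) stores exactly those three bytes with
 * id_len == 3, so the memcmp() in the loop matches on the first three
 * bytes read.
 */
#if 0
	u8 id[SPI_NOR_MAX_ID_LEN];

	nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
	/* id[0..2] == { 0xef, 0x40, 0x18 } selects the "w25q128" entry. */
#endif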
b199489d
HS
2089static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
2090 size_t *retlen, u_char *buf)
2091{
2092 struct spi_nor *nor = mtd_to_spi_nor(mtd);
2093 int ret;
2094
2095 dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
2096
2097 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
2098 if (ret)
2099 return ret;
2100
26f9bcad 2101 while (len) {
e99ca98f
RR
2102 loff_t addr = from;
2103
2104 if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
2105 addr = spi_nor_s3an_addr_convert(nor, addr);
2106
2107 ret = nor->read(nor, addr, len, buf);
26f9bcad
MS
2108 if (ret == 0) {
2109 /* We shouldn't see 0-length reads */
2110 ret = -EIO;
2111 goto read_err;
2112 }
2113 if (ret < 0)
2114 goto read_err;
b199489d 2115
26f9bcad
MS
2116 WARN_ON(ret > len);
2117 *retlen += ret;
2118 buf += ret;
2119 from += ret;
2120 len -= ret;
2121 }
2122 ret = 0;
59451e12 2123
26f9bcad
MS
2124read_err:
2125 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
2126 return ret;
b199489d
HS
2127}
2128
2129static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
2130 size_t *retlen, const u_char *buf)
2131{
2132 struct spi_nor *nor = mtd_to_spi_nor(mtd);
2133 size_t actual;
2134 int ret;
2135
2136 dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
2137
2138 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
2139 if (ret)
2140 return ret;
2141
b199489d
HS
2142 write_enable(nor);
2143
2144 nor->sst_write_second = false;
2145
2146 actual = to % 2;
2147 /* Start write from odd address. */
2148 if (actual) {
b02e7f3e 2149 nor->program_opcode = SPINOR_OP_BP;
b199489d
HS
2150
2151 /* write one byte. */
2dd087b1 2152 ret = nor->write(nor, to, 1, buf);
0bad7b93
MS
2153 if (ret < 0)
2154 goto sst_write_err;
2155 WARN(ret != 1, "While writing 1 byte written %i bytes\n",
2156 (int)ret);
b94ed087 2157 ret = spi_nor_wait_till_ready(nor);
b199489d 2158 if (ret)
0bad7b93 2159 goto sst_write_err;
b199489d
HS
2160 }
2161 to += actual;
2162
2163 /* Write out most of the data here. */
2164 for (; actual < len - 1; actual += 2) {
b02e7f3e 2165 nor->program_opcode = SPINOR_OP_AAI_WP;
b199489d
HS
2166
2167 /* write two bytes. */
2dd087b1 2168 ret = nor->write(nor, to, 2, buf + actual);
0bad7b93
MS
2169 if (ret < 0)
2170 goto sst_write_err;
2171 WARN(ret != 2, "While writing 2 bytes written %i bytes\n",
2172 (int)ret);
b94ed087 2173 ret = spi_nor_wait_till_ready(nor);
b199489d 2174 if (ret)
0bad7b93 2175 goto sst_write_err;
b199489d
HS
2176 to += 2;
2177 nor->sst_write_second = true;
2178 }
2179 nor->sst_write_second = false;
2180
2181 write_disable(nor);
b94ed087 2182 ret = spi_nor_wait_till_ready(nor);
b199489d 2183 if (ret)
0bad7b93 2184 goto sst_write_err;
b199489d
HS
2185
2186 /* Write out trailing byte if it exists. */
2187 if (actual != len) {
2188 write_enable(nor);
2189
b02e7f3e 2190 nor->program_opcode = SPINOR_OP_BP;
2dd087b1 2191 ret = nor->write(nor, to, 1, buf + actual);
0bad7b93
MS
2192 if (ret < 0)
2193 goto sst_write_err;
2194 WARN(ret != 1, "While writing 1 byte written %i bytes\n",
2195 (int)ret);
b94ed087 2196 ret = spi_nor_wait_till_ready(nor);
b199489d 2197 if (ret)
0bad7b93 2198 goto sst_write_err;
b199489d 2199 write_disable(nor);
2dd087b1 2200 actual += 1;
b199489d 2201 }
0bad7b93 2202sst_write_err:
2dd087b1 2203 *retlen += actual;
b199489d
HS
2204 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
2205 return ret;
2206}
2207
2208/*
2209 * Write an address range to the nor chip. Data must be written in
2210 * FLASH_PAGESIZE chunks. The address range may be any size provided
2211 * it is within the physical boundaries.
2212 */
2213static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
2214 size_t *retlen, const u_char *buf)
2215{
2216 struct spi_nor *nor = mtd_to_spi_nor(mtd);
e5d05cbd
MS
2217 size_t page_offset, page_remain, i;
2218 ssize_t ret;
b199489d
HS
2219
2220 dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
2221
2222 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
2223 if (ret)
2224 return ret;
2225
e5d05cbd
MS
2226 for (i = 0; i < len; ) {
2227 ssize_t written;
e99ca98f 2228 loff_t addr = to + i;
b199489d 2229
e99ca98f
RR
2230 /*
2231 * If page_size is a power of two, the offset can be quickly
2232 * calculated with an AND operation. In the other cases we
2233 * need to do a modulus operation (more expensive).
2234 * Power-of-two numbers have only one bit set, so we can use
2235 * the hweight32() helper to detect whether we need to do a
2236 * modulus (do_div()) or not; a worked example follows this function.
2237 */
2238 if (hweight32(nor->page_size) == 1) {
2239 page_offset = addr & (nor->page_size - 1);
2240 } else {
2241 uint64_t aux = addr;
b199489d 2242
e99ca98f
RR
2243 page_offset = do_div(aux, nor->page_size);
2244 }
b199489d 2245 /* the size of data remaining on the first page */
e5d05cbd
MS
2246 page_remain = min_t(size_t,
2247 nor->page_size - page_offset, len - i);
2248
e99ca98f
RR
2249 if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
2250 addr = spi_nor_s3an_addr_convert(nor, addr);
2251
e5d05cbd 2252 write_enable(nor);
e99ca98f 2253 ret = nor->write(nor, addr, page_remain, buf + i);
0bad7b93
MS
2254 if (ret < 0)
2255 goto write_err;
e5d05cbd 2256 written = ret;
1d61dcb3 2257
e5d05cbd
MS
2258 ret = spi_nor_wait_till_ready(nor);
2259 if (ret)
2260 goto write_err;
2261 *retlen += written;
2262 i += written;
b199489d
HS
2263 }
2264
2265write_err:
2266 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
1d61dcb3 2267 return ret;
b199489d
HS
2268}
2269
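/*
 * Worked example (illustrative only) for the page_offset computation in
 * spi_nor_write() above: with the common 256-byte page size (a power of
 * two, hweight32(256) == 1), addr = 0x1234 gives page_offset =
 * 0x1234 & 0xff = 0x34. With the 264-byte pages of the Xilinx S3AN parts
 * (hweight32(264) == 2), addr = 1000 goes through do_div() and leaves a
 * remainder of 208.
 */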
f10aa369 2270static int spi_nor_check(struct spi_nor *nor)
b199489d
HS
2271{
2272 if (!nor->dev || !nor->read || !nor->write ||
c67cbb83 2273 !nor->read_reg || !nor->write_reg) {
b199489d
HS
2274 pr_err("spi-nor: please fill all the necessary fields!\n");
2275 return -EINVAL;
2276 }
2277
b199489d
HS
2278 return 0;
2279}
2280
b296379f 2281static int s3an_nor_scan(struct spi_nor *nor)
e99ca98f
RR
2282{
2283 int ret;
2284 u8 val;
2285
2286 ret = nor->read_reg(nor, SPINOR_OP_XRDSR, &val, 1);
2287 if (ret < 0) {
2288 dev_err(nor->dev, "error %d reading XRDSR\n", (int) ret);
2289 return ret;
2290 }
2291
2292 nor->erase_opcode = SPINOR_OP_XSE;
2293 nor->program_opcode = SPINOR_OP_XPP;
2294 nor->read_opcode = SPINOR_OP_READ;
2295 nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
2296
2297 /*
2298 * These flashes have a page size of 264 or 528 bytes (known as
2299 * Default addressing mode). It can be changed to a more standard
2300 * Power of two mode where the page size is 256/512. This comes
2301 * at a price: there is 3% less space, the data is corrupted,
2302 * and the page size cannot be changed back to Default addressing
2303 * mode.
2304 *
2305 * The current addressing mode can be read from the XRDSR register
2306 * and should not be changed, because switching it is a destructive operation.
2307 */
2308 if (val & XSR_PAGESIZE) {
2309 /* Flash in Power of 2 mode */
2310 nor->page_size = (nor->page_size == 264) ? 256 : 512;
2311 nor->mtd.writebufsize = nor->page_size;
b296379f 2312 nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
e99ca98f
RR
2313 nor->mtd.erasesize = 8 * nor->page_size;
2314 } else {
2315 /* Flash in Default addressing mode */
2316 nor->flags |= SNOR_F_S3AN_ADDR_DEFAULT;
2317 }
2318
2319 return 0;
2320}
2321
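/*
 * Worked example (illustrative only) for the Power of 2 branch above,
 * using the "3S700AN" entry from spi_nor_ids[] (512 sectors, 264-byte
 * default pages): page_size becomes 256, writebufsize = 256, erasesize =
 * 8 * 256 = 2048 bytes and mtd.size = 8 * 256 * 512 = 1 MiB.
 */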
cfc5604c
CP
2322static void
2323spi_nor_set_read_settings(struct spi_nor_read_command *read,
2324 u8 num_mode_clocks,
2325 u8 num_wait_states,
2326 u8 opcode,
2327 enum spi_nor_protocol proto)
2328{
2329 read->num_mode_clocks = num_mode_clocks;
2330 read->num_wait_states = num_wait_states;
2331 read->opcode = opcode;
2332 read->proto = proto;
2333}
2334
2335static void
2336spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
2337 u8 opcode,
2338 enum spi_nor_protocol proto)
2339{
2340 pp->opcode = opcode;
2341 pp->proto = proto;
2342}
2343
f10aa369
BB
2344static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
2345{
2346 size_t i;
2347
2348 for (i = 0; i < size; i++)
2349 if (table[i][0] == (int)hwcaps)
2350 return table[i][1];
2351
2352 return -EINVAL;
2353}
2354
2355static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
2356{
2357 static const int hwcaps_read2cmd[][2] = {
2358 { SNOR_HWCAPS_READ, SNOR_CMD_READ },
2359 { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
2360 { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
2361 { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
2362 { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
2363 { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
2364 { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
2365 { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
2366 { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
2367 { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
2368 { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
2369 { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
2370 { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
2371 { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
2372 { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
2373 };
2374
2375 return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
2376 ARRAY_SIZE(hwcaps_read2cmd));
2377}
2378
2379static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
2380{
2381 static const int hwcaps_pp2cmd[][2] = {
2382 { SNOR_HWCAPS_PP, SNOR_CMD_PP },
2383 { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
2384 { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
2385 { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
2386 { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
2387 { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
2388 { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
2389 };
2390
2391 return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
2392 ARRAY_SIZE(hwcaps_pp2cmd));
2393}
2394
f384b352
CP
2395/*
2396 * Serial Flash Discoverable Parameters (SFDP) parsing.
2397 */
2398
b038e8e3
TA
2399/**
2400 * spi_nor_read_raw() - raw read of serial flash memory.
2401 * The read_opcode, addr_width and read_dummy members of the
2402 * struct spi_nor should have been set up previously.
2403 *
2404 * @nor: pointer to a 'struct spi_nor'
2405 * @addr: offset in the serial flash memory
2406 * @len: number of bytes to read
1d5ceff2 2407 * @buf: buffer where the data is copied into (dma-safe memory)
b038e8e3
TA
2408 *
2409 * Return: 0 on success, -errno otherwise.
2410 */
2411static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
2412{
2413 int ret;
2414
2415 while (len) {
2416 ret = nor->read(nor, addr, len, buf);
2417 if (!ret || ret > len)
2418 return -EIO;
2419 if (ret < 0)
2420 return ret;
2421
2422 buf += ret;
2423 addr += ret;
2424 len -= ret;
2425 }
2426 return 0;
2427}
2428
f384b352
CP
2429/**
2430 * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
2431 * @nor: pointer to a 'struct spi_nor'
2432 * @addr: offset in the SFDP area to start reading data from
2433 * @len: number of bytes to read
bfa41337 2434 * @buf: buffer where the SFDP data are copied into (dma-safe memory)
f384b352
CP
2435 *
2436 * Whatever the actual numbers of bytes for address and dummy cycles are
2437 * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
2438 * followed by a 3-byte address and 8 dummy clock cycles.
2439 *
2440 * Return: 0 on success, -errno otherwise.
2441 */
2442static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
2443 size_t len, void *buf)
2444{
2445 u8 addr_width, read_opcode, read_dummy;
2446 int ret;
2447
2448 read_opcode = nor->read_opcode;
2449 addr_width = nor->addr_width;
2450 read_dummy = nor->read_dummy;
2451
2452 nor->read_opcode = SPINOR_OP_RDSFDP;
2453 nor->addr_width = 3;
2454 nor->read_dummy = 8;
2455
b038e8e3 2456 ret = spi_nor_read_raw(nor, addr, len, buf);
f384b352 2457
f384b352
CP
2458 nor->read_opcode = read_opcode;
2459 nor->addr_width = addr_width;
2460 nor->read_dummy = read_dummy;
2461
2462 return ret;
2463}
2464
bfa41337
CP
2465/**
2466 * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
2467 * @nor: pointer to a 'struct spi_nor'
2468 * @addr: offset in the SFDP area to start reading data from
2469 * @len: number of bytes to read
2470 * @buf: buffer where the SFDP data are copied into
2471 *
2472 * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is now not
2473 * guaranteed to be dma-safe.
2474 *
2475 * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
2476 * otherwise.
2477 */
2478static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
2479 size_t len, void *buf)
2480{
2481 void *dma_safe_buf;
2482 int ret;
2483
2484 dma_safe_buf = kmalloc(len, GFP_KERNEL);
2485 if (!dma_safe_buf)
2486 return -ENOMEM;
2487
2488 ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
2489 memcpy(buf, dma_safe_buf, len);
2490 kfree(dma_safe_buf);
2491
2492 return ret;
2493}
2494
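/*
 * Illustrative sketch (not part of the driver): reading the start of the
 * SFDP area with the helper above. Address 0 holds the ASCII signature
 * "SFDP", i.e. the little-endian value SFDP_SIGNATURE (0x50444653)
 * defined at the top of this file.
 */
#if 0
	__le32 sig;

	spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(sig), &sig);
	/* le32_to_cpu(sig) == SFDP_SIGNATURE on an SFDP-capable flash */
#endif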
f384b352
CP
2495/* Fast Read settings. */
2496
87f3ed18 2497static void
f384b352
CP
2498spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
2499 u16 half,
2500 enum spi_nor_protocol proto)
2501{
2502 read->num_mode_clocks = (half >> 5) & 0x07;
2503 read->num_wait_states = (half >> 0) & 0x1f;
2504 read->opcode = (half >> 8) & 0xff;
2505 read->proto = proto;
2506}
2507
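/*
 * Worked example (illustrative only) of the half-word decoding above: a
 * typical BFPT encoding for Fast Read 1-1-4 is half = 0x6b08, which
 * yields opcode = 0x6b, num_mode_clocks = 0 and num_wait_states = 8
 * (i.e. eight dummy clock cycles).
 */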
2508struct sfdp_bfpt_read {
2509 /* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
2510 u32 hwcaps;
2511
2512 /*
2513 * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
2514 * whether the Fast Read x-y-z command is supported.
2515 */
2516 u32 supported_dword;
2517 u32 supported_bit;
2518
2519 /*
2520 * The half-word at offset <settings_shift> in <settings_dword> BFPT DWORD
2521 * encodes the op code, the number of mode clocks and the number of wait
2522 * states to be used by Fast Read x-y-z command.
2523 */
2524 u32 settings_dword;
2525 u32 settings_shift;
2526
2527 /* The SPI protocol for this Fast Read x-y-z command. */
2528 enum spi_nor_protocol proto;
2529};
2530
2531static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
2532 /* Fast Read 1-1-2 */
2533 {
2534 SNOR_HWCAPS_READ_1_1_2,
2535 BFPT_DWORD(1), BIT(16), /* Supported bit */
2536 BFPT_DWORD(4), 0, /* Settings */
2537 SNOR_PROTO_1_1_2,
2538 },
2539
2540 /* Fast Read 1-2-2 */
2541 {
2542 SNOR_HWCAPS_READ_1_2_2,
2543 BFPT_DWORD(1), BIT(20), /* Supported bit */
2544 BFPT_DWORD(4), 16, /* Settings */
2545 SNOR_PROTO_1_2_2,
2546 },
2547
2548 /* Fast Read 2-2-2 */
2549 {
2550 SNOR_HWCAPS_READ_2_2_2,
2551 BFPT_DWORD(5), BIT(0), /* Supported bit */
2552 BFPT_DWORD(6), 16, /* Settings */
2553 SNOR_PROTO_2_2_2,
2554 },
2555
2556 /* Fast Read 1-1-4 */
2557 {
2558 SNOR_HWCAPS_READ_1_1_4,
2559 BFPT_DWORD(1), BIT(22), /* Supported bit */
2560 BFPT_DWORD(3), 16, /* Settings */
2561 SNOR_PROTO_1_1_4,
2562 },
2563
2564 /* Fast Read 1-4-4 */
2565 {
2566 SNOR_HWCAPS_READ_1_4_4,
2567 BFPT_DWORD(1), BIT(21), /* Supported bit */
2568 BFPT_DWORD(3), 0, /* Settings */
2569 SNOR_PROTO_1_4_4,
2570 },
2571
2572 /* Fast Read 4-4-4 */
2573 {
2574 SNOR_HWCAPS_READ_4_4_4,
2575 BFPT_DWORD(5), BIT(4), /* Supported bit */
2576 BFPT_DWORD(7), 16, /* Settings */
2577 SNOR_PROTO_4_4_4,
2578 },
2579};
2580
2581struct sfdp_bfpt_erase {
2582 /*
2583 * The half-word at offset <shift> in DWORD <dword> encodes the
2584 * op code and erase sector size to be used by Sector Erase commands.
2585 */
2586 u32 dword;
2587 u32 shift;
2588};
2589
2590static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
2591 /* Erase Type 1 in DWORD8 bits[15:0] */
2592 {BFPT_DWORD(8), 0},
2593
2594 /* Erase Type 2 in DWORD8 bits[31:16] */
2595 {BFPT_DWORD(8), 16},
2596
2597 /* Erase Type 3 in DWORD9 bits[15:0] */
2598 {BFPT_DWORD(9), 0},
2599
2600 /* Erase Type 4 in DWORD9 bits[31:16] */
2601 {BFPT_DWORD(9), 16},
2602};
2603
5390a8df
TA
2604/**
2605 * spi_nor_set_erase_type() - set a SPI NOR erase type
2606 * @erase: pointer to a structure that describes a SPI NOR erase type
2607 * @size: the size of the sector/block erased by the erase type
2608 * @opcode: the SPI command op code to erase the sector/block
2609 */
2610static void spi_nor_set_erase_type(struct spi_nor_erase_type *erase,
2611 u32 size, u8 opcode)
2612{
2613 erase->size = size;
2614 erase->opcode = opcode;
2615 /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
2616 erase->size_shift = ffs(erase->size) - 1;
2617 erase->size_mask = (1 << erase->size_shift) - 1;
2618}
2619
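/*
 * Illustrative sketch (not part of the driver): what the helper above
 * computes for a standard 4 KiB sector erase (opcode SPINOR_OP_BE_4K).
 */
#if 0
	struct spi_nor_erase_type et;

	spi_nor_set_erase_type(&et, 4096, SPINOR_OP_BE_4K);
	/* et.size_shift == 12, et.size_mask == 0xfff */
	/* an address is erase-aligned iff !(addr & et.size_mask) */
#endif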
2620/**
2621 * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
2622 * @erase: pointer to a structure that describes a SPI NOR erase type
2623 * @size: the size of the sector/block erased by the erase type
2624 * @opcode: the SPI command op code to erase the sector/block
2625 * @i: erase type index as sorted in the Basic Flash Parameter Table
2626 *
2627 * The supported Erase Types will be sorted at init in ascending order, with
2628 * the smallest Erase Type size being the first member in the erase_type array
2629 * of the spi_nor_erase_map structure. Save the Erase Type index as sorted in
2630 * the Basic Flash Parameter Table since it will be used later on to
2631 * synchronize with the supported Erase Types defined in SFDP optional tables.
2632 */
2633static void
2634spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
2635 u32 size, u8 opcode, u8 i)
2636{
2637 erase->idx = i;
2638 spi_nor_set_erase_type(erase, size, opcode);
2639}
2640
2641/**
2642 * spi_nor_map_cmp_erase_type() - compare the map's erase types by size
2643 * @l: member in the left half of the map's erase_type array
2644 * @r: member in the right half of the map's erase_type array
2645 *
2646 * Comparison function used in the sort() call to sort in ascending order the
2647 * map's erase types, the smallest erase type size being the first member in the
2648 * sorted erase_type array.
2649 *
2650 * Return: the result of @l->size - @r->size
2651 */
2652static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
2653{
2654 const struct spi_nor_erase_type *left = l, *right = r;
2655
2656 return left->size - right->size;
2657}
2658
e8828ec1
TA
2659/**
2660 * spi_nor_sort_erase_mask() - sort erase mask
2661 * @map: the erase map of the SPI NOR
2662 * @erase_mask: the erase type mask to be sorted
2663 *
2664 * Replicate the sort done for the map's erase types in BFPT: sort the erase
2665 * mask in ascending order with the smallest erase type size starting from
2666 * BIT(0) in the sorted erase mask.
2667 *
2668 * Return: sorted erase mask.
2669 */
2670static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
2671{
2672 struct spi_nor_erase_type *erase_type = map->erase_type;
2673 int i;
2674 u8 sorted_erase_mask = 0;
2675
2676 if (!erase_mask)
2677 return 0;
2678
2679 /* Replicate the sort done for the map's erase types. */
2680 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
2681 if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
2682 sorted_erase_mask |= BIT(i);
2683
2684 return sorted_erase_mask;
2685}
2686
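/*
 * Worked example (illustrative only) for the remapping above. Suppose the
 * BFPT listed Erase Type 1 as 64 KiB and Erase Type 2 as 4 KiB; after the
 * sort() in spi_nor_parse_bfpt() the map holds erase_type[0] = 4 KiB
 * (idx == 1) and erase_type[1] = 64 KiB (idx == 0). An unsorted mask of
 * BIT(0) (BFPT Type 1, the 64 KiB one) is therefore returned as BIT(1).
 */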
5390a8df
TA
2687/**
2688 * spi_nor_regions_sort_erase_types() - sort erase types in each region
2689 * @map: the erase map of the SPI NOR
2690 *
2691 * Function assumes that the erase types defined in the erase map are already
2692 * sorted in ascending order, with the smallest erase type size being the first
2693 * member in the erase_type array. It replicates the sort done for the map's
2694 * erase types. Each region's erase bitmask will indicate which erase types are
2695 * supported from the sorted erase types defined in the erase map.
2696 * Sort all the regions' erase types at init in order to speed up the process of
2697 * finding the best erase command at runtime.
2698 */
2699static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
2700{
2701 struct spi_nor_erase_region *region = map->regions;
5390a8df
TA
2702 u8 region_erase_mask, sorted_erase_mask;
2703
2704 while (region) {
2705 region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
2706
e8828ec1
TA
2707 sorted_erase_mask = spi_nor_sort_erase_mask(map,
2708 region_erase_mask);
5390a8df
TA
2709
2710 /* Overwrite erase mask. */
2711 region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
2712 sorted_erase_mask;
2713
2714 region = spi_nor_region_next(region);
2715 }
2716}
2717
2718/**
2719 * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
2720 * @map: the erase map of the SPI NOR
2721 * @erase_mask: bitmask encoding erase types that can erase the entire
2722 * flash memory
2723 * @flash_size: the spi nor flash memory size
2724 */
2725static void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
2726 u8 erase_mask, u64 flash_size)
2727{
2728 /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
2729 map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
2730 SNOR_LAST_REGION;
2731 map->uniform_region.size = flash_size;
2732 map->regions = &map->uniform_region;
2733 map->uniform_erase_type = erase_mask;
2734}
2735
2aaa5f7e
BB
2736static int
2737spi_nor_post_bfpt_fixups(struct spi_nor *nor,
2738 const struct sfdp_parameter_header *bfpt_header,
2739 const struct sfdp_bfpt *bfpt,
2740 struct spi_nor_flash_parameter *params)
2741{
2742 if (nor->info->fixups && nor->info->fixups->post_bfpt)
2743 return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
2744 params);
2745
2746 return 0;
2747}
2748
f384b352
CP
2749/**
2750 * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
2751 * @nor: pointer to a 'struct spi_nor'
2752 * @bfpt_header: pointer to the 'struct sfdp_parameter_header' describing
2753 * the Basic Flash Parameter Table length and version
2754 * @params: pointer to the 'struct spi_nor_flash_parameter' to be
2755 * filled
2756 *
2757 * The Basic Flash Parameter Table is the main and only mandatory table as
2758 * defined by the SFDP (JESD216) specification.
2759 * It provides us with the total size (memory density) of the data array and
2760 * the number of address bytes for Fast Read, Page Program and Sector Erase
2761 * commands.
2762 * For Fast READ commands, it also gives the number of mode clock cycles and
2763 * wait states (regrouped in the number of dummy clock cycles) for each
2764 * supported instruction op code.
2765 * For Page Program, the page size has been available since JESD216 rev A; however,
2766 * the supported instruction op codes are still not provided.
2767 * For Sector Erase commands, this table stores the supported instruction op
2768 * codes and the associated sector sizes.
2769 * Finally, the Quad Enable Requirements (QER) are also available since JESD216
2770 * rev A. The QER bits encode the manufacturer dependent procedure to be
2771 * executed to set the Quad Enable (QE) bit in some internal register of the
2772 * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
2773 * sending any Quad SPI command to the memory. Actually, setting the QE bit
2774 * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
2775 * and IO3 hence enabling 4 (Quad) I/O lines.
2776 *
2777 * Return: 0 on success, -errno otherwise.
2778 */
2779static int spi_nor_parse_bfpt(struct spi_nor *nor,
2780 const struct sfdp_parameter_header *bfpt_header,
2781 struct spi_nor_flash_parameter *params)
2782{
5390a8df
TA
2783 struct spi_nor_erase_map *map = &nor->erase_map;
2784 struct spi_nor_erase_type *erase_type = map->erase_type;
f384b352
CP
2785 struct sfdp_bfpt bfpt;
2786 size_t len;
2787 int i, cmd, err;
2788 u32 addr;
2789 u16 half;
5390a8df 2790 u8 erase_mask;
f384b352
CP
2791
2792 /* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
2793 if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
2794 return -EINVAL;
2795
2796 /* Read the Basic Flash Parameter Table. */
2797 len = min_t(size_t, sizeof(bfpt),
2798 bfpt_header->length * sizeof(u32));
2799 addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
2800 memset(&bfpt, 0, sizeof(bfpt));
bfa41337 2801 err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
f384b352
CP
2802 if (err < 0)
2803 return err;
2804
2805 /* Fix endianness of the BFPT DWORDs. */
2806 for (i = 0; i < BFPT_DWORD_MAX; i++)
2807 bfpt.dwords[i] = le32_to_cpu(bfpt.dwords[i]);
2808
2809 /* Number of address bytes. */
2810 switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
2811 case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
2812 nor->addr_width = 3;
2813 break;
2814
2815 case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
2816 nor->addr_width = 4;
2817 break;
2818
2819 default:
2820 break;
2821 }
2822
2823 /* Flash Memory Density (in bits). */
2824 params->size = bfpt.dwords[BFPT_DWORD(2)];
2825 if (params->size & BIT(31)) {
2826 params->size &= ~BIT(31);
b8f39116
BB
2827
2828 /*
2829 * Prevent overflows on params->size. Anyway, a NOR of 2^64
2830 * bits is unlikely to exist so this error probably means
2831 * the BFPT we are reading is corrupted/wrong.
2832 */
2833 if (params->size > 63)
2834 return -EINVAL;
2835
f384b352
CP
2836 params->size = 1ULL << params->size;
2837 } else {
2838 params->size++;
2839 }
2840 params->size >>= 3; /* Convert to bytes. */
2841
2842 /* Fast Read settings. */
2843 for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
2844 const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
2845 struct spi_nor_read_command *read;
2846
2847 if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
2848 params->hwcaps.mask &= ~rd->hwcaps;
2849 continue;
2850 }
2851
2852 params->hwcaps.mask |= rd->hwcaps;
2853 cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
2854 read = &params->reads[cmd];
2855 half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
2856 spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
2857 }
2858
5390a8df
TA
2859 /*
2860 * Sector Erase settings. Reinitialize the uniform erase map using the
2861 * Erase Types defined in the bfpt table.
2862 */
2863 erase_mask = 0;
2864 memset(&nor->erase_map, 0, sizeof(nor->erase_map));
f384b352
CP
2865 for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
2866 const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
2867 u32 erasesize;
2868 u8 opcode;
2869
2870 half = bfpt.dwords[er->dword] >> er->shift;
2871 erasesize = half & 0xff;
2872
2873 /* erasesize == 0 means this Erase Type is not supported. */
2874 if (!erasesize)
2875 continue;
2876
2877 erasesize = 1U << erasesize;
2878 opcode = (half >> 8) & 0xff;
5390a8df
TA
2879 erase_mask |= BIT(i);
2880 spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
2881 opcode, i);
f384b352 2882 }
5390a8df
TA
2883 spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
2884 /*
2885 * Sort all the map's Erase Types in ascending order with the smallest
2886 * erase size being the first member in the erase_type array.
2887 */
2888 sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
2889 spi_nor_map_cmp_erase_type, NULL);
2890 /*
2891 * Sort the erase types in the uniform region in order to update the
2892 * uniform_erase_type bitmask. The bitmask will be used later on when
2893 * selecting the uniform erase.
2894 */
2895 spi_nor_regions_sort_erase_types(map);
2896 map->uniform_erase_type = map->uniform_region.offset &
2897 SNOR_ERASE_TYPE_MASK;
f384b352
CP
2898
2899 /* Stop here if not JESD216 rev A or later. */
2900 if (bfpt_header->length < BFPT_DWORD_MAX)
2aaa5f7e
BB
2901 return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
2902 params);
f384b352
CP
2903
2904 /* Page size: this field specifies 'N' so the page size = 2^N bytes. */
2905 params->page_size = bfpt.dwords[BFPT_DWORD(11)];
2906 params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
2907 params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
2908 params->page_size = 1U << params->page_size;
2909
2910 /* Quad Enable Requirements. */
2911 switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
2912 case BFPT_DWORD15_QER_NONE:
2913 params->quad_enable = NULL;
2914 break;
2915
2916 case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
2917 case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
2918 params->quad_enable = spansion_no_read_cr_quad_enable;
2919 break;
2920
2921 case BFPT_DWORD15_QER_SR1_BIT6:
2922 params->quad_enable = macronix_quad_enable;
2923 break;
2924
2925 case BFPT_DWORD15_QER_SR2_BIT7:
2926 params->quad_enable = sr2_bit7_quad_enable;
2927 break;
2928
2929 case BFPT_DWORD15_QER_SR2_BIT1:
2930 params->quad_enable = spansion_read_cr_quad_enable;
2931 break;
2932
2933 default:
2934 return -EINVAL;
2935 }
2936
2aaa5f7e 2937 return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
f384b352
CP
2938}
2939
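/*
 * Worked example (illustrative only) for the density decoding in
 * spi_nor_parse_bfpt() above. For a 128 Mbit part, BFPT DWORD2 is
 * 0x07ffffff: bit 31 is clear, so size = 0x07ffffff + 1 = 2^27 bits,
 * which is 16 MiB after the >> 3. For densities of 4 Gbit and above,
 * bit 31 is set and the remaining bits hold an exponent, e.g. 0x80000021
 * means 2^33 bits = 1 GiB.
 */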
b038e8e3
TA
2940#define SMPT_CMD_ADDRESS_LEN_MASK GENMASK(23, 22)
2941#define SMPT_CMD_ADDRESS_LEN_0 (0x0UL << 22)
2942#define SMPT_CMD_ADDRESS_LEN_3 (0x1UL << 22)
2943#define SMPT_CMD_ADDRESS_LEN_4 (0x2UL << 22)
2944#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT (0x3UL << 22)
2945
2946#define SMPT_CMD_READ_DUMMY_MASK GENMASK(19, 16)
2947#define SMPT_CMD_READ_DUMMY_SHIFT 16
2948#define SMPT_CMD_READ_DUMMY(_cmd) \
2949 (((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
2950#define SMPT_CMD_READ_DUMMY_IS_VARIABLE 0xfUL
2951
2952#define SMPT_CMD_READ_DATA_MASK GENMASK(31, 24)
2953#define SMPT_CMD_READ_DATA_SHIFT 24
2954#define SMPT_CMD_READ_DATA(_cmd) \
2955 (((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)
2956
2957#define SMPT_CMD_OPCODE_MASK GENMASK(15, 8)
2958#define SMPT_CMD_OPCODE_SHIFT 8
2959#define SMPT_CMD_OPCODE(_cmd) \
2960 (((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)
2961
2962#define SMPT_MAP_REGION_COUNT_MASK GENMASK(23, 16)
2963#define SMPT_MAP_REGION_COUNT_SHIFT 16
2964#define SMPT_MAP_REGION_COUNT(_header) \
2965 ((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
2966 SMPT_MAP_REGION_COUNT_SHIFT) + 1)
2967
2968#define SMPT_MAP_ID_MASK GENMASK(15, 8)
2969#define SMPT_MAP_ID_SHIFT 8
2970#define SMPT_MAP_ID(_header) \
2971 (((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)
2972
2973#define SMPT_MAP_REGION_SIZE_MASK GENMASK(31, 8)
2974#define SMPT_MAP_REGION_SIZE_SHIFT 8
2975#define SMPT_MAP_REGION_SIZE(_region) \
2976 (((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
2977 SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)
2978
2979#define SMPT_MAP_REGION_ERASE_TYPE_MASK GENMASK(3, 0)
2980#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
2981 ((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)
2982
2983#define SMPT_DESC_TYPE_MAP BIT(1)
2984#define SMPT_DESC_END BIT(0)
2985
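/*
 * Worked example (hypothetical DWORD value, illustrative only) of decoding
 * a detection command descriptor with the masks above: for
 * dword = 0x04486500, SMPT_CMD_READ_DATA() = 0x04 (the bit to test in the
 * returned byte), the address length field selects 3 address bytes,
 * SMPT_CMD_READ_DUMMY() = 8 and SMPT_CMD_OPCODE() = 0x65.
 */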
2986/**
2987 * spi_nor_smpt_addr_width() - return the address width used in the
2988 * configuration detection command.
2989 * @nor: pointer to a 'struct spi_nor'
2990 * @settings: configuration detection command descriptor, dword1
2991 */
2992static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
2993{
2994 switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
2995 case SMPT_CMD_ADDRESS_LEN_0:
2996 return 0;
2997 case SMPT_CMD_ADDRESS_LEN_3:
2998 return 3;
2999 case SMPT_CMD_ADDRESS_LEN_4:
3000 return 4;
3001 case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
3002 /* fall through */
3003 default:
3004 return nor->addr_width;
3005 }
3006}
3007
3008/**
3009 * spi_nor_smpt_read_dummy() - return the configuration detection command read
3010 * latency, in clock cycles.
3011 * @nor: pointer to a 'struct spi_nor'
3012 * @settings: configuration detection command descriptor, dword1
3013 *
3014 * Return: the number of dummy cycles for an SMPT read
3015 */
3016static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
3017{
3018 u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);
3019
3020 if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
3021 return nor->read_dummy;
3022 return read_dummy;
3023}
3024
3025/**
3026 * spi_nor_get_map_in_use() - get the configuration map in use
3027 * @nor: pointer to a 'struct spi_nor'
3028 * @smpt: pointer to the sector map parameter table
c797bd81 3029 * @smpt_len: sector map parameter table length
b9f07cc8
TA
3030 *
3031 * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
b038e8e3 3032 */
c797bd81
TA
3033static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
3034 u8 smpt_len)
b038e8e3 3035{
b9f07cc8 3036 const u32 *ret;
1d5ceff2 3037 u8 *buf;
c797bd81 3038 u32 addr;
b038e8e3 3039 int err;
c797bd81 3040 u8 i;
b038e8e3 3041 u8 addr_width, read_opcode, read_dummy;
1d5ceff2
TA
3042 u8 read_data_mask, map_id;
3043
3044 /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
3045 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
3046 if (!buf)
3047 return ERR_PTR(-ENOMEM);
b038e8e3
TA
3048
3049 addr_width = nor->addr_width;
3050 read_dummy = nor->read_dummy;
3051 read_opcode = nor->read_opcode;
3052
3053 map_id = 0;
b038e8e3 3054 /* Determine if there are any optional Detection Command Descriptors */
c797bd81
TA
3055 for (i = 0; i < smpt_len; i += 2) {
3056 if (smpt[i] & SMPT_DESC_TYPE_MAP)
3057 break;
3058
b038e8e3
TA
3059 read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
3060 nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
3061 nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
3062 nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
3063 addr = smpt[i + 1];
3064
1d5ceff2 3065 err = spi_nor_read_raw(nor, addr, 1, buf);
b9f07cc8
TA
3066 if (err) {
3067 ret = ERR_PTR(err);
b038e8e3 3068 goto out;
b9f07cc8 3069 }
b038e8e3
TA
3070
3071 /*
3072 * Build an index value that is used to select the Sector Map
3073 * Configuration that is currently in use.
3074 */
1d5ceff2 3075 map_id = map_id << 1 | !!(*buf & read_data_mask);
b038e8e3
TA
3076 }
3077
c797bd81
TA
3078 /*
3079 * If command descriptors are provided, they always precede map
3080 * descriptors in the table. There is no need to start the iteration
3081 * over the smpt array all over again.
3082 *
3083 * Find the matching configuration map.
3084 */
b9f07cc8 3085 ret = ERR_PTR(-EINVAL);
c797bd81
TA
3086 while (i < smpt_len) {
3087 if (SMPT_MAP_ID(smpt[i]) == map_id) {
3088 ret = smpt + i;
3089 break;
3090 }
3091
3092 /*
3093 * If there are no more configuration map descriptors and no
3094 * configuration ID matched the configuration identifier, the
3095 * sector address map is unknown.
3096 */
b038e8e3 3097 if (smpt[i] & SMPT_DESC_END)
c797bd81
TA
3098 break;
3099
b038e8e3
TA
3100 /* increment the table index to the next map */
3101 i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
3102 }
3103
b038e8e3
TA
3104 /* fall through */
3105out:
1d5ceff2 3106 kfree(buf);
b038e8e3
TA
3107 nor->addr_width = addr_width;
3108 nor->read_dummy = read_dummy;
3109 nor->read_opcode = read_opcode;
3110 return ret;
3111}
3112
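/*
 * Worked example (illustrative only) for the map_id construction above:
 * with two detection commands, if the first read returns a byte with the
 * tested bit set and the second returns it cleared, map_id ends up as
 * (0 << 1 | 1) << 1 | 0 = 2, and the second loop in the function then
 * looks for the map descriptor whose SMPT_MAP_ID() equals 2.
 */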
3113/**
3114 * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
3115 * @region: pointer to a structure that describes a SPI NOR erase region
3116 * @erase: pointer to a structure that describes a SPI NOR erase type
3117 * @erase_type: erase type bitmask
3118 */
3119static void
3120spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
3121 const struct spi_nor_erase_type *erase,
3122 const u8 erase_type)
3123{
3124 int i;
3125
3126 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
3127 if (!(erase_type & BIT(i)))
3128 continue;
3129 if (region->size & erase[i].size_mask) {
3130 spi_nor_region_mark_overlay(region);
3131 return;
3132 }
3133 }
3134}
3135
3136/**
3137 * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
3138 * @nor: pointer to a 'struct spi_nor'
3139 * @smpt: pointer to the sector map parameter table
3140 *
3141 * Return: 0 on success, -errno otherwise.
3142 */
3143static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
3144 const u32 *smpt)
3145{
3146 struct spi_nor_erase_map *map = &nor->erase_map;
dc928431 3147 struct spi_nor_erase_type *erase = map->erase_type;
b038e8e3
TA
3148 struct spi_nor_erase_region *region;
3149 u64 offset;
3150 u32 region_count;
3151 int i, j;
dc928431
TA
3152 u8 uniform_erase_type, save_uniform_erase_type;
3153 u8 erase_type, regions_erase_type;
b038e8e3
TA
3154
3155 region_count = SMPT_MAP_REGION_COUNT(*smpt);
3156 /*
3157 * The regions will be freed when the driver detaches from the
3158 * device.
3159 */
3160 region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
3161 GFP_KERNEL);
3162 if (!region)
3163 return -ENOMEM;
3164 map->regions = region;
3165
e8828ec1 3166 uniform_erase_type = 0xff;
dc928431 3167 regions_erase_type = 0;
b038e8e3
TA
3168 offset = 0;
3169 /* Populate regions. */
3170 for (i = 0; i < region_count; i++) {
3171 j = i + 1; /* index for the region dword */
3172 region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
3173 erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
3174 region[i].offset = offset | erase_type;
3175
3176 spi_nor_region_check_overlay(&region[i], erase, erase_type);
3177
3178 /*
3179 * Save the erase types that are supported in all regions and
3180 * can erase the entire flash memory.
3181 */
e8828ec1 3182 uniform_erase_type &= erase_type;
b038e8e3 3183
dc928431
TA
3184 /*
3185 * regions_erase_type mask will indicate all the erase types
3186 * supported in this configuration map.
3187 */
3188 regions_erase_type |= erase_type;
3189
b038e8e3
TA
3190 offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
3191 region[i].size;
3192 }
3193
dc928431 3194 save_uniform_erase_type = map->uniform_erase_type;
e8828ec1
TA
3195 map->uniform_erase_type = spi_nor_sort_erase_mask(map,
3196 uniform_erase_type);
3197
dc928431
TA
3198 if (!regions_erase_type) {
3199 /*
3200 * Roll back to the previous uniform_erase_type mask; the SMPT is
3201 * broken.
3202 */
3203 map->uniform_erase_type = save_uniform_erase_type;
3204 return -EINVAL;
3205 }
3206
3207 /*
3208 * BFPT advertises all the erase types supported by all the possible
3209 * map configurations. Mask out the erase types that are not supported
3210 * by the current map configuration.
3211 */
3212 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
3213 if (!(regions_erase_type & BIT(erase[i].idx)))
3214 spi_nor_set_erase_type(&erase[i], 0, 0xFF);
3215
b038e8e3
TA
3216 spi_nor_region_mark_end(&region[i - 1]);
3217
3218 return 0;
3219}
3220
3221/**
3222 * spi_nor_parse_smpt() - parse Sector Map Parameter Table
3223 * @nor: pointer to a 'struct spi_nor'
3224 * @smpt_header: sector map parameter table header
3225 *
3226 * This table is optional, but when available, we parse it to identify the
3227 * location and size of sectors within the main data array of the flash memory
3228 * device and to identify which Erase Types are supported by each sector.
3229 *
3230 * Return: 0 on success, -errno otherwise.
3231 */
3232static int spi_nor_parse_smpt(struct spi_nor *nor,
3233 const struct sfdp_parameter_header *smpt_header)
3234{
3235 const u32 *sector_map;
3236 u32 *smpt;
3237 size_t len;
3238 u32 addr;
3239 int i, ret;
3240
3241 /* Read the Sector Map Parameter Table. */
3242 len = smpt_header->length * sizeof(*smpt);
e7b65a49 3243 smpt = kmalloc(len, GFP_KERNEL);
b038e8e3
TA
3244 if (!smpt)
3245 return -ENOMEM;
3246
3247 addr = SFDP_PARAM_HEADER_PTP(smpt_header);
3248 ret = spi_nor_read_sfdp(nor, addr, len, smpt);
3249 if (ret)
3250 goto out;
3251
3252 /* Fix endianness of the SMPT DWORDs. */
3253 for (i = 0; i < smpt_header->length; i++)
3254 smpt[i] = le32_to_cpu(smpt[i]);
3255
c797bd81 3256 sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
b9f07cc8
TA
3257 if (IS_ERR(sector_map)) {
3258 ret = PTR_ERR(sector_map);
b038e8e3
TA
3259 goto out;
3260 }
3261
3262 ret = spi_nor_init_non_uniform_erase_map(nor, sector_map);
3263 if (ret)
3264 goto out;
3265
3266 spi_nor_regions_sort_erase_types(&nor->erase_map);
3267 /* fall through */
3268out:
3269 kfree(smpt);
3270 return ret;
3271}
3272
816873ea
CP
3273#define SFDP_4BAIT_DWORD_MAX 2
3274
3275struct sfdp_4bait {
3276 /* The hardware capability. */
3277 u32 hwcaps;
3278
3279 /*
3280 * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
3281 * the associated 4-byte address op code is supported.
3282 */
3283 u32 supported_bit;
3284};
3285
3286/**
3287 * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
3288 * @nor: pointer to a 'struct spi_nor'.
3289 * @param_header: pointer to the 'struct sfdp_parameter_header' describing
3290 * the 4-Byte Address Instruction Table length and version.
3291 * @params: pointer to the 'struct spi_nor_flash_parameter' to be filled.
3292 *
3293 * Return: 0 on success, -errno otherwise.
3294 */
3295static int spi_nor_parse_4bait(struct spi_nor *nor,
3296 const struct sfdp_parameter_header *param_header,
3297 struct spi_nor_flash_parameter *params)
3298{
3299 static const struct sfdp_4bait reads[] = {
3300 { SNOR_HWCAPS_READ, BIT(0) },
3301 { SNOR_HWCAPS_READ_FAST, BIT(1) },
3302 { SNOR_HWCAPS_READ_1_1_2, BIT(2) },
3303 { SNOR_HWCAPS_READ_1_2_2, BIT(3) },
3304 { SNOR_HWCAPS_READ_1_1_4, BIT(4) },
3305 { SNOR_HWCAPS_READ_1_4_4, BIT(5) },
3306 { SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) },
3307 { SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) },
3308 { SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) },
3309 };
3310 static const struct sfdp_4bait programs[] = {
3311 { SNOR_HWCAPS_PP, BIT(6) },
3312 { SNOR_HWCAPS_PP_1_1_4, BIT(7) },
3313 { SNOR_HWCAPS_PP_1_4_4, BIT(8) },
3314 };
3315 static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
3316 { 0u /* not used */, BIT(9) },
3317 { 0u /* not used */, BIT(10) },
3318 { 0u /* not used */, BIT(11) },
3319 { 0u /* not used */, BIT(12) },
3320 };
3321 struct spi_nor_pp_command *params_pp = params->page_programs;
3322 struct spi_nor_erase_map *map = &nor->erase_map;
3323 struct spi_nor_erase_type *erase_type = map->erase_type;
3324 u32 *dwords;
3325 size_t len;
3326 u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
3327 int i, ret;
3328
3329 if (param_header->major != SFDP_JESD216_MAJOR ||
3330 param_header->length < SFDP_4BAIT_DWORD_MAX)
3331 return -EINVAL;
3332
3333 /* Read the 4-byte Address Instruction Table. */
3334 len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;
3335
3336 /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
3337 dwords = kmalloc(len, GFP_KERNEL);
3338 if (!dwords)
3339 return -ENOMEM;
3340
3341 addr = SFDP_PARAM_HEADER_PTP(param_header);
3342 ret = spi_nor_read_sfdp(nor, addr, len, dwords);
3343 if (ret)
3344 		goto out;
3345
3346 /* Fix endianness of the 4BAIT DWORDs. */
3347 for (i = 0; i < SFDP_4BAIT_DWORD_MAX; i++)
3348 dwords[i] = le32_to_cpu(dwords[i]);
3349
3350 /*
3351 * Compute the subset of (Fast) Read commands for which the 4-byte
3352 * version is supported.
3353 */
3354 discard_hwcaps = 0;
3355 read_hwcaps = 0;
3356 for (i = 0; i < ARRAY_SIZE(reads); i++) {
3357 const struct sfdp_4bait *read = &reads[i];
3358
3359 discard_hwcaps |= read->hwcaps;
3360 if ((params->hwcaps.mask & read->hwcaps) &&
3361 (dwords[0] & read->supported_bit))
3362 read_hwcaps |= read->hwcaps;
3363 }
3364
3365 /*
3366 * Compute the subset of Page Program commands for which the 4-byte
3367 * version is supported.
3368 */
3369 pp_hwcaps = 0;
3370 for (i = 0; i < ARRAY_SIZE(programs); i++) {
3371 const struct sfdp_4bait *program = &programs[i];
3372
3373 /*
3374 * The 4 Byte Address Instruction (Optional) Table is the only
3375 * SFDP table that indicates support for Page Program Commands.
3376		 * Bypass the params->hwcaps.mask and consider 4BAIT the
3377		 * authoritative source for Page Program support.
3378 */
3379 discard_hwcaps |= program->hwcaps;
3380 if (dwords[0] & program->supported_bit)
3381 pp_hwcaps |= program->hwcaps;
3382 }
3383
3384 /*
3385 * Compute the subset of Sector Erase commands for which the 4-byte
3386 * version is supported.
3387 */
3388 erase_mask = 0;
3389 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
3390 const struct sfdp_4bait *erase = &erases[i];
3391
3392 if (dwords[0] & erase->supported_bit)
3393 erase_mask |= BIT(i);
3394 }
3395
3396 /* Replicate the sort done for the map's erase types in BFPT. */
3397 erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
3398
3399 /*
3400 * We need at least one 4-byte op code per read, program and erase
3401 * operation; the .read(), .write() and .erase() hooks share the
3402 * nor->addr_width value.
3403 */
3404 if (!read_hwcaps || !pp_hwcaps || !erase_mask)
3405 goto out;
3406
3407 /*
3408 * Discard all operations from the 4-byte instruction set which are
3409 * not supported by this memory.
3410 */
3411 params->hwcaps.mask &= ~discard_hwcaps;
3412 params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);
3413
3414 /* Use the 4-byte address instruction set. */
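	/* e.g. SPINOR_OP_READ_FAST (0x0b) becomes SPINOR_OP_READ_FAST_4B (0x0c). */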
3415 for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
3416 struct spi_nor_read_command *read_cmd = &params->reads[i];
3417
3418 read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
3419 }
3420
3421 /* 4BAIT is the only SFDP table that indicates page program support. */
3422 if (pp_hwcaps & SNOR_HWCAPS_PP)
3423 spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
3424 SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
3425 if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
3426 spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
3427 SPINOR_OP_PP_1_1_4_4B,
3428 SNOR_PROTO_1_1_4);
3429 if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
3430 spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
3431 SPINOR_OP_PP_1_4_4_4B,
3432 SNOR_PROTO_1_4_4);
3433
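	/*
	 * DWORD2 of the 4BAIT packs one 4-byte-address erase opcode per byte;
	 * pick the byte matching each erase type's BFPT index and disable the
	 * erase types that have no 4-byte opcode.
	 */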
3434 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
3435 if (erase_mask & BIT(i))
3436 erase_type[i].opcode = (dwords[1] >>
3437 erase_type[i].idx * 8) & 0xFF;
3438 else
3439 spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
3440 }
3441
3442 /*
3443	 * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
3444	 * later, because we have already converted to 4-byte opcodes here. That
3445	 * function also implements a legacy quirk for the erase size of Spansion
3446	 * memories, a quirk that is no longer needed with new SFDP-compliant
3447	 * memories.
3448 */
3449 nor->addr_width = 4;
3450 nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
3451
3452 /* fall through */
3453out:
3454 kfree(dwords);
3455 return ret;
3456}
3457
f384b352
CP
3458/**
3459 * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
3460 * @nor: pointer to a 'struct spi_nor'
3461 * @params: pointer to the 'struct spi_nor_flash_parameter' to be
3462 * filled
3463 *
3464 * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
3465 * specification. This is a standard which tends to be supported by almost
3466 * all (Q)SPI memory manufacturers. These standardized tables, stored in the
3467 * flash itself, let us learn at runtime the main parameters needed for basic
3468 * SPI flash operations such as Fast Read, Page Program or Sector Erase.
3469 *
3470 * Return: 0 on success, -errno otherwise.
3471 */
3472static int spi_nor_parse_sfdp(struct spi_nor *nor,
3473 struct spi_nor_flash_parameter *params)
3474{
3475 const struct sfdp_parameter_header *param_header, *bfpt_header;
3476 struct sfdp_parameter_header *param_headers = NULL;
3477 struct sfdp_header header;
3478 struct device *dev = nor->dev;
3479 size_t psize;
3480 int i, err;
3481
3482 /* Get the SFDP header. */
bfa41337 3483 err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
f384b352
CP
3484 if (err < 0)
3485 return err;
3486
3487 /* Check the SFDP header version. */
3488 if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
90d4fa45 3489 header.major != SFDP_JESD216_MAJOR)
f384b352
CP
3490 return -EINVAL;
3491
3492 /*
3493 * Verify that the first and only mandatory parameter header is a
3494 * Basic Flash Parameter Table header as specified in JESD216.
3495 */
3496 bfpt_header = &header.bfpt_header;
3497 if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
3498 bfpt_header->major != SFDP_JESD216_MAJOR)
3499 return -EINVAL;
3500
3501 /*
3502 * Allocate memory then read all parameter headers with a single
3503 * Read SFDP command. These parameter headers will actually be parsed
3504 * twice: a first time to get the latest revision of the basic flash
3505 * parameter table, then a second time to handle the supported optional
3506 * tables.
3507	 * Hence we read all the parameter headers in one go to reduce the
3508 * processing time. Also we use kmalloc() instead of devm_kmalloc()
3509 * because we don't need to keep these parameter headers: the allocated
3510 * memory is always released with kfree() before exiting this function.
3511 */
3512 if (header.nph) {
3513 psize = header.nph * sizeof(*param_headers);
3514
3515 param_headers = kmalloc(psize, GFP_KERNEL);
3516 if (!param_headers)
3517 return -ENOMEM;
3518
3519 err = spi_nor_read_sfdp(nor, sizeof(header),
3520 psize, param_headers);
3521 if (err < 0) {
3522 dev_err(dev, "failed to read SFDP parameter headers\n");
3523 goto exit;
3524 }
3525 }
3526
3527 /*
3528 * Check other parameter headers to get the latest revision of
3529 * the basic flash parameter table.
3530 */
3531 for (i = 0; i < header.nph; i++) {
3532 param_header = &param_headers[i];
3533
3534 if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
3535 param_header->major == SFDP_JESD216_MAJOR &&
3536 (param_header->minor > bfpt_header->minor ||
3537 (param_header->minor == bfpt_header->minor &&
3538 param_header->length > bfpt_header->length)))
3539 bfpt_header = param_header;
3540 }
3541
3542 err = spi_nor_parse_bfpt(nor, bfpt_header, params);
3543 if (err)
3544 goto exit;
3545
cd5e6d79 3546 /* Parse optional parameter tables. */
f384b352
CP
3547 for (i = 0; i < header.nph; i++) {
3548 param_header = &param_headers[i];
3549
3550 switch (SFDP_PARAM_HEADER_ID(param_header)) {
3551 case SFDP_SECTOR_MAP_ID:
b038e8e3 3552 err = spi_nor_parse_smpt(nor, param_header);
f384b352
CP
3553 break;
3554
816873ea
CP
3555 case SFDP_4BAIT_ID:
3556 err = spi_nor_parse_4bait(nor, param_header, params);
3557 break;
3558
f384b352
CP
3559 default:
3560 break;
3561 }
3562
cd5e6d79
TA
3563 if (err) {
3564 dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
3565 SFDP_PARAM_HEADER_ID(param_header));
3566 /*
3567 * Let's not drop all information we extracted so far
3568 * if optional table parsers fail. In case of failing,
3569 * each optional parser is responsible to roll back to
3570 * the previously known spi_nor data.
3571 */
3572 err = 0;
3573 }
f384b352
CP
3574 }
3575
3576exit:
3577 kfree(param_headers);
3578 return err;
3579}
3580
cfc5604c 3581static int spi_nor_init_params(struct spi_nor *nor,
cfc5604c
CP
3582 struct spi_nor_flash_parameter *params)
3583{
5390a8df 3584 struct spi_nor_erase_map *map = &nor->erase_map;
b296379f 3585 const struct flash_info *info = nor->info;
5390a8df
TA
3586 u8 i, erase_mask;
3587
cfc5604c
CP
3588 /* Set legacy flash parameters as default. */
3589 memset(params, 0, sizeof(*params));
3590
3591 /* Set SPI NOR sizes. */
84a1c210 3592 params->size = (u64)info->sector_size * info->n_sectors;
cfc5604c
CP
3593 params->page_size = info->page_size;
3594
3595 /* (Fast) Read settings. */
3596 params->hwcaps.mask |= SNOR_HWCAPS_READ;
3597 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
3598 0, 0, SPINOR_OP_READ,
3599 SNOR_PROTO_1_1_1);
3600
3601 if (!(info->flags & SPI_NOR_NO_FR)) {
3602 params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
3603 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
3604 0, 8, SPINOR_OP_READ_FAST,
3605 SNOR_PROTO_1_1_1);
3606 }
3607
3608 if (info->flags & SPI_NOR_DUAL_READ) {
3609 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
3610 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
3611 0, 8, SPINOR_OP_READ_1_1_2,
3612 SNOR_PROTO_1_1_2);
3613 }
3614
3615 if (info->flags & SPI_NOR_QUAD_READ) {
3616 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
3617 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
3618 0, 8, SPINOR_OP_READ_1_1_4,
3619 SNOR_PROTO_1_1_4);
3620 }
3621
fcd44b64
YNG
3622 if (info->flags & SPI_NOR_OCTAL_READ) {
3623 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
3624 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
3625 0, 8, SPINOR_OP_READ_1_1_8,
3626 SNOR_PROTO_1_1_8);
3627 }
3628
cfc5604c
CP
3629 /* Page Program settings. */
3630 params->hwcaps.mask |= SNOR_HWCAPS_PP;
3631 spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
3632 SPINOR_OP_PP, SNOR_PROTO_1_1_1);
3633
5390a8df
TA
3634 /*
3635 * Sector Erase settings. Sort Erase Types in ascending order, with the
3636 * smallest erase size starting at BIT(0).
3637 */
3638 erase_mask = 0;
3639 i = 0;
3640 if (info->flags & SECT_4K_PMC) {
3641 erase_mask |= BIT(i);
3642 spi_nor_set_erase_type(&map->erase_type[i], 4096u,
3643 SPINOR_OP_BE_4K_PMC);
3644 i++;
3645 } else if (info->flags & SECT_4K) {
3646 erase_mask |= BIT(i);
3647 spi_nor_set_erase_type(&map->erase_type[i], 4096u,
3648 SPINOR_OP_BE_4K);
3649 i++;
3650 }
3651 erase_mask |= BIT(i);
3652 spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
3653 SPINOR_OP_SE);
3654 spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
3655
cfc5604c
CP
3656 /* Select the procedure to set the Quad Enable bit. */
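	/*
	 * Macronix-style memories use bit 6 of the Status Register as the QE
	 * bit, Spansion-style memories use bit 1 of the Configuration Register;
	 * ST/Micron memories have no QE bit, hence no handler is needed.
	 */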
3657 if (params->hwcaps.mask & (SNOR_HWCAPS_READ_QUAD |
3658 SNOR_HWCAPS_PP_QUAD)) {
3659 switch (JEDEC_MFR(info)) {
3660 case SNOR_MFR_MACRONIX:
3661 params->quad_enable = macronix_quad_enable;
3662 break;
3663
0005aad0 3664 case SNOR_MFR_ST:
cfc5604c
CP
3665 case SNOR_MFR_MICRON:
3666 break;
3667
3668 default:
f384b352 3669 /* Kept only for backward compatibility purpose. */
cfc5604c
CP
3670 params->quad_enable = spansion_quad_enable;
3671 break;
3672 }
e2707285
AY
3673
3674 /*
3675	 * Some manufacturers, like GigaDevice, use different
3676	 * bits to set QE on different memories, so the MFR alone
3677	 * cannot determine the quad_enable method; in that case
3678	 * it is set in the flash info list.
3679 */
3680 if (info->quad_enable)
3681 params->quad_enable = info->quad_enable;
cfc5604c
CP
3682 }
3683
f384b352
CP
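	/*
	 * Override the legacy defaults with the parameters advertised by SFDP
	 * when the memory claims Dual/Quad support; keep the defaults if the
	 * SFDP tables cannot be parsed.
	 */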
3684 if ((info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
3685 !(info->flags & SPI_NOR_SKIP_SFDP)) {
3686 struct spi_nor_flash_parameter sfdp_params;
5390a8df 3687 struct spi_nor_erase_map prev_map;
f384b352
CP
3688
3689 memcpy(&sfdp_params, params, sizeof(sfdp_params));
5390a8df
TA
3690 memcpy(&prev_map, &nor->erase_map, sizeof(prev_map));
3691
90c31cb9
BB
3692 if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
3693 nor->addr_width = 0;
548ed684 3694 nor->flags &= ~SNOR_F_4B_OPCODES;
5390a8df
TA
3695 /* restore previous erase map */
3696 memcpy(&nor->erase_map, &prev_map,
3697 sizeof(nor->erase_map));
90c31cb9 3698 } else {
f384b352 3699 memcpy(params, &sfdp_params, sizeof(*params));
90c31cb9 3700 }
f384b352
CP
3701 }
3702
cfc5604c
CP
3703 return 0;
3704}
3705
cfc5604c
CP
3706static int spi_nor_select_read(struct spi_nor *nor,
3707 const struct spi_nor_flash_parameter *params,
3708 u32 shared_hwcaps)
3709{
3710 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
3711 const struct spi_nor_read_command *read;
3712
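	/*
	 * The read hwcaps bits are ordered by priority: the higher the bit,
	 * the faster the command, so the highest shared bit selects the best
	 * (Fast) Read command supported by both controller and memory.
	 */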
3713 if (best_match < 0)
3714 return -EINVAL;
3715
3716 cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
3717 if (cmd < 0)
3718 return -EINVAL;
3719
3720 read = &params->reads[cmd];
3721 nor->read_opcode = read->opcode;
3722 nor->read_proto = read->proto;
3723
3724 /*
3725	 * In the spi-nor framework, we don't need to distinguish between
3726	 * mode clock cycles and wait state clock cycles.
3727 * Indeed, the value of the mode clock cycles is used by a QSPI
3728 * flash memory to know whether it should enter or leave its 0-4-4
3729 * (Continuous Read / XIP) mode.
3730 * eXecution In Place is out of the scope of the mtd sub-system.
3731 * Hence we choose to merge both mode and wait state clock cycles
3732	 * into the so-called dummy clock cycles.
3733 */
3734 nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
3735 return 0;
3736}
3737
3738static int spi_nor_select_pp(struct spi_nor *nor,
3739 const struct spi_nor_flash_parameter *params,
3740 u32 shared_hwcaps)
3741{
3742 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
3743 const struct spi_nor_pp_command *pp;
3744
3745 if (best_match < 0)
3746 return -EINVAL;
3747
3748 cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
3749 if (cmd < 0)
3750 return -EINVAL;
3751
3752 pp = &params->page_programs[cmd];
3753 nor->program_opcode = pp->opcode;
3754 nor->write_proto = pp->proto;
3755 return 0;
3756}
3757
5390a8df
TA
3758/**
3759 * spi_nor_select_uniform_erase() - select optimum uniform erase type
3760 * @map: the erase map of the SPI NOR
3761 * @wanted_size: the erase type size to search for. Contains the value of
3762 * info->sector_size or of the "small sector" size in case
3763 * CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
3764 *
3765 * Once the optimum uniform sector erase command is found, disable all the
3766 * others.
3767 *
3768 * Return: pointer to erase type on success, NULL otherwise.
3769 */
3770static const struct spi_nor_erase_type *
3771spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
3772 const u32 wanted_size)
cfc5604c 3773{
5390a8df
TA
3774 const struct spi_nor_erase_type *tested_erase, *erase = NULL;
3775 int i;
3776 u8 uniform_erase_type = map->uniform_erase_type;
cfc5604c 3777
5390a8df
TA
3778 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
3779 if (!(uniform_erase_type & BIT(i)))
3780 continue;
3781
3782 tested_erase = &map->erase_type[i];
3783
3784 /*
3785 * If the current erase size is the one, stop here:
3786 * we have found the right uniform Sector Erase command.
3787 */
3788 if (tested_erase->size == wanted_size) {
3789 erase = tested_erase;
3790 break;
3791 }
f384b352 3792
5390a8df
TA
3793 /*
3794		 * Otherwise, the current erase size is still a valid candidate.
3795 * Select the biggest valid candidate.
3796 */
3797 if (!erase && tested_erase->size)
3798 erase = tested_erase;
3799 /* keep iterating to find the wanted_size */
3800 }
3801
3802 if (!erase)
3803 return NULL;
3804
3805 /* Disable all other Sector Erase commands. */
3806 map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
3807 map->uniform_erase_type |= BIT(erase - map->erase_type);
3808 return erase;
3809}
3810
3811static int spi_nor_select_erase(struct spi_nor *nor, u32 wanted_size)
3812{
3813 struct spi_nor_erase_map *map = &nor->erase_map;
3814 const struct spi_nor_erase_type *erase = NULL;
3815 struct mtd_info *mtd = &nor->mtd;
3816 int i;
3817
3818 /*
3819 * The previous implementation handling Sector Erase commands assumed
3820	 * that the SPI flash memory has a uniform layout and then used only one
3821 * of the supported erase sizes for all Sector Erase commands.
3822 * So to be backward compatible, the new implementation also tries to
3823 * manage the SPI flash memory as uniform with a single erase sector
3824 * size, when possible.
3825 */
cfc5604c
CP
3826#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
3827 /* prefer "small sector" erase if possible */
5390a8df 3828 wanted_size = 4096u;
cfc5604c 3829#endif
5390a8df
TA
3830
3831 if (spi_nor_has_uniform_erase(nor)) {
3832 erase = spi_nor_select_uniform_erase(map, wanted_size);
3833 if (!erase)
3834 return -EINVAL;
3835 nor->erase_opcode = erase->opcode;
3836 mtd->erasesize = erase->size;
3837 return 0;
cfc5604c 3838 }
5390a8df
TA
3839
3840 /*
3841 * For non-uniform SPI flash memory, set mtd->erasesize to the
3842 * maximum erase sector size. No need to set nor->erase_opcode.
3843 */
3844 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
3845 if (map->erase_type[i].size) {
3846 erase = &map->erase_type[i];
3847 break;
3848 }
3849 }
3850
3851 if (!erase)
3852 return -EINVAL;
3853
3854 mtd->erasesize = erase->size;
cfc5604c
CP
3855 return 0;
3856}
3857
b296379f 3858static int spi_nor_setup(struct spi_nor *nor,
cfc5604c
CP
3859 const struct spi_nor_flash_parameter *params,
3860 const struct spi_nor_hwcaps *hwcaps)
3861{
3862 u32 ignored_mask, shared_mask;
3863 bool enable_quad_io;
3864 int err;
3865
3866 /*
3867 * Keep only the hardware capabilities supported by both the SPI
3868 * controller and the SPI flash memory.
3869 */
3870 shared_mask = hwcaps->mask & params->hwcaps.mask;
3871
3872 /* SPI n-n-n protocols are not supported yet. */
3873 ignored_mask = (SNOR_HWCAPS_READ_2_2_2 |
3874 SNOR_HWCAPS_READ_4_4_4 |
fe488a5e
CP
3875 SNOR_HWCAPS_READ_8_8_8 |
3876 SNOR_HWCAPS_PP_4_4_4 |
3877 SNOR_HWCAPS_PP_8_8_8);
cfc5604c
CP
3878 if (shared_mask & ignored_mask) {
3879 dev_dbg(nor->dev,
3880 "SPI n-n-n protocols are not supported yet.\n");
3881 shared_mask &= ~ignored_mask;
3882 }
3883
3884 /* Select the (Fast) Read command. */
3885 err = spi_nor_select_read(nor, params, shared_mask);
3886 if (err) {
3887 dev_err(nor->dev,
3888 "can't select read settings supported by both the SPI controller and memory.\n");
3889 return err;
3890 }
3891
3892 /* Select the Page Program command. */
3893 err = spi_nor_select_pp(nor, params, shared_mask);
3894 if (err) {
3895 dev_err(nor->dev,
3896 "can't select write settings supported by both the SPI controller and memory.\n");
3897 return err;
3898 }
3899
3900 /* Select the Sector Erase command. */
b296379f 3901 err = spi_nor_select_erase(nor, nor->info->sector_size);
cfc5604c
CP
3902 if (err) {
3903 dev_err(nor->dev,
3904 "can't select erase settings supported by both the SPI controller and memory.\n");
3905 return err;
3906 }
3907
3908 /* Enable Quad I/O if needed. */
3909 enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 ||
3910 spi_nor_get_protocol_width(nor->write_proto) == 4);
46dde01f
KD
3911 if (enable_quad_io && params->quad_enable)
3912 nor->quad_enable = params->quad_enable;
3913 else
3914 nor->quad_enable = NULL;
3915
3916 return 0;
3917}
3918
3919static int spi_nor_init(struct spi_nor *nor)
3920{
3921 int err;
3922
3923 /*
3924	 * Atmel, SST, Intel/Numonyx, and other serial NOR flashes tend to
3925	 * power up with the software protection bits set.
3926 */
3927 if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
3928 JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
3929 JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
3930 nor->info->flags & SPI_NOR_HAS_LOCK) {
3931 write_enable(nor);
3932 write_sr(nor, 0);
3933 spi_nor_wait_till_ready(nor);
3934 }
3935
3936 if (nor->quad_enable) {
3937 err = nor->quad_enable(nor);
cfc5604c
CP
3938 if (err) {
3939 dev_err(nor->dev, "quad mode not supported\n");
3940 return err;
3941 }
3942 }
3943
548ed684 3944 if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
bb276262
BN
3945 /*
3946 * If the RESET# pin isn't hooked up properly, or the system
3947 * otherwise doesn't perform a reset command in the boot
3948 * sequence, it's impossible to 100% protect against unexpected
3949 * reboots (e.g., crashes). Warn the user (or hopefully, system
3950 * designer) that this is bad.
3951 */
3952 WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
3953 "enabling reset hack; may not recover from unexpected reboots\n");
eb6ec1d7 3954 set_4byte(nor, true);
bb276262 3955 }
46dde01f 3956
cfc5604c
CP
3957 return 0;
3958}
3959
d6084fc8
KD
3960/* mtd resume handler */
3961static void spi_nor_resume(struct mtd_info *mtd)
3962{
3963 struct spi_nor *nor = mtd_to_spi_nor(mtd);
3964 struct device *dev = nor->dev;
3965 int ret;
3966
3967 /* re-initialize the nor chip */
3968 ret = spi_nor_init(nor);
3969 if (ret)
3970 dev_err(dev, "resume() failed\n");
3971}
3972
8dee1d97
HZ
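/**
 * spi_nor_restore() - restore the addressing mode before reboot/shutdown
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Put the memory back into 3-byte addressing mode when the RESET# line is
 * known to be broken, so that a bootloader expecting 3-byte addressing can
 * still read the flash after a reset.
 */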
3973void spi_nor_restore(struct spi_nor *nor)
3974{
3975 /* restore the addressing mode */
548ed684
BB
3976 if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
3977 nor->flags & SNOR_F_BROKEN_RESET)
eb6ec1d7 3978 set_4byte(nor, false);
8dee1d97
HZ
3979}
3980EXPORT_SYMBOL_GPL(spi_nor_restore);
3981
f10aa369
BB
3982static const struct flash_info *spi_nor_match_id(const char *name)
3983{
3984 const struct flash_info *id = spi_nor_ids;
3985
3986 while (id->name) {
3987 if (!strcmp(name, id->name))
3988 return id;
3989 id++;
3990 }
3991 return NULL;
3992}
3993
cfc5604c
CP
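/**
 * spi_nor_scan() - detect and initialize a SPI NOR flash memory
 * @nor:	pointer to a 'struct spi_nor'; the caller must have set
 *		nor->dev and the read/write/register-access hooks beforehand
 * @name:	flash model name from the platform/DT, or NULL to rely purely
 *		on JEDEC ID auto-detection
 * @hwcaps:	hardware capabilities supported by the SPI controller
 *
 * A controller driver typically calls this from its probe(), e.g. (sketch):
 *
 *	const struct spi_nor_hwcaps hwcaps = {
 *		.mask = SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST |
 *			SNOR_HWCAPS_PP,
 *	};
 *	ret = spi_nor_scan(nor, NULL, &hwcaps);
 *
 * Return: 0 on success, -errno otherwise.
 */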
3994int spi_nor_scan(struct spi_nor *nor, const char *name,
3995 const struct spi_nor_hwcaps *hwcaps)
b199489d 3996{
cfc5604c 3997 struct spi_nor_flash_parameter params;
06bb6f5a 3998 const struct flash_info *info = NULL;
b199489d 3999 struct device *dev = nor->dev;
19763671 4000 struct mtd_info *mtd = &nor->mtd;
9c7d7875 4001 struct device_node *np = spi_nor_get_flash_node(nor);
b199489d
HS
4002 int ret;
4003 int i;
4004
4005 ret = spi_nor_check(nor);
4006 if (ret)
4007 return ret;
4008
cfc5604c
CP
4009 /* Reset SPI protocol for all commands. */
4010 nor->reg_proto = SNOR_PROTO_1_1_1;
4011 nor->read_proto = SNOR_PROTO_1_1_1;
4012 nor->write_proto = SNOR_PROTO_1_1_1;
4013
43163022 4014 if (name)
06bb6f5a 4015 info = spi_nor_match_id(name);
43163022 4016	/* Try to auto-detect if the chip name wasn't specified or wasn't found */
06bb6f5a
RM
4017 if (!info)
4018 info = spi_nor_read_id(nor);
4019 if (IS_ERR_OR_NULL(info))
70f3ce05
BH
4020 return -ENOENT;
4021
58c81957
RM
4022 /*
4023 * If caller has specified name of flash model that can normally be
4024 * detected using JEDEC, let's verify it.
4025 */
4026 if (name && info->id_len) {
06bb6f5a 4027 const struct flash_info *jinfo;
b199489d 4028
06bb6f5a
RM
4029 jinfo = spi_nor_read_id(nor);
4030 if (IS_ERR(jinfo)) {
4031 return PTR_ERR(jinfo);
4032 } else if (jinfo != info) {
b199489d
HS
4033 /*
4034 * JEDEC knows better, so overwrite platform ID. We
4035 * can't trust partitions any longer, but we'll let
4036 * mtd apply them anyway, since some partitions may be
4037 * marked read-only, and we don't want to lose that
4038 * information, even if it's not 100% accurate.
4039 */
4040 dev_warn(dev, "found %s, expected %s\n",
06bb6f5a
RM
4041 jinfo->name, info->name);
4042 info = jinfo;
b199489d
HS
4043 }
4044 }
4045
b296379f
BB
4046 nor->info = info;
4047
b199489d
HS
4048 mutex_init(&nor->lock);
4049
e99ca98f
RR
4050 /*
4051 * Make sure the XSR_RDY flag is set before calling
4052	 * spi_nor_wait_till_ready(). Xilinx S3AN flashes share
4053	 * their MFR ID with Atmel SPI NOR flashes.
4054 */
4055 if (info->flags & SPI_S3AN)
4056 nor->flags |= SNOR_F_READY_XSR_RDY;
4057
cfc5604c 4058 /* Parse the Serial Flash Discoverable Parameters table. */
b296379f 4059 ret = spi_nor_init_params(nor, &params);
cfc5604c
CP
4060 if (ret)
4061 return ret;
4062
32f1b7c8 4063 if (!mtd->name)
b199489d 4064 mtd->name = dev_name(dev);
c9ec3900 4065 mtd->priv = nor;
b199489d
HS
4066 mtd->type = MTD_NORFLASH;
4067 mtd->writesize = 1;
4068 mtd->flags = MTD_CAP_NORFLASH;
cfc5604c 4069 mtd->size = params.size;
b199489d
HS
4070 mtd->_erase = spi_nor_erase;
4071 mtd->_read = spi_nor_read;
d6084fc8 4072 mtd->_resume = spi_nor_resume;
b199489d 4073
357ca38d 4074 /* NOR protection support for STmicro/Micron chips and similar */
0005aad0
YNG
4075 if (JEDEC_MFR(info) == SNOR_MFR_ST ||
4076 JEDEC_MFR(info) == SNOR_MFR_MICRON ||
4077 info->flags & SPI_NOR_HAS_LOCK) {
8cc7f33a
BN
4078 nor->flash_lock = stm_lock;
4079 nor->flash_unlock = stm_unlock;
5bf0e69b 4080 nor->flash_is_locked = stm_is_locked;
8cc7f33a
BN
4081 }
4082
5bf0e69b 4083 if (nor->flash_lock && nor->flash_unlock && nor->flash_is_locked) {
b199489d
HS
4084 mtd->_lock = spi_nor_lock;
4085 mtd->_unlock = spi_nor_unlock;
5bf0e69b 4086 mtd->_is_locked = spi_nor_is_locked;
b199489d
HS
4087 }
4088
4089	/* SST NOR chips use AAI word program */
4090 if (info->flags & SST_WRITE)
4091 mtd->_write = sst_write;
4092 else
4093 mtd->_write = spi_nor_write;
4094
51983b7d
BN
4095 if (info->flags & USE_FSR)
4096 nor->flags |= SNOR_F_USE_FSR;
3dd8012a
BN
4097 if (info->flags & SPI_NOR_HAS_TB)
4098 nor->flags |= SNOR_F_HAS_SR_TB;
2f5ad7f0 4099 if (info->flags & NO_CHIP_ERASE)
4100 nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
c4b3eacc
AS
4101 if (info->flags & USE_CLSR)
4102 nor->flags |= SNOR_F_USE_CLSR;
c14dedde 4103
b199489d
HS
4104 if (info->flags & SPI_NOR_NO_ERASE)
4105 mtd->flags |= MTD_NO_ERASE;
4106
4107 mtd->dev.parent = dev;
cfc5604c 4108 nor->page_size = params.page_size;
b199489d
HS
4109 mtd->writebufsize = nor->page_size;
4110
4111 if (np) {
4112 /* If we were instantiated by DT, use it */
4113 if (of_property_read_bool(np, "m25p,fast-read"))
cfc5604c 4114 params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
b199489d 4115 else
cfc5604c 4116 params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
b199489d
HS
4117 } else {
4118 /* If we weren't instantiated by DT, default to fast-read */
cfc5604c 4119 params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
b199489d
HS
4120 }
4121
bb276262
BN
4122 if (of_property_read_bool(np, "broken-flash-reset"))
4123 nor->flags |= SNOR_F_BROKEN_RESET;
4124
b199489d
HS
4125 /* Some devices cannot do fast-read, no matter what DT tells us */
4126 if (info->flags & SPI_NOR_NO_FR)
cfc5604c 4127 params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
b199489d 4128
cfc5604c
CP
4129 /*
4130 * Configure the SPI memory:
4131 * - select op codes for (Fast) Read, Page Program and Sector Erase.
4132 * - set the number of dummy cycles (mode cycles + wait states).
4133 * - set the SPI protocols for register and memory accesses.
4134 * - set the Quad Enable bit if needed (required by SPI x-y-4 protos).
4135 */
b296379f 4136 ret = spi_nor_setup(nor, &params, hwcaps);
cfc5604c
CP
4137 if (ret)
4138 return ret;
b199489d 4139
f384b352
CP
4140 if (nor->addr_width) {
4141 /* already configured from SFDP */
4142 } else if (info->addr_width) {
b199489d 4143 nor->addr_width = info->addr_width;
f384b352 4144 } else if (mtd->size > 0x1000000) {
b199489d
HS
4145 /* enable 4-byte addressing if the device exceeds 16MiB */
4146 nor->addr_width = 4;
b199489d
HS
4147 } else {
4148 nor->addr_width = 3;
4149 }
4150
548ed684
BB
4151 if (info->flags & SPI_NOR_4B_OPCODES ||
4152 (JEDEC_MFR(info) == SNOR_MFR_SPANSION && mtd->size > SZ_16M))
4153 nor->flags |= SNOR_F_4B_OPCODES;
4154
816873ea
CP
4155 if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
4156 !(nor->flags & SNOR_F_HAS_4BAIT))
b296379f 4157 spi_nor_set_4byte_opcodes(nor);
548ed684 4158
c67cbb83
BN
4159 if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
4160 dev_err(dev, "address width is too large: %u\n",
4161 nor->addr_width);
4162 return -EINVAL;
4163 }
4164
e99ca98f 4165 if (info->flags & SPI_S3AN) {
b296379f 4166 ret = s3an_nor_scan(nor);
e99ca98f
RR
4167 if (ret)
4168 return ret;
4169 }
4170
46dde01f 4171	/* Send all the required SPI flash commands to initialize the device */
46dde01f
KD
4172 ret = spi_nor_init(nor);
4173 if (ret)
4174 return ret;
4175
06bb6f5a 4176 dev_info(dev, "%s (%lld Kbytes)\n", info->name,
b199489d
HS
4177 (long long)mtd->size >> 10);
4178
4179 dev_dbg(dev,
4180 "mtd .name = %s, .size = 0x%llx (%lldMiB), "
4181 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
4182 mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
4183 mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
4184
4185 if (mtd->numeraseregions)
4186 for (i = 0; i < mtd->numeraseregions; i++)
4187 dev_dbg(dev,
4188 "mtd.eraseregions[%d] = { .offset = 0x%llx, "
4189 ".erasesize = 0x%.8x (%uKiB), "
4190 ".numblocks = %d }\n",
4191 i, (long long)mtd->eraseregions[i].offset,
4192 mtd->eraseregions[i].erasesize,
4193 mtd->eraseregions[i].erasesize / 1024,
4194 mtd->eraseregions[i].numblocks);
4195 return 0;
4196}
b61834b0 4197EXPORT_SYMBOL_GPL(spi_nor_scan);
b199489d 4198
e9f3a2bc 4199MODULE_LICENSE("GPL v2");
b199489d
HS
4200MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
4201MODULE_AUTHOR("Mike Lavender");
4202MODULE_DESCRIPTION("framework for SPI NOR");