]> git.ipfire.org Git - thirdparty/kernel/stable.git/blob - drivers/spi/spi-bcm-qspi.c
Merge branch 'net-hns-bugfixes-for-HNS-Driver'
[thirdparty/kernel/stable.git] / drivers / spi / spi-bcm-qspi.c
1 /*
2 * Driver for Broadcom BRCMSTB, NSP, NS2, Cygnus SPI Controllers
3 *
4 * Copyright 2016 Broadcom
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2, as
8 * published by the Free Software Foundation (the "GPL").
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 (GPLv2) for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * version 2 (GPLv2) along with this source code.
17 */
18
19 #include <linux/clk.h>
20 #include <linux/delay.h>
21 #include <linux/device.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/io.h>
25 #include <linux/ioport.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/of.h>
29 #include <linux/of_irq.h>
30 #include <linux/platform_device.h>
31 #include <linux/slab.h>
32 #include <linux/spi/spi.h>
33 #include <linux/spi/spi-mem.h>
34 #include <linux/sysfs.h>
35 #include <linux/types.h>
36 #include "spi-bcm-qspi.h"
37
38 #define DRIVER_NAME "bcm_qspi"
39
40
41 /* BSPI register offsets */
42 #define BSPI_REVISION_ID 0x000
43 #define BSPI_SCRATCH 0x004
44 #define BSPI_MAST_N_BOOT_CTRL 0x008
45 #define BSPI_BUSY_STATUS 0x00c
46 #define BSPI_INTR_STATUS 0x010
47 #define BSPI_B0_STATUS 0x014
48 #define BSPI_B0_CTRL 0x018
49 #define BSPI_B1_STATUS 0x01c
50 #define BSPI_B1_CTRL 0x020
51 #define BSPI_STRAP_OVERRIDE_CTRL 0x024
52 #define BSPI_FLEX_MODE_ENABLE 0x028
53 #define BSPI_BITS_PER_CYCLE 0x02c
54 #define BSPI_BITS_PER_PHASE 0x030
55 #define BSPI_CMD_AND_MODE_BYTE 0x034
56 #define BSPI_BSPI_FLASH_UPPER_ADDR_BYTE 0x038
57 #define BSPI_BSPI_XOR_VALUE 0x03c
58 #define BSPI_BSPI_XOR_ENABLE 0x040
59 #define BSPI_BSPI_PIO_MODE_ENABLE 0x044
60 #define BSPI_BSPI_PIO_IODIR 0x048
61 #define BSPI_BSPI_PIO_DATA 0x04c
62
63 /* RAF register offsets */
64 #define BSPI_RAF_START_ADDR 0x100
65 #define BSPI_RAF_NUM_WORDS 0x104
66 #define BSPI_RAF_CTRL 0x108
67 #define BSPI_RAF_FULLNESS 0x10c
68 #define BSPI_RAF_WATERMARK 0x110
69 #define BSPI_RAF_STATUS 0x114
70 #define BSPI_RAF_READ_DATA 0x118
71 #define BSPI_RAF_WORD_CNT 0x11c
72 #define BSPI_RAF_CURR_ADDR 0x120
73
74 /* Override mode masks */
75 #define BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE BIT(0)
76 #define BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL BIT(1)
77 #define BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE BIT(2)
78 #define BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD BIT(3)
79 #define BSPI_STRAP_OVERRIDE_CTRL_ENDAIN_MODE BIT(4)
80
81 #define BSPI_ADDRLEN_3BYTES 3
82 #define BSPI_ADDRLEN_4BYTES 4
83
84 #define BSPI_RAF_STATUS_FIFO_EMPTY_MASK BIT(1)
85
86 #define BSPI_RAF_CTRL_START_MASK BIT(0)
87 #define BSPI_RAF_CTRL_CLEAR_MASK BIT(1)
88
89 #define BSPI_BPP_MODE_SELECT_MASK BIT(8)
90 #define BSPI_BPP_ADDR_SELECT_MASK BIT(16)
91
92 #define BSPI_READ_LENGTH 256
93
94 /* MSPI register offsets */
95 #define MSPI_SPCR0_LSB 0x000
96 #define MSPI_SPCR0_MSB 0x004
97 #define MSPI_SPCR1_LSB 0x008
98 #define MSPI_SPCR1_MSB 0x00c
99 #define MSPI_NEWQP 0x010
100 #define MSPI_ENDQP 0x014
101 #define MSPI_SPCR2 0x018
102 #define MSPI_MSPI_STATUS 0x020
103 #define MSPI_CPTQP 0x024
104 #define MSPI_SPCR3 0x028
105 #define MSPI_TXRAM 0x040
106 #define MSPI_RXRAM 0x0c0
107 #define MSPI_CDRAM 0x140
108 #define MSPI_WRITE_LOCK 0x180
109
110 #define MSPI_MASTER_BIT BIT(7)
111
112 #define MSPI_NUM_CDRAM 16
113 #define MSPI_CDRAM_CONT_BIT BIT(7)
114 #define MSPI_CDRAM_BITSE_BIT BIT(6)
115 #define MSPI_CDRAM_PCS 0xf
116
117 #define MSPI_SPCR2_SPE BIT(6)
118 #define MSPI_SPCR2_CONT_AFTER_CMD BIT(7)
119
120 #define MSPI_MSPI_STATUS_SPIF BIT(0)
121
122 #define INTR_BASE_BIT_SHIFT 0x02
123 #define INTR_COUNT 0x07
124
125 #define NUM_CHIPSELECT 4
126 #define QSPI_SPBR_MIN 8U
127 #define QSPI_SPBR_MAX 255U
128
129 #define OPCODE_DIOR 0xBB
130 #define OPCODE_QIOR 0xEB
131 #define OPCODE_DIOR_4B 0xBC
132 #define OPCODE_QIOR_4B 0xEC
133
134 #define MAX_CMD_SIZE 6
135
136 #define ADDR_4MB_MASK GENMASK(22, 0)
137
138 /* stop at end of transfer, no other reason */
139 #define TRANS_STATUS_BREAK_NONE 0
140 /* stop at end of spi_message */
141 #define TRANS_STATUS_BREAK_EOM 1
142 /* stop at end of spi_transfer if delay */
143 #define TRANS_STATUS_BREAK_DELAY 2
144 /* stop at end of spi_transfer if cs_change */
145 #define TRANS_STATUS_BREAK_CS_CHANGE 4
146 /* stop if we run out of bytes */
147 #define TRANS_STATUS_BREAK_NO_BYTES 8
148
149 /* events that make us stop filling TX slots */
150 #define TRANS_STATUS_BREAK_TX (TRANS_STATUS_BREAK_EOM | \
151 TRANS_STATUS_BREAK_DELAY | \
152 TRANS_STATUS_BREAK_CS_CHANGE)
153
154 /* events that make us deassert CS */
155 #define TRANS_STATUS_BREAK_DESELECT (TRANS_STATUS_BREAK_EOM | \
156 TRANS_STATUS_BREAK_CS_CHANGE)
157
/* SPI bus parameters programmed into the MSPI SPCR0 registers. */
struct bcm_qspi_parms {
	u32 speed_hz;		/* requested SCK rate; 0 = leave divider at min */
	u8 mode;		/* SPI_MODE_x (CPOL/CPHA bits) */
	u8 bits_per_word;	/* 8 or 16 supported by this controller */
};
163
/* Cached BSPI transfer-mode settings, used to skip redundant reprogramming. */
struct bcm_xfer_mode {
	bool flex_mode;		/* true when BSPI "flex mode" is in use */
	unsigned int width;	/* data lane width (1/2/4) */
	unsigned int addrlen;	/* address length in bytes (3 or 4) */
	unsigned int hp;	/* high-performance (continuous-read) mode flag */
};
170
/* Indices into bcm_qspi->base[] for the controller's register windows. */
enum base_type {
	MSPI,		/* master SPI (generic transfers) */
	BSPI,		/* boot SPI (accelerated flash reads) */
	CHIP_SELECT,	/* optional external CS mux register */
	BASEMAX,
};
177
/* How an interrupt line reaches us: dedicated L2 line or one muxed L1 line. */
enum irq_source {
	SINGLE_L2,
	MUXED_L1,
};
182
/* Static description of one interrupt handled by this driver. */
struct bcm_qspi_irq {
	const char *irq_name;			/* name to look up / request */
	const irq_handler_t irq_handler;	/* L1 or L2 handler */
	int irq_source;				/* enum irq_source */
	u32 mask;				/* status bit(s) it services */
};
189
/* Per-IRQ cookie handed to request_irq(); ties a table entry to a device. */
struct bcm_qspi_dev_id {
	const struct bcm_qspi_irq *irqp;	/* entry in qspi_irq_tab[] */
	void *dev;				/* owning struct bcm_qspi */
};
194
195
/* Position within the spi_message currently being clocked out via MSPI. */
struct qspi_trans {
	struct spi_transfer *trans;	/* transfer in progress (NULL = done) */
	int byte;			/* byte offset within that transfer */
	bool mspi_last_trans;		/* false while a follow-up transfer is queued */
};
201
/* Per-controller driver state. */
struct bcm_qspi {
	struct platform_device *pdev;
	struct spi_master *master;
	struct clk *clk;
	u32 base_clk;			/* input reference clock rate, Hz */
	u32 max_speed_hz;
	void __iomem *base[BASEMAX];	/* indexed by enum base_type */

	/* Some SoCs provide custom interrupt status register(s) */
	struct bcm_qspi_soc_intc *soc_intc;

	struct bcm_qspi_parms last_parms;	/* last parms written to HW */
	struct qspi_trans  trans_pos;	/* progress through current message */
	int curr_cs;			/* currently selected chip select */
	int bspi_maj_rev;		/* from BSPI_REVISION_ID */
	int bspi_min_rev;
	int bspi_enabled;
	const struct spi_mem_op *bspi_rf_op;	/* in-flight BSPI RAF read */
	u32 bspi_rf_op_idx;		/* word index into the rx buffer */
	u32 bspi_rf_op_len;		/* bytes still expected from the FIFO */
	u32 bspi_rf_op_status;		/* non-zero once an error IRQ fired */
	struct bcm_xfer_mode xfer_mode;	/* cached BSPI mode settings */
	u32 s3_strap_override_ctrl;	/* strap-override shadow; name suggests
					 * it is saved/restored over S3 — confirm
					 * against the suspend/resume code */
	bool bspi_mode;			/* controller has a BSPI block */
	bool big_endian;		/* register access endianness */
	int num_irqs;
	struct bcm_qspi_dev_id *dev_ids;	/* one cookie per requested IRQ */
	struct completion mspi_done;
	struct completion bspi_done;
};
232
/* True when this controller instance also has the BSPI flash-read block. */
static inline bool has_bspi(struct bcm_qspi *qspi)
{
	return qspi->bspi_mode;
}
237
238 /* Read qspi controller register*/
/* Read a qspi controller register from the given register window. */
static inline u32 bcm_qspi_read(struct bcm_qspi *qspi, enum base_type type,
				unsigned int offset)
{
	return bcm_qspi_readl(qspi->big_endian, qspi->base[type] + offset);
}
244
245 /* Write qspi controller register*/
/* Write a qspi controller register in the given register window. */
static inline void bcm_qspi_write(struct bcm_qspi *qspi, enum base_type type,
				  unsigned int offset, unsigned int data)
{
	bcm_qspi_writel(qspi->big_endian, data, qspi->base[type] + offset);
}
251
252 /* BSPI helpers */
/* BSPI helpers */

/*
 * Busy-wait (up to ~1ms) for the BSPI block to go idle.
 * Returns 0 when idle, -EIO on timeout.
 */
static int bcm_qspi_bspi_busy_poll(struct bcm_qspi *qspi)
{
	int i;

	/* this should normally finish within 10us */
	for (i = 0; i < 1000; i++) {
		if (!(bcm_qspi_read(qspi, BSPI, BSPI_BUSY_STATUS) & 1))
			return 0;
		udelay(1);
	}
	dev_warn(&qspi->pdev->dev, "timeout waiting for !busy_status\n");
	return -EIO;
}
266
267 static inline bool bcm_qspi_bspi_ver_three(struct bcm_qspi *qspi)
268 {
269 if (qspi->bspi_maj_rev < 4)
270 return true;
271 return false;
272 }
273
/*
 * Flush the BSPI B0/B1 prefetch buffers so stale flash data is discarded.
 * Waits for the block to go idle first; the flush itself is edge-triggered.
 */
static void bcm_qspi_bspi_flush_prefetch_buffers(struct bcm_qspi *qspi)
{
	bcm_qspi_bspi_busy_poll(qspi);
	/* Force rising edge for the b0/b1 'flush' field */
	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 1);
	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 1);
	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
}
283
/* Non-zero when the BSPI linear-read (RAF) FIFO has no data pending. */
static int bcm_qspi_bspi_lr_is_fifo_empty(struct bcm_qspi *qspi)
{
	return (bcm_qspi_read(qspi, BSPI, BSPI_RAF_STATUS) &
		BSPI_RAF_STATUS_FIFO_EMPTY_MASK);
}
289
/* Pop one 32-bit word from the RAF read FIFO, in host endianness. */
static inline u32 bcm_qspi_bspi_lr_read_fifo(struct bcm_qspi *qspi)
{
	u32 data = bcm_qspi_read(qspi, BSPI, BSPI_RAF_READ_DATA);

	/* BSPI v3 LR is LE only, convert data to host endianness */
	if (bcm_qspi_bspi_ver_three(qspi))
		data = le32_to_cpu(data);

	return data;
}
300
/* Kick off a RAF linear-read session (start address/length set by caller). */
static inline void bcm_qspi_bspi_lr_start(struct bcm_qspi *qspi)
{
	bcm_qspi_bspi_busy_poll(qspi);
	bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
		       BSPI_RAF_CTRL_START_MASK);
}
307
/* Abort/clear the current RAF session and flush the prefetch buffers. */
static inline void bcm_qspi_bspi_lr_clear(struct bcm_qspi *qspi)
{
	bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
		       BSPI_RAF_CTRL_CLEAR_MASK);
	bcm_qspi_bspi_flush_prefetch_buffers(qspi);
}
314
/*
 * Drain the RAF FIFO into the current op's rx buffer (IRQ context).
 * Whole words are copied while the buffer is 4-byte aligned and at least
 * 4 bytes remain; the tail is copied byte-wise, LSB first.
 */
static void bcm_qspi_bspi_lr_data_read(struct bcm_qspi *qspi)
{
	u32 *buf = (u32 *)qspi->bspi_rf_op->data.buf.in;
	u32 data = 0;

	dev_dbg(&qspi->pdev->dev, "xfer %p rx %p rxlen %d\n", qspi->bspi_rf_op,
		qspi->bspi_rf_op->data.buf.in, qspi->bspi_rf_op_len);
	while (!bcm_qspi_bspi_lr_is_fifo_empty(qspi)) {
		data = bcm_qspi_bspi_lr_read_fifo(qspi);
		if (likely(qspi->bspi_rf_op_len >= 4) &&
		    IS_ALIGNED((uintptr_t)buf, 4)) {
			/* fast path: store a full word */
			buf[qspi->bspi_rf_op_idx++] = data;
			qspi->bspi_rf_op_len -= 4;
		} else {
			/* Read out remaining bytes, make sure*/
			u8 *cbuf = (u8 *)&buf[qspi->bspi_rf_op_idx];

			/* fix byte order so the LSB goes out first */
			data = cpu_to_le32(data);
			while (qspi->bspi_rf_op_len) {
				*cbuf++ = (u8)data;
				data >>= 8;
				qspi->bspi_rf_op_len--;
			}
		}
	}
}
341
/*
 * Commit BSPI transfer parameters.  Flex mode is disabled first so the
 * bpc/bpp/command registers are updated atomically with respect to reads,
 * then re-enabled (or left off) per @flex_mode.
 */
static void bcm_qspi_bspi_set_xfer_params(struct bcm_qspi *qspi, u8 cmd_byte,
					  int bpp, int bpc, int flex_mode)
{
	bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
	bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_CYCLE, bpc);
	bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_PHASE, bpp);
	bcm_qspi_write(qspi, BSPI, BSPI_CMD_AND_MODE_BYTE, cmd_byte);
	bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, flex_mode);
}
351
352 static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi,
353 const struct spi_mem_op *op, int hp)
354 {
355 int bpc = 0, bpp = 0;
356 u8 command = op->cmd.opcode;
357 int width = op->cmd.buswidth ? op->cmd.buswidth : SPI_NBITS_SINGLE;
358 int addrlen = op->addr.nbytes;
359 int flex_mode = 1;
360
361 dev_dbg(&qspi->pdev->dev, "set flex mode w %x addrlen %x hp %d\n",
362 width, addrlen, hp);
363
364 if (addrlen == BSPI_ADDRLEN_4BYTES)
365 bpp = BSPI_BPP_ADDR_SELECT_MASK;
366
367 bpp |= (op->dummy.nbytes * 8) / op->dummy.buswidth;
368
369 switch (width) {
370 case SPI_NBITS_SINGLE:
371 if (addrlen == BSPI_ADDRLEN_3BYTES)
372 /* default mode, does not need flex_cmd */
373 flex_mode = 0;
374 break;
375 case SPI_NBITS_DUAL:
376 bpc = 0x00000001;
377 if (hp) {
378 bpc |= 0x00010100; /* address and mode are 2-bit */
379 bpp = BSPI_BPP_MODE_SELECT_MASK;
380 }
381 break;
382 case SPI_NBITS_QUAD:
383 bpc = 0x00000002;
384 if (hp) {
385 bpc |= 0x00020200; /* address and mode are 4-bit */
386 bpp |= BSPI_BPP_MODE_SELECT_MASK;
387 }
388 break;
389 default:
390 return -EINVAL;
391 }
392
393 bcm_qspi_bspi_set_xfer_params(qspi, command, bpp, bpc, flex_mode);
394
395 return 0;
396 }
397
/*
 * Configure BSPI via the strap-override register instead of flex mode
 * (used when a bootloader already asserted the override).  Selects the
 * data lane width and 3/4-byte addressing, then disables flex mode.
 * Returns 0 on success, -EINVAL for an unsupported lane width.
 */
static int bcm_qspi_bspi_set_override(struct bcm_qspi *qspi,
				      const struct spi_mem_op *op, int hp)
{
	int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
	int addrlen = op->addr.nbytes;
	u32 data = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);

	dev_dbg(&qspi->pdev->dev, "set override mode w %x addrlen %x hp %d\n",
		width, addrlen, hp);

	switch (width) {
	case SPI_NBITS_SINGLE:
		/* clear quad/dual mode */
		data &= ~(BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD |
			  BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL);
		break;
	case SPI_NBITS_QUAD:
		/* clear dual mode and set quad mode */
		data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
		data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
		break;
	case SPI_NBITS_DUAL:
		/* clear quad mode set dual mode */
		data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
		data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
		break;
	default:
		return -EINVAL;
	}

	if (addrlen == BSPI_ADDRLEN_4BYTES)
		/* set 4byte mode*/
		data |= BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
	else
		/* clear 4 byte mode */
		data &= ~BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;

	/* set the override mode */
	data |= BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
	bcm_qspi_write(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL, data);
	/* all zeros: override register governs, flex mode off */
	bcm_qspi_bspi_set_xfer_params(qspi, op->cmd.opcode, 0, 0, 0);

	return 0;
}
442
/*
 * Pick and program the BSPI operating mode for @op: strap-override mode
 * when the override bit is (or was) asserted on BSPI >= v4, flex mode
 * otherwise.  Caches the resulting width/addrlen/hp in qspi->xfer_mode.
 * Returns 0 on success or a negative errno from the mode setters.
 */
static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
				  const struct spi_mem_op *op, int hp)
{
	int error = 0;
	int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
	int addrlen = op->addr.nbytes;

	/* default mode */
	qspi->xfer_mode.flex_mode = true;

	if (!bcm_qspi_bspi_ver_three(qspi)) {
		u32 val, mask;

		val = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
		mask = BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
		/* honor a live or previously-saved strap override */
		if (val & mask || qspi->s3_strap_override_ctrl & mask) {
			qspi->xfer_mode.flex_mode = false;
			bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
			error = bcm_qspi_bspi_set_override(qspi, op, hp);
		}
	}

	if (qspi->xfer_mode.flex_mode)
		error = bcm_qspi_bspi_set_flex_mode(qspi, op, hp);

	if (error) {
		dev_warn(&qspi->pdev->dev,
			 "INVALID COMBINATION: width=%d addrlen=%d hp=%d\n",
			 width, addrlen, hp);
	} else if (qspi->xfer_mode.width != width ||
		   qspi->xfer_mode.addrlen != addrlen ||
		   qspi->xfer_mode.hp != hp) {
		/* remember the new mode and log the change once */
		qspi->xfer_mode.width = width;
		qspi->xfer_mode.addrlen = addrlen;
		qspi->xfer_mode.hp = hp;
		dev_dbg(&qspi->pdev->dev,
			"cs:%d %d-lane output, %d-byte address%s\n",
			qspi->curr_cs,
			qspi->xfer_mode.width,
			qspi->xfer_mode.addrlen,
			qspi->xfer_mode.hp != -1 ? ", hp mode" : "");
	}

	return error;
}
488
/*
 * Hand the flash bus back to BSPI (MAST_N_BOOT_CTRL = 0).  No-op when
 * BSPI is absent or already owns the bus.
 */
static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
{
	if (!has_bspi(qspi))
		return;

	qspi->bspi_enabled = 1;
	/* bit 0 clear means BSPI already owns the bus */
	if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1) == 0)
		return;

	bcm_qspi_bspi_flush_prefetch_buffers(qspi);
	udelay(1);
	bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 0);
	udelay(1);
}
503
/*
 * Hand the flash bus to MSPI (MAST_N_BOOT_CTRL = 1).  No-op when BSPI
 * is absent or MSPI already owns the bus.
 */
static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
{
	if (!has_bspi(qspi))
		return;

	qspi->bspi_enabled = 0;
	/* bit 0 set means MSPI already owns the bus */
	if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1))
		return;

	bcm_qspi_bspi_busy_poll(qspi);
	bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 1);
	udelay(1);
}
517
/*
 * Route the given chip select through the optional external CS mux
 * (one-hot in the low byte) and record it as the current CS.
 */
static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
{
	u32 rd = 0;
	u32 wr = 0;

	if (qspi->base[CHIP_SELECT]) {
		rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
		wr = (rd & ~0xff) | (1 << cs);
		/* skip the write (and settle delay) if nothing changes */
		if (rd == wr)
			return;
		bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
		usleep_range(10, 20);
	}

	dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
	qspi->curr_cs = cs;
}
535
536 /* MSPI helpers */
/* MSPI helpers */

/*
 * Program MSPI SPCR0 from @xp: baud-rate divider into SPCR0_LSB, then
 * master bit, word size and SPI mode into SPCR0_MSB.  Caches @xp in
 * qspi->last_parms.
 */
static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
				  const struct bcm_qspi_parms *xp)
{
	u32 spcr, spbr = 0;

	if (xp->speed_hz)
		spbr = qspi->base_clk / (2 * xp->speed_hz);

	/* NB: spcr is reused below for the MSB register value */
	spcr = clamp_val(spbr, QSPI_SPBR_MIN, QSPI_SPBR_MAX);
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spcr);

	spcr = MSPI_MASTER_BIT;
	/* for 16 bit the data should be zero */
	if (xp->bits_per_word != 16)
		spcr |= xp->bits_per_word << 2;
	spcr |= xp->mode & 3;	/* CPOL/CPHA only */
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_MSB, spcr);

	qspi->last_parms = *xp;
}
557
558 static void bcm_qspi_update_parms(struct bcm_qspi *qspi,
559 struct spi_device *spi,
560 struct spi_transfer *trans)
561 {
562 struct bcm_qspi_parms xp;
563
564 xp.speed_hz = trans->speed_hz;
565 xp.bits_per_word = trans->bits_per_word;
566 xp.mode = spi->mode;
567
568 bcm_qspi_hw_set_parms(qspi, &xp);
569 }
570
/*
 * spi_master .setup callback: validate bits_per_word and (lazily) allocate
 * per-device parms in controller data.  Freed in bcm_qspi_cleanup().
 * Returns 0, -EINVAL for >16-bit words, or -ENOMEM.
 */
static int bcm_qspi_setup(struct spi_device *spi)
{
	struct bcm_qspi_parms *xp;

	if (spi->bits_per_word > 16)
		return -EINVAL;

	xp = spi_get_ctldata(spi);
	if (!xp) {
		xp = kzalloc(sizeof(*xp), GFP_KERNEL);
		if (!xp)
			return -ENOMEM;
		spi_set_ctldata(spi, xp);
	}
	xp->speed_hz = spi->max_speed_hz;
	xp->mode = spi->mode;

	/* default to 8-bit words when the core left it unset */
	if (spi->bits_per_word)
		xp->bits_per_word = spi->bits_per_word;
	else
		xp->bits_per_word = 8;

	return 0;
}
595
596 static bool bcm_qspi_mspi_transfer_is_last(struct bcm_qspi *qspi,
597 struct qspi_trans *qt)
598 {
599 if (qt->mspi_last_trans &&
600 spi_transfer_is_last(qspi->master, qt->trans))
601 return true;
602 else
603 return false;
604 }
605
/*
 * Advance @qt by one word (1 or 2 bytes depending on bits_per_word) and
 * report why filling should stop, as a TRANS_STATUS_BREAK_* bitmask
 * filtered by @flags.  Sets qt->trans to NULL at end of message / when
 * out of bytes.
 */
static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
					struct qspi_trans *qt, int flags)
{
	int ret = TRANS_STATUS_BREAK_NONE;

	/* count the last transferred bytes */
	if (qt->trans->bits_per_word <= 8)
		qt->byte++;
	else
		qt->byte += 2;

	if (qt->byte >= qt->trans->len) {
		/* we're at the end of the spi_transfer */
		/* in TX mode, need to pause for a delay or CS change */
		if (qt->trans->delay_usecs &&
		    (flags & TRANS_STATUS_BREAK_DELAY))
			ret |= TRANS_STATUS_BREAK_DELAY;
		if (qt->trans->cs_change &&
		    (flags & TRANS_STATUS_BREAK_CS_CHANGE))
			ret |= TRANS_STATUS_BREAK_CS_CHANGE;
		if (ret)
			goto done;

		dev_dbg(&qspi->pdev->dev, "advance msg exit\n");
		if (bcm_qspi_mspi_transfer_is_last(qspi, qt))
			ret = TRANS_STATUS_BREAK_EOM;
		else
			ret = TRANS_STATUS_BREAK_NO_BYTES;

		qt->trans = NULL;
	}

done:
	dev_dbg(&qspi->pdev->dev, "trans %p len %d byte %d ret %x\n",
		qt->trans, qt->trans ? qt->trans->len : 0, qt->byte, ret);
	return ret;
}
643
/* Read the low byte of RX RAM slot @slot (LSB half of the 8-byte slot). */
static inline u8 read_rxram_slot_u8(struct bcm_qspi *qspi, int slot)
{
	u32 slot_offset = MSPI_RXRAM + (slot << 3) + 0x4;

	/* mask out reserved bits */
	return bcm_qspi_read(qspi, MSPI, slot_offset) & 0xff;
}
651
/* Assemble a 16-bit word from RX RAM slot @slot (MSB and LSB registers). */
static inline u16 read_rxram_slot_u16(struct bcm_qspi *qspi, int slot)
{
	u32 reg_offset = MSPI_RXRAM;
	u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
	u32 msb_offset = reg_offset + (slot << 3);

	return (bcm_qspi_read(qspi, MSPI, lsb_offset) & 0xff) |
	       ((bcm_qspi_read(qspi, MSPI, msb_offset) & 0xff) << 8);
}
661
662 static void read_from_hw(struct bcm_qspi *qspi, int slots)
663 {
664 struct qspi_trans tp;
665 int slot;
666
667 bcm_qspi_disable_bspi(qspi);
668
669 if (slots > MSPI_NUM_CDRAM) {
670 /* should never happen */
671 dev_err(&qspi->pdev->dev, "%s: too many slots!\n", __func__);
672 return;
673 }
674
675 tp = qspi->trans_pos;
676
677 for (slot = 0; slot < slots; slot++) {
678 if (tp.trans->bits_per_word <= 8) {
679 u8 *buf = tp.trans->rx_buf;
680
681 if (buf)
682 buf[tp.byte] = read_rxram_slot_u8(qspi, slot);
683 dev_dbg(&qspi->pdev->dev, "RD %02x\n",
684 buf ? buf[tp.byte] : 0xff);
685 } else {
686 u16 *buf = tp.trans->rx_buf;
687
688 if (buf)
689 buf[tp.byte / 2] = read_rxram_slot_u16(qspi,
690 slot);
691 dev_dbg(&qspi->pdev->dev, "RD %04x\n",
692 buf ? buf[tp.byte] : 0xffff);
693 }
694
695 update_qspi_trans_byte_count(qspi, &tp,
696 TRANS_STATUS_BREAK_NONE);
697 }
698
699 qspi->trans_pos = tp;
700 }
701
/* Write one byte into the MSB half of TX RAM slot @slot. */
static inline void write_txram_slot_u8(struct bcm_qspi *qspi, int slot,
				       u8 val)
{
	u32 reg_offset = MSPI_TXRAM + (slot << 3);

	/* mask out reserved bits */
	bcm_qspi_write(qspi, MSPI, reg_offset, val);
}
710
/* Split a 16-bit word across the MSB/LSB registers of TX RAM slot @slot. */
static inline void write_txram_slot_u16(struct bcm_qspi *qspi, int slot,
					u16 val)
{
	u32 reg_offset = MSPI_TXRAM;
	u32 msb_offset = reg_offset + (slot << 3);
	u32 lsb_offset = reg_offset + (slot << 3) + 0x4;

	bcm_qspi_write(qspi, MSPI, msb_offset, (val >> 8));
	bcm_qspi_write(qspi, MSPI, lsb_offset, (val & 0xff));
}
721
/* Read the command RAM entry for slot @slot. */
static inline u32 read_cdram_slot(struct bcm_qspi *qspi, int slot)
{
	return bcm_qspi_read(qspi, MSPI, MSPI_CDRAM + (slot << 2));
}
726
/* Write the command RAM entry for slot @slot. */
static inline void write_cdram_slot(struct bcm_qspi *qspi, int slot, u32 val)
{
	bcm_qspi_write(qspi, MSPI, (MSPI_CDRAM + (slot << 2)), val);
}
731
732 /* Return number of slots written */
/*
 * Fill TX RAM and command RAM from the current transfer position and kick
 * off an MSPI burst of up to MSPI_NUM_CDRAM slots.
 *
 * Return number of slots written (the caller passes this to read_from_hw()
 * after the completion fires).
 */
static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
{
	struct qspi_trans tp;
	int slot = 0, tstatus = 0;
	u32 mspi_cdram = 0;

	bcm_qspi_disable_bspi(qspi);
	/* work on a local copy; trans_pos advances via read_from_hw() */
	tp = qspi->trans_pos;
	bcm_qspi_update_parms(qspi, spi, tp.trans);

	/* Run until end of transfer or reached the max data */
	while (!tstatus && slot < MSPI_NUM_CDRAM) {
		if (tp.trans->bits_per_word <= 8) {
			const u8 *buf = tp.trans->tx_buf;
			u8 val = buf ? buf[tp.byte] : 0xff;

			write_txram_slot_u8(qspi, slot, val);
			dev_dbg(&qspi->pdev->dev, "WR %02x\n", val);
		} else {
			const u16 *buf = tp.trans->tx_buf;
			u16 val = buf ? buf[tp.byte / 2] : 0xffff;

			write_txram_slot_u16(qspi, slot, val);
			dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
		}
		mspi_cdram = MSPI_CDRAM_CONT_BIT;

		/* with BSPI, CS is driven differently (clear PCS bit 0) */
		if (has_bspi(qspi))
			mspi_cdram &= ~1;
		else
			mspi_cdram |= (~(1 << spi->chip_select) &
				       MSPI_CDRAM_PCS);

		mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
			       MSPI_CDRAM_BITSE_BIT);

		write_cdram_slot(qspi, slot, mspi_cdram);

		tstatus = update_qspi_trans_byte_count(qspi, &tp,
						       TRANS_STATUS_BREAK_TX);
		slot++;
	}

	if (!slot) {
		dev_err(&qspi->pdev->dev, "%s: no data to send?", __func__);
		goto done;
	}

	dev_dbg(&qspi->pdev->dev, "submitting %d slots\n", slot);
	bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
	bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, slot - 1);

	/* drop CONT on the last slot when CS must deassert afterwards */
	if (tstatus & TRANS_STATUS_BREAK_DESELECT) {
		mspi_cdram = read_cdram_slot(qspi, slot - 1) &
			~MSPI_CDRAM_CONT_BIT;
		write_cdram_slot(qspi, slot - 1, mspi_cdram);
	}

	if (has_bspi(qspi))
		bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 1);

	/* Must flush previous writes before starting MSPI operation */
	mb();
	/* Set cont | spe | spifie */
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0xe0);

done:
	return slot;
}
802
/*
 * Execute a flash read through the BSPI RAF engine, breaking the request
 * into BSPI_READ_LENGTH-sized sessions.  The BSPI-done interrupt handler
 * drains the FIFO into op->data.buf.in and completes bspi_done.
 * Returns 0 on success or -EIO / -ETIMEDOUT.
 */
static int bcm_qspi_bspi_exec_mem_op(struct spi_device *spi,
				     const struct spi_mem_op *op)
{
	struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
	u32 addr = 0, len, rdlen, len_words, from = 0;
	int ret = 0;
	unsigned long timeo = msecs_to_jiffies(100);
	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;

	/* BSPI <= v3 cannot do 4-byte addressing */
	if (bcm_qspi_bspi_ver_three(qspi))
		if (op->addr.nbytes == BSPI_ADDRLEN_4BYTES)
			return -EIO;

	from = op->addr.val;
	bcm_qspi_chip_select(qspi, spi->chip_select);
	bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);

	/*
	 * when using flex mode we need to send
	 * the upper address byte to bspi
	 */
	if (bcm_qspi_bspi_ver_three(qspi) == false) {
		addr = from & 0xff000000;
		bcm_qspi_write(qspi, BSPI,
			       BSPI_BSPI_FLASH_UPPER_ADDR_BYTE, addr);
	}

	if (!qspi->xfer_mode.flex_mode)
		addr = from;
	else
		addr = from & 0x00ffffff;

	/* v3 maps flash offsets into a remapped BSPI window */
	if (bcm_qspi_bspi_ver_three(qspi) == true)
		addr = (addr + 0xc00000) & 0xffffff;

	/*
	 * read into the entire buffer by breaking the reads
	 * into RAF buffer read lengths
	 */
	len = op->data.nbytes;
	qspi->bspi_rf_op_idx = 0;

	do {
		if (len > BSPI_READ_LENGTH)
			rdlen = BSPI_READ_LENGTH;
		else
			rdlen = len;

		reinit_completion(&qspi->bspi_done);
		bcm_qspi_enable_bspi(qspi);
		len_words = (rdlen + 3) >> 2;	/* round up to whole words */
		qspi->bspi_rf_op = op;
		qspi->bspi_rf_op_status = 0;
		qspi->bspi_rf_op_len = rdlen;
		dev_dbg(&qspi->pdev->dev,
			"bspi xfr addr 0x%x len 0x%x", addr, rdlen);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_START_ADDR, addr);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_NUM_WORDS, len_words);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_WATERMARK, 0);
		if (qspi->soc_intc) {
			/*
			 * clear soc MSPI and BSPI interrupts and enable
			 * BSPI interrupts.
			 */
			soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
			soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE, true);
		}

		/* Must flush previous writes before starting BSPI operation */
		mb();
		bcm_qspi_bspi_lr_start(qspi);
		if (!wait_for_completion_timeout(&qspi->bspi_done, timeo)) {
			dev_err(&qspi->pdev->dev, "timeout waiting for BSPI\n");
			ret = -ETIMEDOUT;
			break;
		}

		/* set msg return length */
		addr += rdlen;
		len -= rdlen;
	} while (len);

	return ret;
}
887
/*
 * spi_master .transfer_one callback: push @trans through MSPI in
 * MSPI_NUM_CDRAM-slot bursts, waiting for the done interrupt after each.
 * Returns 0 on success or -ETIMEDOUT.
 */
static int bcm_qspi_transfer_one(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *trans)
{
	struct bcm_qspi *qspi = spi_master_get_devdata(master);
	int slots;
	unsigned long timeo = msecs_to_jiffies(100);

	bcm_qspi_chip_select(qspi, spi->chip_select);
	qspi->trans_pos.trans = trans;
	qspi->trans_pos.byte = 0;

	while (qspi->trans_pos.byte < trans->len) {
		reinit_completion(&qspi->mspi_done);

		slots = write_to_hw(qspi, spi);
		if (!wait_for_completion_timeout(&qspi->mspi_done, timeo)) {
			dev_err(&qspi->pdev->dev, "timeout waiting for MSPI\n");
			return -ETIMEDOUT;
		}

		/* copy RX data back and advance trans_pos */
		read_from_hw(qspi, slots);
	}

	return 0;
}
914
915 static int bcm_qspi_mspi_exec_mem_op(struct spi_device *spi,
916 const struct spi_mem_op *op)
917 {
918 struct spi_master *master = spi->master;
919 struct bcm_qspi *qspi = spi_master_get_devdata(master);
920 struct spi_transfer t[2];
921 u8 cmd[6] = { };
922 int ret, i;
923
924 memset(cmd, 0, sizeof(cmd));
925 memset(t, 0, sizeof(t));
926
927 /* tx */
928 /* opcode is in cmd[0] */
929 cmd[0] = op->cmd.opcode;
930 for (i = 0; i < op->addr.nbytes; i++)
931 cmd[1 + i] = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
932
933 t[0].tx_buf = cmd;
934 t[0].len = op->addr.nbytes + op->dummy.nbytes + 1;
935 t[0].bits_per_word = spi->bits_per_word;
936 t[0].tx_nbits = op->cmd.buswidth;
937 /* lets mspi know that this is not last transfer */
938 qspi->trans_pos.mspi_last_trans = false;
939 ret = bcm_qspi_transfer_one(master, spi, &t[0]);
940
941 /* rx */
942 qspi->trans_pos.mspi_last_trans = true;
943 if (!ret) {
944 /* rx */
945 t[1].rx_buf = op->data.buf.in;
946 t[1].len = op->data.nbytes;
947 t[1].rx_nbits = op->data.buswidth;
948 t[1].bits_per_word = spi->bits_per_word;
949 ret = bcm_qspi_transfer_one(master, spi, &t[1]);
950 }
951
952 return ret;
953 }
954
/*
 * spi_mem .exec_mem_op callback: dispatch a flash read either to the BSPI
 * accelerator or, for cases BSPI cannot handle (non-read ops, unaligned or
 * tiny transfers, v3 4MB-boundary crossings), to plain MSPI.
 */
static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	struct spi_device *spi = mem->spi;
	struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
	int ret = 0;
	bool mspi_read = false;
	u32 addr = 0, len;
	u_char *buf;

	/* only addressed reads are supported here */
	if (!op->data.nbytes || !op->addr.nbytes || op->addr.nbytes > 4 ||
	    op->data.dir != SPI_MEM_DATA_IN)
		return -ENOTSUPP;

	buf = op->data.buf.in;
	addr = op->addr.val;
	len = op->data.nbytes;

	if (bcm_qspi_bspi_ver_three(qspi) == true) {
		/*
		 * The address coming into this function is a raw flash offset.
		 * But for BSPI <= V3, we need to convert it to a remapped BSPI
		 * address. If it crosses a 4MB boundary, just revert back to
		 * using MSPI.
		 */
		addr = (addr + 0xc00000) & 0xffffff;

		if ((~ADDR_4MB_MASK & addr) ^
		    (~ADDR_4MB_MASK & (addr + len - 1)))
			mspi_read = true;
	}

	/* non-aligned and very short transfers are handled by MSPI */
	if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
	    len < 4)
		mspi_read = true;

	if (mspi_read)
		return bcm_qspi_mspi_exec_mem_op(spi, op);

	/* hp = -1: leave high-performance mode selection unchanged */
	ret = bcm_qspi_bspi_set_mode(qspi, op, -1);

	if (!ret)
		ret = bcm_qspi_bspi_exec_mem_op(spi, op);

	return ret;
}
1002
/* spi_master .cleanup: free the per-device parms from bcm_qspi_setup(). */
static void bcm_qspi_cleanup(struct spi_device *spi)
{
	kfree(spi_get_ctldata(spi));
}
1009
/*
 * MSPI done interrupt: acknowledge SPIF (and the SoC-level status, if
 * present) and complete mspi_done.  IRQ_NONE if SPIF was not set.
 */
static irqreturn_t bcm_qspi_mspi_l2_isr(int irq, void *dev_id)
{
	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
	struct bcm_qspi *qspi = qspi_dev_id->dev;
	u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);

	if (status & MSPI_MSPI_STATUS_SPIF) {
		struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
		/* clear interrupt */
		status &= ~MSPI_MSPI_STATUS_SPIF;
		bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status);
		if (qspi->soc_intc)
			soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_DONE);
		complete(&qspi->mspi_done);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
1029
/*
 * BSPI linear-read interrupt (fullness reached / session done): drain the
 * RAF FIFO into the pending op and, once all bytes arrived, tidy up the
 * session and complete bspi_done.
 */
static irqreturn_t bcm_qspi_bspi_lr_l2_isr(int irq, void *dev_id)
{
	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
	struct bcm_qspi *qspi = qspi_dev_id->dev;
	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
	u32 status = qspi_dev_id->irqp->mask;

	if (qspi->bspi_enabled && qspi->bspi_rf_op) {
		bcm_qspi_bspi_lr_data_read(qspi);
		if (qspi->bspi_rf_op_len == 0) {
			/* session finished */
			qspi->bspi_rf_op = NULL;
			if (qspi->soc_intc) {
				/* disable soc BSPI interrupt */
				soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE,
							   false);
				/* indicate done */
				status = INTR_BSPI_LR_SESSION_DONE_MASK;
			}

			/* on error, abort the session; otherwise just flush */
			if (qspi->bspi_rf_op_status)
				bcm_qspi_bspi_lr_clear(qspi);
			else
				bcm_qspi_bspi_flush_prefetch_buffers(qspi);
		}

		if (qspi->soc_intc)
			/* clear soc BSPI interrupt */
			soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_DONE);
	}

	status &= INTR_BSPI_LR_SESSION_DONE_MASK;
	if (qspi->bspi_enabled && status && qspi->bspi_rf_op_len == 0)
		complete(&qspi->bspi_done);

	return IRQ_HANDLED;
}
1066
/*
 * BSPI linear-read error interrupt (session aborted / impatient):
 * record -EIO for the pending op and wake the waiter.
 */
static irqreturn_t bcm_qspi_bspi_lr_err_l2_isr(int irq, void *dev_id)
{
	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
	struct bcm_qspi *qspi = qspi_dev_id->dev;
	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;

	dev_err(&qspi->pdev->dev, "BSPI INT error\n");
	qspi->bspi_rf_op_status = -EIO;
	if (qspi->soc_intc)
		/* clear soc interrupt */
		soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_ERR);

	complete(&qspi->bspi_done);
	return IRQ_HANDLED;
}
1082
/*
 * Muxed L1 interrupt handler: query the SoC interrupt controller's status
 * and dispatch to the matching L2 handler.  IRQ_NONE if no source matched
 * or no SoC intc is present.
 */
static irqreturn_t bcm_qspi_l1_isr(int irq, void *dev_id)
{
	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
	struct bcm_qspi *qspi = qspi_dev_id->dev;
	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
	irqreturn_t ret = IRQ_NONE;

	if (soc_intc) {
		u32 status = soc_intc->bcm_qspi_get_int_status(soc_intc);

		if (status & MSPI_DONE)
			ret = bcm_qspi_mspi_l2_isr(irq, dev_id);
		else if (status & BSPI_DONE)
			ret = bcm_qspi_bspi_lr_l2_isr(irq, dev_id);
		else if (status & BSPI_ERR)
			ret = bcm_qspi_bspi_lr_err_l2_isr(irq, dev_id);
	}

	return ret;
}
1103
/*
 * Table of interrupts this driver may request, matched by name against
 * the platform resources.  Entries default to dedicated L2 lines
 * (irq_source 0 == SINGLE_L2); the last entry is the single muxed L1 line.
 */
static const struct bcm_qspi_irq qspi_irq_tab[] = {
	{
		.irq_name = "spi_lr_fullness_reached",
		.irq_handler = bcm_qspi_bspi_lr_l2_isr,
		.mask = INTR_BSPI_LR_FULLNESS_REACHED_MASK,
	},
	{
		.irq_name = "spi_lr_session_aborted",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_SESSION_ABORTED_MASK,
	},
	{
		.irq_name = "spi_lr_impatient",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_IMPATIENT_MASK,
	},
	{
		.irq_name = "spi_lr_session_done",
		.irq_handler = bcm_qspi_bspi_lr_l2_isr,
		.mask = INTR_BSPI_LR_SESSION_DONE_MASK,
	},
#ifdef QSPI_INT_DEBUG
	/* this interrupt is for debug purposes only, dont request irq */
	{
		.irq_name = "spi_lr_overread",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_OVERREAD_MASK,
	},
#endif
	{
		.irq_name = "mspi_done",
		.irq_handler = bcm_qspi_mspi_l2_isr,
		.mask = INTR_MSPI_DONE_MASK,
	},
	{
		.irq_name = "mspi_halted",
		.irq_handler = bcm_qspi_mspi_l2_isr,
		.mask = INTR_MSPI_HALTED_MASK,
	},
	{
		/* single muxed L1 interrupt source */
		.irq_name = "spi_l1_intr",
		.irq_handler = bcm_qspi_l1_isr,
		.irq_source = MUXED_L1,
		.mask = QSPI_INTERRUPTS_ALL,
	},
};
1151
1152 static void bcm_qspi_bspi_init(struct bcm_qspi *qspi)
1153 {
1154 u32 val = 0;
1155
1156 val = bcm_qspi_read(qspi, BSPI, BSPI_REVISION_ID);
1157 qspi->bspi_maj_rev = (val >> 8) & 0xff;
1158 qspi->bspi_min_rev = val & 0xff;
1159 if (!(bcm_qspi_bspi_ver_three(qspi))) {
1160 /* Force mapping of BSPI address -> flash offset */
1161 bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_VALUE, 0);
1162 bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_ENABLE, 1);
1163 }
1164 qspi->bspi_enabled = 1;
1165 bcm_qspi_disable_bspi(qspi);
1166 bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
1167 bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
1168 }
1169
1170 static void bcm_qspi_hw_init(struct bcm_qspi *qspi)
1171 {
1172 struct bcm_qspi_parms parms;
1173
1174 bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 0);
1175 bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_MSB, 0);
1176 bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
1177 bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, 0);
1178 bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0x20);
1179
1180 parms.mode = SPI_MODE_3;
1181 parms.bits_per_word = 8;
1182 parms.speed_hz = qspi->max_speed_hz;
1183 bcm_qspi_hw_set_parms(qspi, &parms);
1184
1185 if (has_bspi(qspi))
1186 bcm_qspi_bspi_init(qspi);
1187 }
1188
1189 static void bcm_qspi_hw_uninit(struct bcm_qspi *qspi)
1190 {
1191 bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0);
1192 if (has_bspi(qspi))
1193 bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);
1194
1195 }
1196
/* spi-mem interface: flash ops are executed via bcm_qspi_exec_mem_op(). */
static const struct spi_controller_mem_ops bcm_qspi_mem_ops = {
	.exec_op = bcm_qspi_exec_mem_op,
};
1200
/* Device-tree match table; probe bails out unless the node matches. */
static const struct of_device_id bcm_qspi_of_match[] = {
	{ .compatible = "brcm,spi-bcm-qspi" },
	{},
};
MODULE_DEVICE_TABLE(of, bcm_qspi_of_match);
1206
1207 int bcm_qspi_probe(struct platform_device *pdev,
1208 struct bcm_qspi_soc_intc *soc_intc)
1209 {
1210 struct device *dev = &pdev->dev;
1211 struct bcm_qspi *qspi;
1212 struct spi_master *master;
1213 struct resource *res;
1214 int irq, ret = 0, num_ints = 0;
1215 u32 val;
1216 const char *name = NULL;
1217 int num_irqs = ARRAY_SIZE(qspi_irq_tab);
1218
1219 /* We only support device-tree instantiation */
1220 if (!dev->of_node)
1221 return -ENODEV;
1222
1223 if (!of_match_node(bcm_qspi_of_match, dev->of_node))
1224 return -ENODEV;
1225
1226 master = spi_alloc_master(dev, sizeof(struct bcm_qspi));
1227 if (!master) {
1228 dev_err(dev, "error allocating spi_master\n");
1229 return -ENOMEM;
1230 }
1231
1232 qspi = spi_master_get_devdata(master);
1233 qspi->pdev = pdev;
1234 qspi->trans_pos.trans = NULL;
1235 qspi->trans_pos.byte = 0;
1236 qspi->trans_pos.mspi_last_trans = true;
1237 qspi->master = master;
1238
1239 master->bus_num = -1;
1240 master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD;
1241 master->setup = bcm_qspi_setup;
1242 master->transfer_one = bcm_qspi_transfer_one;
1243 master->mem_ops = &bcm_qspi_mem_ops;
1244 master->cleanup = bcm_qspi_cleanup;
1245 master->dev.of_node = dev->of_node;
1246 master->num_chipselect = NUM_CHIPSELECT;
1247
1248 qspi->big_endian = of_device_is_big_endian(dev->of_node);
1249
1250 if (!of_property_read_u32(dev->of_node, "num-cs", &val))
1251 master->num_chipselect = val;
1252
1253 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hif_mspi");
1254 if (!res)
1255 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1256 "mspi");
1257
1258 if (res) {
1259 qspi->base[MSPI] = devm_ioremap_resource(dev, res);
1260 if (IS_ERR(qspi->base[MSPI])) {
1261 ret = PTR_ERR(qspi->base[MSPI]);
1262 goto qspi_resource_err;
1263 }
1264 } else {
1265 goto qspi_resource_err;
1266 }
1267
1268 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
1269 if (res) {
1270 qspi->base[BSPI] = devm_ioremap_resource(dev, res);
1271 if (IS_ERR(qspi->base[BSPI])) {
1272 ret = PTR_ERR(qspi->base[BSPI]);
1273 goto qspi_resource_err;
1274 }
1275 qspi->bspi_mode = true;
1276 } else {
1277 qspi->bspi_mode = false;
1278 }
1279
1280 dev_info(dev, "using %smspi mode\n", qspi->bspi_mode ? "bspi-" : "");
1281
1282 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg");
1283 if (res) {
1284 qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
1285 if (IS_ERR(qspi->base[CHIP_SELECT])) {
1286 ret = PTR_ERR(qspi->base[CHIP_SELECT]);
1287 goto qspi_resource_err;
1288 }
1289 }
1290
1291 qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id),
1292 GFP_KERNEL);
1293 if (!qspi->dev_ids) {
1294 ret = -ENOMEM;
1295 goto qspi_resource_err;
1296 }
1297
1298 for (val = 0; val < num_irqs; val++) {
1299 irq = -1;
1300 name = qspi_irq_tab[val].irq_name;
1301 if (qspi_irq_tab[val].irq_source == SINGLE_L2) {
1302 /* get the l2 interrupts */
1303 irq = platform_get_irq_byname(pdev, name);
1304 } else if (!num_ints && soc_intc) {
1305 /* all mspi, bspi intrs muxed to one L1 intr */
1306 irq = platform_get_irq(pdev, 0);
1307 }
1308
1309 if (irq >= 0) {
1310 ret = devm_request_irq(&pdev->dev, irq,
1311 qspi_irq_tab[val].irq_handler, 0,
1312 name,
1313 &qspi->dev_ids[val]);
1314 if (ret < 0) {
1315 dev_err(&pdev->dev, "IRQ %s not found\n", name);
1316 goto qspi_probe_err;
1317 }
1318
1319 qspi->dev_ids[val].dev = qspi;
1320 qspi->dev_ids[val].irqp = &qspi_irq_tab[val];
1321 num_ints++;
1322 dev_dbg(&pdev->dev, "registered IRQ %s %d\n",
1323 qspi_irq_tab[val].irq_name,
1324 irq);
1325 }
1326 }
1327
1328 if (!num_ints) {
1329 dev_err(&pdev->dev, "no IRQs registered, cannot init driver\n");
1330 ret = -EINVAL;
1331 goto qspi_probe_err;
1332 }
1333
1334 /*
1335 * Some SoCs integrate spi controller (e.g., its interrupt bits)
1336 * in specific ways
1337 */
1338 if (soc_intc) {
1339 qspi->soc_intc = soc_intc;
1340 soc_intc->bcm_qspi_int_set(soc_intc, MSPI_DONE, true);
1341 } else {
1342 qspi->soc_intc = NULL;
1343 }
1344
1345 qspi->clk = devm_clk_get(&pdev->dev, NULL);
1346 if (IS_ERR(qspi->clk)) {
1347 dev_warn(dev, "unable to get clock\n");
1348 ret = PTR_ERR(qspi->clk);
1349 goto qspi_probe_err;
1350 }
1351
1352 ret = clk_prepare_enable(qspi->clk);
1353 if (ret) {
1354 dev_err(dev, "failed to prepare clock\n");
1355 goto qspi_probe_err;
1356 }
1357
1358 qspi->base_clk = clk_get_rate(qspi->clk);
1359 qspi->max_speed_hz = qspi->base_clk / (QSPI_SPBR_MIN * 2);
1360
1361 bcm_qspi_hw_init(qspi);
1362 init_completion(&qspi->mspi_done);
1363 init_completion(&qspi->bspi_done);
1364 qspi->curr_cs = -1;
1365
1366 platform_set_drvdata(pdev, qspi);
1367
1368 qspi->xfer_mode.width = -1;
1369 qspi->xfer_mode.addrlen = -1;
1370 qspi->xfer_mode.hp = -1;
1371
1372 ret = devm_spi_register_master(&pdev->dev, master);
1373 if (ret < 0) {
1374 dev_err(dev, "can't register master\n");
1375 goto qspi_reg_err;
1376 }
1377
1378 return 0;
1379
1380 qspi_reg_err:
1381 bcm_qspi_hw_uninit(qspi);
1382 clk_disable_unprepare(qspi->clk);
1383 qspi_probe_err:
1384 kfree(qspi->dev_ids);
1385 qspi_resource_err:
1386 spi_master_put(master);
1387 return ret;
1388 }
1389 /* probe function to be called by SoC specific platform driver probe */
1390 EXPORT_SYMBOL_GPL(bcm_qspi_probe);
1391
/* Tear down a controller instance: quiesce hw, release clock and IRQ state. */
int bcm_qspi_remove(struct platform_device *pdev)
{
	struct bcm_qspi *qspi = platform_get_drvdata(pdev);

	/*
	 * NOTE(review): probe registers the master with
	 * devm_spi_register_master(), so devres unregisters it again after
	 * this function returns. Unregistering here as well -- and only
	 * after the hardware is quiesced and dev_ids freed -- looks like a
	 * potential double-unregister / use-after-free on unbind; verify
	 * against upstream fixes before relying on this path.
	 */
	bcm_qspi_hw_uninit(qspi);
	clk_disable_unprepare(qspi->clk);
	kfree(qspi->dev_ids);
	spi_unregister_master(qspi->master);

	return 0;
}
/* function to be called by SoC specific platform driver remove() */
EXPORT_SYMBOL_GPL(bcm_qspi_remove);
1405
1406 static int __maybe_unused bcm_qspi_suspend(struct device *dev)
1407 {
1408 struct bcm_qspi *qspi = dev_get_drvdata(dev);
1409
1410 /* store the override strap value */
1411 if (!bcm_qspi_bspi_ver_three(qspi))
1412 qspi->s3_strap_override_ctrl =
1413 bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
1414
1415 spi_master_suspend(qspi->master);
1416 clk_disable(qspi->clk);
1417 bcm_qspi_hw_uninit(qspi);
1418
1419 return 0;
1420 };
1421
/* PM resume: re-init hardware, restore chip select, re-enable clock. */
static int __maybe_unused bcm_qspi_resume(struct device *dev)
{
	struct bcm_qspi *qspi = dev_get_drvdata(dev);
	int ret = 0;

	/*
	 * NOTE(review): registers are re-initialized before clk_enable()
	 * below; suspend only called clk_disable() (the clock stays
	 * prepared) -- confirm the register interface is accessible here.
	 */
	bcm_qspi_hw_init(qspi);
	bcm_qspi_chip_select(qspi, qspi->curr_cs);
	if (qspi->soc_intc)
		/* enable MSPI interrupt */
		qspi->soc_intc->bcm_qspi_int_set(qspi->soc_intc, MSPI_DONE,
						 true);

	/* Resume the master only if the clock came back. */
	ret = clk_enable(qspi->clk);
	if (!ret)
		spi_master_resume(qspi->master);

	return ret;
}
1440
/* Package the suspend/resume callbacks for the SoC glue drivers. */
SIMPLE_DEV_PM_OPS(bcm_qspi_pm_ops, bcm_qspi_suspend, bcm_qspi_resume);

/* pm_ops to be called by SoC specific platform driver */
EXPORT_SYMBOL_GPL(bcm_qspi_pm_ops);

MODULE_AUTHOR("Kamal Dasu");
MODULE_DESCRIPTION("Broadcom QSPI driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);