// SPDX-License-Identifier: GPL-2.0
/*
 * ESP front-end for Amiga ZORRO SCSI systems.
 *
 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
 *
 * Copyright (C) 2011,2018 Michael Schmitz (schmitz@debian.org) for
 *     migration to ESP SCSI core
 *
 * Copyright (C) 2013 Tuomas Vainikka (tuomas.vainikka@aalto.fi) for
 *     Blizzard 1230 DMA and probe function fixes
 *
 * Copyright (C) 2017 Finn Thain for PIO code from Mac ESP driver adapted here
 */
/*
 * ZORRO bus code from:
 */
/*
 * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
 *	Amiga MacroSystemUS WarpEngine SCSI controller.
 *	Amiga Technologies/DKB A4091 SCSI controller.
 *
 * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
 * plus modifications of the 53c7xx.c driver to support the Amiga.
 *
 * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/zorro.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_spi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>

#include "esp_scsi.h"

MODULE_AUTHOR("Michael Schmitz <schmitz@debian.org>");
MODULE_DESCRIPTION("Amiga Zorro NCR53C9x (ESP) driver");
MODULE_LICENSE("GPL");

/* per-board register layout definitions */

/* Blizzard 1230 DMA interface */

struct blz1230_dma_registers {
	unsigned char dma_addr;		/* DMA address [0x0000] */
	unsigned char dmapad2[0x7fff];
	unsigned char dma_latch;	/* DMA latch [0x8000] */
};

/* Blizzard 1230II DMA interface */

struct blz1230II_dma_registers {
	unsigned char dma_addr;		/* DMA address [0x0000] */
	unsigned char dmapad2[0xf];
	unsigned char dma_latch;	/* DMA latch [0x0010] */
};

/* Blizzard 2060 DMA interface */

struct blz2060_dma_registers {
	unsigned char dma_led_ctrl;	/* DMA led control [0x000] */
	unsigned char dmapad1[0x0f];
	unsigned char dma_addr0;	/* DMA address (MSB) [0x010] */
	unsigned char dmapad2[0x03];
	unsigned char dma_addr1;	/* DMA address [0x014] */
	unsigned char dmapad3[0x03];
	unsigned char dma_addr2;	/* DMA address [0x018] */
	unsigned char dmapad4[0x03];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x01c] */
};

/* DMA control bits */
#define DMA_WRITE 0x80000000

/* Cyberstorm DMA interface */

struct cyber_dma_registers {
	unsigned char dma_addr0;	/* DMA address (MSB) [0x000] */
	unsigned char dmapad1[1];
	unsigned char dma_addr1;	/* DMA address [0x002] */
	unsigned char dmapad2[1];
	unsigned char dma_addr2;	/* DMA address [0x004] */
	unsigned char dmapad3[1];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x006] */
	unsigned char dmapad4[0x3fb];
	unsigned char cond_reg;		/* DMA cond (ro) [0x402] */
#define ctrl_reg cond_reg		/* DMA control (wo) [0x402] */
};

/* DMA control bits */
#define CYBER_DMA_WRITE 0x40	/* DMA direction. 1 = write */
#define CYBER_DMA_Z3 0x20	/* 16 (Z2) or 32 (CHIP/Z3) bit DMA transfer */

/* DMA status bits */
#define CYBER_DMA_HNDL_INTR 0x80	/* DMA IRQ pending? */

/* The CyberStorm II DMA interface */
struct cyberII_dma_registers {
	unsigned char cond_reg;		/* DMA cond (ro) [0x000] */
#define ctrl_reg cond_reg		/* DMA control (wo) [0x000] */
	unsigned char dmapad4[0x3f];
	unsigned char dma_addr0;	/* DMA address (MSB) [0x040] */
	unsigned char dmapad1[3];
	unsigned char dma_addr1;	/* DMA address [0x044] */
	unsigned char dmapad2[3];
	unsigned char dma_addr2;	/* DMA address [0x048] */
	unsigned char dmapad3[3];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x04c] */
};

/* Fastlane DMA interface */

struct fastlane_dma_registers {
	unsigned char cond_reg;		/* DMA status (ro) [0x0000] */
#define ctrl_reg cond_reg		/* DMA control (wo) [0x0000] */
	char dmapad1[0x3f];
	unsigned char clear_strobe;	/* DMA clear (wo) [0x0040] */
};

/*
 * The controller registers can be found in the Z2 config area at these
 * offsets:
 */
#define FASTLANE_ESP_ADDR 0x1000001

/* DMA status bits */
#define FASTLANE_DMA_MINT 0x80
#define FASTLANE_DMA_IACT 0x40
#define FASTLANE_DMA_CREQ 0x20

/* DMA control bits */
#define FASTLANE_DMA_FCODE 0xa0
#define FASTLANE_DMA_MASK 0xf3
#define FASTLANE_DMA_WRITE 0x08		/* 1 = write */
#define FASTLANE_DMA_ENABLE 0x04	/* Enable DMA */
#define FASTLANE_DMA_EDI 0x02		/* Enable DMA IRQ ? */
#define FASTLANE_DMA_ESI 0x01		/* Enable SCSI IRQ */

/*
 * private data used for driver
 */
struct zorro_esp_priv {
	struct esp *esp;		/* our ESP instance - for Scsi_host* */
	void __iomem *board_base;	/* virtual address (Zorro III board) */
	int error;			/* PIO error flag */
	int zorro3;			/* board is Zorro III */
	unsigned char ctrl_data;	/* shadow copy of ctrl_reg */
};

/*
 * On all implementations except for the Oktagon, padding between ESP
 * registers is three bytes.
 * On Oktagon, it is one byte - use a different accessor there.
 *
 * Oktagon needs PDMA - currently unsupported!
 */

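/*
 * With three pad bytes between registers, ESP register index N lives at
 * byte offset N * 4 from the board's ESP base (e.g. ESP_FDATA, index 0x02,
 * ends up at byte offset 0x08) - hence the "reg * 4UL" in the accessors
 * below.
 */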
static void zorro_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	writeb(val, esp->regs + (reg * 4UL));
}

static u8 zorro_esp_read8(struct esp *esp, unsigned long reg)
{
	return readb(esp->regs + (reg * 4UL));
}

static dma_addr_t zorro_esp_map_single(struct esp *esp, void *buf,
				       size_t sz, int dir)
{
	return dma_map_single(esp->dev, buf, sz, dir);
}

static int zorro_esp_map_sg(struct esp *esp, struct scatterlist *sg,
			    int num_sg, int dir)
{
	return dma_map_sg(esp->dev, sg, num_sg, dir);
}

static void zorro_esp_unmap_single(struct esp *esp, dma_addr_t addr,
				   size_t sz, int dir)
{
	dma_unmap_single(esp->dev, addr, sz, dir);
}

static void zorro_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
			       int num_sg, int dir)
{
	dma_unmap_sg(esp->dev, sg, num_sg, dir);
}

static int zorro_esp_irq_pending(struct esp *esp)
{
	/* check ESP status register; DMA has no status reg. */
	if (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
		return 1;

	return 0;
}

static int cyber_esp_irq_pending(struct esp *esp)
{
	struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
	unsigned char dma_status = readb(&dregs->cond_reg);

	/* It's important to check the DMA IRQ bit in the correct way! */
	return ((zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR) &&
		(dma_status & CYBER_DMA_HNDL_INTR));
}

static int fastlane_esp_irq_pending(struct esp *esp)
{
	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
	unsigned char dma_status;

	dma_status = readb(&dregs->cond_reg);

	if (dma_status & FASTLANE_DMA_IACT)
		return 0;	/* not our IRQ */

	/* Return non-zero if ESP requested IRQ */
	return ((dma_status & FASTLANE_DMA_CREQ) &&
		(!(dma_status & FASTLANE_DMA_MINT)) &&
		(zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR));
}

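/*
 * The ESP transfer counter is only 24 bits wide (TCLOW/TCMED/TCHI), so a
 * single transfer is capped at 0xFFFFFF bytes.
 */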
static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
				      u32 dma_len)
{
	return dma_len > 0xFFFFFF ? 0xFFFFFF : dma_len;
}

static void zorro_esp_reset_dma(struct esp *esp)
{
	/* nothing to do here */
}

static void zorro_esp_dma_drain(struct esp *esp)
{
	/* nothing to do here */
}

static void zorro_esp_dma_invalidate(struct esp *esp)
{
	/* nothing to do here */
}

static void fastlane_esp_dma_invalidate(struct esp *esp)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
	unsigned char *ctrl_data = &zep->ctrl_data;

	*ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK);
	writeb(0, &dregs->clear_strobe);
	z_writel(0, zep->board_base);
}

/*
 * Programmed IO routines follow.
 */

static inline unsigned int zorro_esp_wait_for_fifo(struct esp *esp)
{
	int i = 500000;

	do {
		unsigned int fbytes = zorro_esp_read8(esp, ESP_FFLAGS)
				      & ESP_FF_FBYTES;

		if (fbytes)
			return fbytes;

		udelay(2);
	} while (--i);

	pr_err("FIFO is empty (sreg %02x)\n",
	       zorro_esp_read8(esp, ESP_STATUS));
	return 0;
}

static inline int zorro_esp_wait_for_intr(struct esp *esp)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	int i = 500000;

	do {
		esp->sreg = zorro_esp_read8(esp, ESP_STATUS);
		if (esp->sreg & ESP_STAT_INTR)
			return 0;

		udelay(2);
	} while (--i);

	pr_err("IRQ timeout (sreg %02x)\n", esp->sreg);
	zep->error = 1;
	return 1;
}

/*
 * PIO macros as used in mac_esp.c.
 * Note that addr and fifo arguments are local-scope variables declared
 * in zorro_esp_send_pio_cmd(), the macros are only used in that function,
 * and addr and fifo are referenced in each use of the macros so there
 * is no need to pass them as macro parameters.
 */
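/*
 * With the operands used in this driver, ZORRO_ESP_PIO_LOOP copies reg1
 * bytes one at a time from the buffer at "addr" (auto-incremented) to the
 * ESP FIFO data register, decrementing the count until it reaches zero.
 */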
#define ZORRO_ESP_PIO_LOOP(operands, reg1)		\
	asm volatile (					\
	     "1:     moveb " operands "\n"		\
	     "       subqw #1,%1       \n"		\
	     "       jbne 1b           \n"		\
	     : "+a" (addr), "+r" (reg1)			\
	     : "a" (fifo));

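/*
 * ZORRO_ESP_PIO_FILL moves one full 16-byte FIFO's worth of data in an
 * unrolled sequence; since the m68k "subqw" instruction only takes an
 * immediate of 1..8, the count is decremented by 16 using two "subqw #8".
 */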
#define ZORRO_ESP_PIO_FILL(operands, reg1)		\
	asm volatile (					\
	     "       moveb " operands "\n"		\
	     "       moveb " operands "\n"		\
	     "       moveb " operands "\n"		\
	     "       moveb " operands "\n"		\
	     "       moveb " operands "\n"		\
	     "       moveb " operands "\n"		\
	     "       moveb " operands "\n"		\
	     "       moveb " operands "\n"		\
	     "       moveb " operands "\n"		\
	     "       moveb " operands "\n"		\
	     "       moveb " operands "\n"		\
	     "       moveb " operands "\n"		\
	     "       moveb " operands "\n"		\
	     "       moveb " operands "\n"		\
	     "       moveb " operands "\n"		\
	     "       moveb " operands "\n"		\
	     "       subqw #8,%1       \n"		\
	     "       subqw #8,%1       \n"		\
	     : "+a" (addr), "+r" (reg1)			\
	     : "a" (fifo));

#define ZORRO_ESP_FIFO_SIZE 16

static void zorro_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
				   u32 dma_count, int write, u8 cmd)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	/*
	 * Register stride on these boards is 4 bytes (see the accessors
	 * above), so the FIFO data register sits at ESP_FDATA * 4; the
	 * mac_esp original this was taken from uses a 16-byte stride.
	 */
	u8 __iomem *fifo = esp->regs + ESP_FDATA * 4;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	cmd &= ~ESP_CMD_DMA;

	if (write) {
		u8 *dst = (u8 *)addr;
		u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);

		scsi_esp_cmd(esp, cmd);

		while (1) {
			if (!zorro_esp_wait_for_fifo(esp))
				break;

			*dst++ = zorro_esp_read8(esp, ESP_FDATA);
			--esp_count;

			if (!esp_count)
				break;

			if (zorro_esp_wait_for_intr(esp))
				break;

			if ((esp->sreg & ESP_STAT_PMASK) != phase)
				break;

			esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
			if (esp->ireg & mask) {
				zep->error = 1;
				break;
			}

			if (phase == ESP_MIP)
				scsi_esp_cmd(esp, ESP_CMD_MOK);

			scsi_esp_cmd(esp, ESP_CMD_TI);
		}
	} else {	/* unused, as long as we only handle MIP here */
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (esp_count >= ZORRO_ESP_FIFO_SIZE)
			ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
		else
			ZORRO_ESP_PIO_LOOP("%0@+,%2@", esp_count)

		scsi_esp_cmd(esp, cmd);

		while (esp_count) {
			unsigned int n;

			if (zorro_esp_wait_for_intr(esp))
				break;

			if ((esp->sreg & ESP_STAT_PMASK) != phase)
				break;

			esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
			if (esp->ireg & ~ESP_INTR_BSERV) {
				zep->error = 1;
				break;
			}

			n = ZORRO_ESP_FIFO_SIZE -
			    (zorro_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES);
			if (n > esp_count)
				n = esp_count;

			if (n == ZORRO_ESP_FIFO_SIZE)
				ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
			else {
				esp_count -= n;
				ZORRO_ESP_PIO_LOOP("%0@+,%2@", n)
			}

			scsi_esp_cmd(esp, ESP_CMD_TI);
		}
	}
}

/* Blizzard 1230/60 SCSI-IV DMA */

static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct blz1230_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	zep->error = 0;
	/*
	 * Use PIO if transferring message bytes to esp->command_block_dma.
	 * PIO requires a virtual address, so substitute esp->command_block
	 * for addr.
	 */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
				       esp_count, dma_count, write, cmd);
		return;
	}

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_TO_DEVICE);

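	/*
	 * The Blizzard DMA engine apparently expects the bus address shifted
	 * right by one, with the transfer direction encoded in the top bit:
	 * DMA_WRITE cleared for a transfer into memory, set for a transfer
	 * out to the SCSI bus.
	 */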
	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

	writeb((addr >> 24) & 0xff, &dregs->dma_latch);
	writeb((addr >> 24) & 0xff, &dregs->dma_addr);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr);
	writeb(addr & 0xff, &dregs->dma_addr);

	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);

	scsi_esp_cmd(esp, cmd);
}

/* Blizzard 1230-II DMA */

static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	zep->error = 0;
	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
				       esp_count, dma_count, write, cmd);
		return;
	}

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_TO_DEVICE);

	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

	writeb((addr >> 24) & 0xff, &dregs->dma_latch);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr);
	writeb(addr & 0xff, &dregs->dma_addr);

	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);

	scsi_esp_cmd(esp, cmd);
}

/* Blizzard 2060 DMA */

static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct blz2060_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	zep->error = 0;
	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
				       esp_count, dma_count, write, cmd);
		return;
	}

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_TO_DEVICE);

	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

	writeb(addr & 0xff, &dregs->dma_addr3);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);

	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);

	scsi_esp_cmd(esp, cmd);
}

/* Cyberstorm I DMA */

static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;
	unsigned char *ctrl_data = &zep->ctrl_data;

	zep->error = 0;
	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
				       esp_count, dma_count, write, cmd);
		return;
	}

	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);

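	/*
	 * The transfer direction is passed to the Cyberstorm DMA engine in
	 * the lowest address bit: cleared below for a transfer into memory,
	 * set for a transfer out to the SCSI bus.
	 */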
	if (write) {
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_TO_DEVICE);
		addr |= 1;
	}

	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
	writeb(addr & 0xff, &dregs->dma_addr3);

	if (write)
		*ctrl_data &= ~(CYBER_DMA_WRITE);
	else
		*ctrl_data |= CYBER_DMA_WRITE;

	*ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */

	writeb(*ctrl_data, &dregs->ctrl_reg);

	scsi_esp_cmd(esp, cmd);
}

/* Cyberstorm II DMA */

static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct cyberII_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	zep->error = 0;
	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
				       esp_count, dma_count, write, cmd);
		return;
	}

	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);

	if (write) {
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_TO_DEVICE);
		addr |= 1;
	}

	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
	writeb(addr & 0xff, &dregs->dma_addr3);

	scsi_esp_cmd(esp, cmd);
}

/* Fastlane DMA */

static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;
	unsigned char *ctrl_data = &zep->ctrl_data;

	zep->error = 0;
	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
				       esp_count, dma_count, write, cmd);
		return;
	}

	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);

	if (write) {
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_TO_DEVICE);
		addr |= 1;
	}

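	/*
	 * The Fastlane is armed by strobing the clear register and then
	 * doing a dummy write into the board's own address space, where the
	 * low 24 bits of the offset written appear to carry the DMA start
	 * address (see also fastlane_esp_dma_invalidate() above).
	 */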
	writeb(0, &dregs->clear_strobe);
	z_writel(addr, ((addr & 0x00ffffff) + zep->board_base));

	if (write) {
		*ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK) |
			     FASTLANE_DMA_ENABLE;
	} else {
		*ctrl_data = ((*ctrl_data & FASTLANE_DMA_MASK) |
			      FASTLANE_DMA_ENABLE |
			      FASTLANE_DMA_WRITE);
	}

	writeb(*ctrl_data, &dregs->ctrl_reg);

	scsi_esp_cmd(esp, cmd);
}

static int zorro_esp_dma_error(struct esp *esp)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);

	/* check for error in case we've been doing PIO */
	if (zep->error == 1)
		return 1;

	/* do nothing - there seems to be no way to check for DMA errors */
	return 0;
}

/* per-board ESP driver ops */

static const struct esp_driver_ops blz1230_esp_ops = {
	.esp_write8 = zorro_esp_write8,
	.esp_read8 = zorro_esp_read8,
	.map_single = zorro_esp_map_single,
	.map_sg = zorro_esp_map_sg,
	.unmap_single = zorro_esp_unmap_single,
	.unmap_sg = zorro_esp_unmap_sg,
	.irq_pending = zorro_esp_irq_pending,
	.dma_length_limit = zorro_esp_dma_length_limit,
	.reset_dma = zorro_esp_reset_dma,
	.dma_drain = zorro_esp_dma_drain,
	.dma_invalidate = zorro_esp_dma_invalidate,
	.send_dma_cmd = zorro_esp_send_blz1230_dma_cmd,
	.dma_error = zorro_esp_dma_error,
};

static const struct esp_driver_ops blz1230II_esp_ops = {
	.esp_write8 = zorro_esp_write8,
	.esp_read8 = zorro_esp_read8,
	.map_single = zorro_esp_map_single,
	.map_sg = zorro_esp_map_sg,
	.unmap_single = zorro_esp_unmap_single,
	.unmap_sg = zorro_esp_unmap_sg,
	.irq_pending = zorro_esp_irq_pending,
	.dma_length_limit = zorro_esp_dma_length_limit,
	.reset_dma = zorro_esp_reset_dma,
	.dma_drain = zorro_esp_dma_drain,
	.dma_invalidate = zorro_esp_dma_invalidate,
	.send_dma_cmd = zorro_esp_send_blz1230II_dma_cmd,
	.dma_error = zorro_esp_dma_error,
};

static const struct esp_driver_ops blz2060_esp_ops = {
	.esp_write8 = zorro_esp_write8,
	.esp_read8 = zorro_esp_read8,
	.map_single = zorro_esp_map_single,
	.map_sg = zorro_esp_map_sg,
	.unmap_single = zorro_esp_unmap_single,
	.unmap_sg = zorro_esp_unmap_sg,
	.irq_pending = zorro_esp_irq_pending,
	.dma_length_limit = zorro_esp_dma_length_limit,
	.reset_dma = zorro_esp_reset_dma,
	.dma_drain = zorro_esp_dma_drain,
	.dma_invalidate = zorro_esp_dma_invalidate,
	.send_dma_cmd = zorro_esp_send_blz2060_dma_cmd,
	.dma_error = zorro_esp_dma_error,
};

static const struct esp_driver_ops cyber_esp_ops = {
	.esp_write8 = zorro_esp_write8,
	.esp_read8 = zorro_esp_read8,
	.map_single = zorro_esp_map_single,
	.map_sg = zorro_esp_map_sg,
	.unmap_single = zorro_esp_unmap_single,
	.unmap_sg = zorro_esp_unmap_sg,
	.irq_pending = cyber_esp_irq_pending,
	.dma_length_limit = zorro_esp_dma_length_limit,
	.reset_dma = zorro_esp_reset_dma,
	.dma_drain = zorro_esp_dma_drain,
	.dma_invalidate = zorro_esp_dma_invalidate,
	.send_dma_cmd = zorro_esp_send_cyber_dma_cmd,
	.dma_error = zorro_esp_dma_error,
};

static const struct esp_driver_ops cyberII_esp_ops = {
	.esp_write8 = zorro_esp_write8,
	.esp_read8 = zorro_esp_read8,
	.map_single = zorro_esp_map_single,
	.map_sg = zorro_esp_map_sg,
	.unmap_single = zorro_esp_unmap_single,
	.unmap_sg = zorro_esp_unmap_sg,
	.irq_pending = zorro_esp_irq_pending,
	.dma_length_limit = zorro_esp_dma_length_limit,
	.reset_dma = zorro_esp_reset_dma,
	.dma_drain = zorro_esp_dma_drain,
	.dma_invalidate = zorro_esp_dma_invalidate,
	.send_dma_cmd = zorro_esp_send_cyberII_dma_cmd,
	.dma_error = zorro_esp_dma_error,
};

static const struct esp_driver_ops fastlane_esp_ops = {
	.esp_write8 = zorro_esp_write8,
	.esp_read8 = zorro_esp_read8,
	.map_single = zorro_esp_map_single,
	.map_sg = zorro_esp_map_sg,
	.unmap_single = zorro_esp_unmap_single,
	.unmap_sg = zorro_esp_unmap_sg,
	.irq_pending = fastlane_esp_irq_pending,
	.dma_length_limit = zorro_esp_dma_length_limit,
	.reset_dma = zorro_esp_reset_dma,
	.dma_drain = zorro_esp_dma_drain,
	.dma_invalidate = fastlane_esp_dma_invalidate,
	.send_dma_cmd = zorro_esp_send_fastlane_dma_cmd,
	.dma_error = zorro_esp_dma_error,
};

/* Zorro driver config data */

struct zorro_driver_data {
	const char *name;
	unsigned long offset;
	unsigned long dma_offset;
	int absolute;	/* offset is absolute address */
	int scsi_option;
	const struct esp_driver_ops *esp_ops;
};

/* board types */

enum {
	ZORRO_BLZ1230,
	ZORRO_BLZ1230II,
	ZORRO_BLZ2060,
	ZORRO_CYBER,
	ZORRO_CYBERII,
	ZORRO_FASTLANE,
};

/* per-board config data */

static const struct zorro_driver_data zorro_esp_boards[] = {
	[ZORRO_BLZ1230] = {
		.name = "Blizzard 1230",
		.offset = 0x8000,
		.dma_offset = 0x10000,
		.scsi_option = 1,
		.esp_ops = &blz1230_esp_ops,
	},
	[ZORRO_BLZ1230II] = {
		.name = "Blizzard 1230II",
		.offset = 0x10000,
		.dma_offset = 0x10021,
		.scsi_option = 1,
		.esp_ops = &blz1230II_esp_ops,
	},
	[ZORRO_BLZ2060] = {
		.name = "Blizzard 2060",
		.offset = 0x1ff00,
		.dma_offset = 0x1ffe0,
		.esp_ops = &blz2060_esp_ops,
	},
	[ZORRO_CYBER] = {
		.name = "CyberStormI",
		.offset = 0xf400,
		.dma_offset = 0xf800,
		.esp_ops = &cyber_esp_ops,
	},
	[ZORRO_CYBERII] = {
		.name = "CyberStormII",
		.offset = 0x1ff03,
		.dma_offset = 0x1ff43,
		.scsi_option = 1,
		.esp_ops = &cyberII_esp_ops,
	},
	[ZORRO_FASTLANE] = {
		.name = "Fastlane",
		.offset = 0x1000001,
		.dma_offset = 0x1000041,
		.esp_ops = &fastlane_esp_ops,
	},
};

static const struct zorro_device_id zorro_esp_zorro_tbl[] = {
	{	/* Blizzard 1230 IV */
		.id = ZORRO_ID(PHASE5, 0x11, 0),
		.driver_data = ZORRO_BLZ1230,
	},
	{	/* Blizzard 1230 II (Zorro II) or Fastlane (Zorro III) */
		.id = ZORRO_ID(PHASE5, 0x0B, 0),
		.driver_data = ZORRO_BLZ1230II,
	},
	{	/* Blizzard 2060 */
		.id = ZORRO_ID(PHASE5, 0x18, 0),
		.driver_data = ZORRO_BLZ2060,
	},
	{	/* Cyberstorm */
		.id = ZORRO_ID(PHASE5, 0x0C, 0),
		.driver_data = ZORRO_CYBER,
	},
	{	/* Cyberstorm II */
		.id = ZORRO_ID(PHASE5, 0x19, 0),
		.driver_data = ZORRO_CYBERII,
	},
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, zorro_esp_zorro_tbl);

static int zorro_esp_probe(struct zorro_dev *z,
			   const struct zorro_device_id *ent)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	const struct zorro_driver_data *zdd;
	struct zorro_esp_priv *zep;
	unsigned long board, ioaddr, dmaaddr;
	int err;

	board = zorro_resource_start(z);
	zdd = &zorro_esp_boards[ent->driver_data];

	pr_info("%s found at address 0x%lx.\n", zdd->name, board);

	zep = kzalloc(sizeof(*zep), GFP_KERNEL);
	if (!zep) {
		pr_err("Can't allocate device private data!\n");
		return -ENOMEM;
	}

	/* let's figure out whether we have a Zorro II or Zorro III board */
	if ((z->rom.er_Type & ERT_TYPEMASK) == ERT_ZORROIII) {
		if (board > 0xffffff)
			zep->zorro3 = 1;
	} else {
		/*
		 * Even though most of these boards identify as Zorro II,
		 * they are in fact CPU expansion slot boards and have full
		 * access to all of memory. Fix up DMA bitmask here.
		 */
		z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	}

	/*
	 * If Zorro III and ID matches Fastlane, our device table entry
	 * contains data for the Blizzard 1230 II board which does share the
	 * same ID. Fix up device table entry here.
	 * TODO: Some Cyberstorm060 boards also share this ID but would need
	 * to use the Cyberstorm I driver data ... we catch this by checking
	 * for presence of ESP chip later, but don't try to fix up yet.
	 */
	if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
		pr_info("%s at address 0x%lx is Fastlane Z3, fixing data!\n",
			zdd->name, board);
		zdd = &zorro_esp_boards[ZORRO_FASTLANE];
	}

	if (zdd->absolute) {
		ioaddr = zdd->offset;
		dmaaddr = zdd->dma_offset;
	} else {
		ioaddr = board + zdd->offset;
		dmaaddr = board + zdd->dma_offset;
	}

	if (!zorro_request_device(z, zdd->name)) {
		pr_err("cannot reserve region 0x%lx, abort\n",
		       board);
		err = -EBUSY;
		goto fail_free_zep;
	}

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	if (!host) {
993 pr_err("No host detected; board configuration problem?\n");
		err = -ENOMEM;
		goto fail_release_device;
	}

	host->base = ioaddr;
	host->this_id = 7;

	esp = shost_priv(host);
	esp->host = host;
	esp->dev = &z->dev;

	esp->scsi_id = host->this_id;
	esp->scsi_id_mask = (1 << esp->scsi_id);

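	/* Chip clock: the driver assumes 40 MHz for all supported boards */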
	esp->cfreq = 40000000;

	zep->esp = esp;

	dev_set_drvdata(esp->dev, zep);

	/* additional setup required for Fastlane */
	if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
		/* map full address space up to ESP base for DMA */
		zep->board_base = ioremap_nocache(board,
						  FASTLANE_ESP_ADDR-1);
		if (!zep->board_base) {
			pr_err("Cannot allocate board address space\n");
			err = -ENOMEM;
			goto fail_free_host;
		}
		/* initialize DMA control shadow register */
		zep->ctrl_data = (FASTLANE_DMA_FCODE |
				  FASTLANE_DMA_EDI | FASTLANE_DMA_ESI);
	}

	esp->ops = zdd->esp_ops;

	if (ioaddr > 0xffffff)
		esp->regs = ioremap_nocache(ioaddr, 0x20);
	else
		/* ZorroII address space remapped nocache by early startup */
		esp->regs = ZTWO_VADDR(ioaddr);

	if (!esp->regs) {
		err = -ENOMEM;
		goto fail_unmap_fastlane;
	}

	/* Check whether a Blizzard 12x0 or CyberstormII really has SCSI */
	if (zdd->scsi_option) {
		zorro_esp_write8(esp, (ESP_CONFIG1_PENABLE | 7), ESP_CFG1);
		if (zorro_esp_read8(esp, ESP_CFG1) != (ESP_CONFIG1_PENABLE|7)) {
			err = -ENODEV;
			goto fail_unmap_regs;
		}
	}

	if (zep->zorro3) {
		/*
		 * Only Fastlane Z3 for now - add switch for correct struct
		 * dma_registers size if adding any more
		 */
		esp->dma_regs = ioremap_nocache(dmaaddr,
				sizeof(struct fastlane_dma_registers));
	} else
		/* ZorroII address space remapped nocache by early startup */
		esp->dma_regs = ZTWO_VADDR(dmaaddr);

	if (!esp->dma_regs) {
		err = -ENOMEM;
		goto fail_unmap_regs;
	}

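	/*
	 * 16-byte coherent buffer the ESP core uses for command and message
	 * bytes; its bus address (command_block_dma) is what the
	 * send_dma_cmd routines above compare against to decide on PIO.
	 */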
	esp->command_block = dma_alloc_coherent(esp->dev, 16,
						&esp->command_block_dma,
						GFP_KERNEL);

	if (!esp->command_block) {
		err = -ENOMEM;
		goto fail_unmap_dma_regs;
	}

	host->irq = IRQ_AMIGA_PORTS;
	err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
			  "Amiga Zorro ESP", esp);
	if (err < 0) {
		err = -ENODEV;
		goto fail_free_command_block;
	}

	/* register the chip */
	err = scsi_esp_register(esp, &z->dev);

	if (err) {
		err = -ENOMEM;
		goto fail_free_irq;
	}

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);

fail_free_command_block:
	dma_free_coherent(esp->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);

fail_unmap_dma_regs:
	if (zep->zorro3)
		iounmap(esp->dma_regs);

fail_unmap_regs:
	if (ioaddr > 0xffffff)
		iounmap(esp->regs);

fail_unmap_fastlane:
	if (zep->zorro3)
		iounmap(zep->board_base);

fail_free_host:
	scsi_host_put(host);

fail_release_device:
	zorro_release_device(z);

fail_free_zep:
	kfree(zep);

	return err;
}

static void zorro_esp_remove(struct zorro_dev *z)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(&z->dev);
	struct esp *esp = zep->esp;
	struct Scsi_Host *host = esp->host;

	scsi_esp_unregister(esp);

	free_irq(host->irq, esp);
	dma_free_coherent(esp->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);

	if (zep->zorro3) {
		iounmap(zep->board_base);
		iounmap(esp->dma_regs);
	}

	if (host->base > 0xffffff)
		iounmap(esp->regs);

	scsi_host_put(host);

	zorro_release_device(z);

	kfree(zep);
}

static struct zorro_driver zorro_esp_driver = {
	.name = KBUILD_MODNAME,
	.id_table = zorro_esp_zorro_tbl,
	.probe = zorro_esp_probe,
	.remove = zorro_esp_remove,
};

static int __init zorro_esp_scsi_init(void)
{
	return zorro_register_driver(&zorro_esp_driver);
}

static void __exit zorro_esp_scsi_exit(void)
{
	zorro_unregister_driver(&zorro_esp_driver);
}

module_init(zorro_esp_scsi_init);
module_exit(zorro_esp_scsi_exit);