drivers/tty/serial/atmel_serial.c
1 /*
2 * Driver for Atmel AT91 / AT32 Serial ports
3 * Copyright (C) 2003 Rick Bronson
4 *
5 * Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
6 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
7 *
8 * DMA support added by Chip Coldwell.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 */
25 #include <linux/module.h>
26 #include <linux/tty.h>
27 #include <linux/ioport.h>
28 #include <linux/slab.h>
29 #include <linux/init.h>
30 #include <linux/serial.h>
31 #include <linux/clk.h>
32 #include <linux/console.h>
33 #include <linux/sysrq.h>
34 #include <linux/tty_flip.h>
35 #include <linux/platform_device.h>
36 #include <linux/of.h>
37 #include <linux/of_device.h>
38 #include <linux/of_gpio.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/dmaengine.h>
41 #include <linux/atmel_pdc.h>
42 #include <linux/atmel_serial.h>
43 #include <linux/uaccess.h>
44 #include <linux/platform_data/atmel.h>
45 #include <linux/timer.h>
46 #include <linux/gpio.h>
47 #include <linux/gpio/consumer.h>
48 #include <linux/err.h>
49 #include <linux/irq.h>
50 #include <linux/suspend.h>
51
52 #include <asm/io.h>
53 #include <asm/ioctls.h>
54
55 #define PDC_BUFFER_SIZE 512
56 /* Revisit: We should calculate this based on the actual port settings */
57 #define PDC_RX_TIMEOUT (3 * 10) /* 3 bytes */
58
59 /* The minimum number of data the FIFOs should be able to contain */
60 #define ATMEL_MIN_FIFO_SIZE 8
61 /*
62  * These two offsets are subtracted from the RX FIFO size to define the RTS
63 * high and low thresholds
64 */
65 #define ATMEL_RTS_HIGH_OFFSET 16
66 #define ATMEL_RTS_LOW_OFFSET 20
67
68 #if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
69 #define SUPPORT_SYSRQ
70 #endif
71
72 #include <linux/serial_core.h>
73
74 #include "serial_mctrl_gpio.h"
75
76 static void atmel_start_rx(struct uart_port *port);
77 static void atmel_stop_rx(struct uart_port *port);
78
79 #ifdef CONFIG_SERIAL_ATMEL_TTYAT
80
81 /* Use device name ttyAT, major 204 and minor 154-169. This is necessary if we
82  * are to coexist with the 8250 driver, such as if we have an external 16C550
83 * UART. */
84 #define SERIAL_ATMEL_MAJOR 204
85 #define MINOR_START 154
86 #define ATMEL_DEVICENAME "ttyAT"
87
88 #else
89
90 /* Use device name ttyS, major 4, minor 64-68. This is the usual serial port
91 * name, but it is legally reserved for the 8250 driver. */
92 #define SERIAL_ATMEL_MAJOR TTY_MAJOR
93 #define MINOR_START 64
94 #define ATMEL_DEVICENAME "ttyS"
95
96 #endif
97
98 #define ATMEL_ISR_PASS_LIMIT 256
99
100 struct atmel_dma_buffer {
101 unsigned char *buf;
102 dma_addr_t dma_addr;
103 unsigned int dma_size;
104 unsigned int ofs;
105 };
106
107 struct atmel_uart_char {
108 u16 status;
109 u16 ch;
110 };
111
112 #define ATMEL_SERIAL_RINGSIZE 1024
113
114 /*
115 * at91: 6 USARTs and one DBGU port (SAM9260)
116 * avr32: 4
117 */
118 #define ATMEL_MAX_UART 7
119
120 /*
121 * We wrap our port structure around the generic uart_port.
122 */
123 struct atmel_uart_port {
124 struct uart_port uart; /* uart */
125 struct clk *clk; /* uart clock */
126 int may_wakeup; /* cached value of device_may_wakeup for times we need to disable it */
127 u32 backup_imr; /* IMR saved during suspend */
128 int break_active; /* break being received */
129
130 bool use_dma_rx; /* enable DMA receiver */
131 bool use_pdc_rx; /* enable PDC receiver */
132 short pdc_rx_idx; /* current PDC RX buffer */
133         struct atmel_dma_buffer pdc_rx[2];      /* PDC receiver */
134
135 bool use_dma_tx; /* enable DMA transmitter */
136 bool use_pdc_tx; /* enable PDC transmitter */
137 struct atmel_dma_buffer pdc_tx; /* PDC transmitter */
138
139 spinlock_t lock_tx; /* port lock */
140 spinlock_t lock_rx; /* port lock */
141 struct dma_chan *chan_tx;
142 struct dma_chan *chan_rx;
143 struct dma_async_tx_descriptor *desc_tx;
144 struct dma_async_tx_descriptor *desc_rx;
145 dma_cookie_t cookie_tx;
146 dma_cookie_t cookie_rx;
147 struct scatterlist sg_tx;
148 struct scatterlist sg_rx;
149 struct tasklet_struct tasklet;
150 unsigned int irq_status;
151 unsigned int irq_status_prev;
152 unsigned int status_change;
153 unsigned int tx_len;
154
155 struct circ_buf rx_ring;
156
157 struct mctrl_gpios *gpios;
158 int gpio_irq[UART_GPIO_MAX];
159 unsigned int tx_done_mask;
160 u32 fifo_size;
161 u32 rts_high;
162 u32 rts_low;
163 bool ms_irq_enabled;
164 bool is_usart; /* usart or uart */
165 struct timer_list uart_timer; /* uart timer */
166
167 bool suspended;
168 unsigned int pending;
169 unsigned int pending_status;
170 spinlock_t lock_suspended;
171
172 int (*prepare_rx)(struct uart_port *port);
173 int (*prepare_tx)(struct uart_port *port);
174 void (*schedule_rx)(struct uart_port *port);
175 void (*schedule_tx)(struct uart_port *port);
176 void (*release_rx)(struct uart_port *port);
177 void (*release_tx)(struct uart_port *port);
178 };
179
180 static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
181 static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
182
183 #ifdef SUPPORT_SYSRQ
184 static struct console atmel_console;
185 #endif
186
187 #if defined(CONFIG_OF)
188 static const struct of_device_id atmel_serial_dt_ids[] = {
189 { .compatible = "atmel,at91rm9200-usart" },
190 { .compatible = "atmel,at91sam9260-usart" },
191 { /* sentinel */ }
192 };
193
194 MODULE_DEVICE_TABLE(of, atmel_serial_dt_ids);
195 #endif
196
197 static inline struct atmel_uart_port *
198 to_atmel_uart_port(struct uart_port *uart)
199 {
200 return container_of(uart, struct atmel_uart_port, uart);
201 }
202
203 static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
204 {
205 return __raw_readl(port->membase + reg);
206 }
207
208 static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
209 {
210 __raw_writel(value, port->membase + reg);
211 }
212
213 #ifdef CONFIG_AVR32
214
215 /* AVR32 cannot handle 8- or 16-bit I/O accesses, only 32-bit ones */
216 static inline u8 atmel_uart_read_char(struct uart_port *port)
217 {
218 return __raw_readl(port->membase + ATMEL_US_RHR);
219 }
220
221 static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
222 {
223 __raw_writel(value, port->membase + ATMEL_US_THR);
224 }
225
226 #else
227
228 static inline u8 atmel_uart_read_char(struct uart_port *port)
229 {
230 return __raw_readb(port->membase + ATMEL_US_RHR);
231 }
232
233 static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
234 {
235 __raw_writeb(value, port->membase + ATMEL_US_THR);
236 }
237
238 #endif
239
240 #ifdef CONFIG_SERIAL_ATMEL_PDC
241 static bool atmel_use_pdc_rx(struct uart_port *port)
242 {
243 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
244
245 return atmel_port->use_pdc_rx;
246 }
247
248 static bool atmel_use_pdc_tx(struct uart_port *port)
249 {
250 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
251
252 return atmel_port->use_pdc_tx;
253 }
254 #else
255 static bool atmel_use_pdc_rx(struct uart_port *port)
256 {
257 return false;
258 }
259
260 static bool atmel_use_pdc_tx(struct uart_port *port)
261 {
262 return false;
263 }
264 #endif
265
266 static bool atmel_use_dma_tx(struct uart_port *port)
267 {
268 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
269
270 return atmel_port->use_dma_tx;
271 }
272
273 static bool atmel_use_dma_rx(struct uart_port *port)
274 {
275 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
276
277 return atmel_port->use_dma_rx;
278 }
279
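/*
 * Read the line status from CSR and, for modem-control lines handled by
 * GPIOs, override the corresponding (active-low) status bits with the
 * current GPIO state.
 */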
280 static unsigned int atmel_get_lines_status(struct uart_port *port)
281 {
282 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
283 unsigned int status, ret = 0;
284
285 status = atmel_uart_readl(port, ATMEL_US_CSR);
286
287 mctrl_gpio_get(atmel_port->gpios, &ret);
288
289 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
290 UART_GPIO_CTS))) {
291 if (ret & TIOCM_CTS)
292 status &= ~ATMEL_US_CTS;
293 else
294 status |= ATMEL_US_CTS;
295 }
296
297 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
298 UART_GPIO_DSR))) {
299 if (ret & TIOCM_DSR)
300 status &= ~ATMEL_US_DSR;
301 else
302 status |= ATMEL_US_DSR;
303 }
304
305 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
306 UART_GPIO_RI))) {
307 if (ret & TIOCM_RI)
308 status &= ~ATMEL_US_RI;
309 else
310 status |= ATMEL_US_RI;
311 }
312
313 if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
314 UART_GPIO_DCD))) {
315 if (ret & TIOCM_CD)
316 status &= ~ATMEL_US_DCD;
317 else
318 status |= ATMEL_US_DCD;
319 }
320
321 return status;
322 }
323
324 /* Enable or disable the rs485 support */
325 static int atmel_config_rs485(struct uart_port *port,
326 struct serial_rs485 *rs485conf)
327 {
328 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
329 unsigned int mode;
330
331 /* Disable interrupts */
332 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
333
334 mode = atmel_uart_readl(port, ATMEL_US_MR);
335
336 /* Resetting serial mode to RS232 (0x0) */
337 mode &= ~ATMEL_US_USMODE;
338
339 port->rs485 = *rs485conf;
340
341 if (rs485conf->flags & SER_RS485_ENABLED) {
342 dev_dbg(port->dev, "Setting UART to RS485\n");
343 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
344 atmel_uart_writel(port, ATMEL_US_TTGR,
345 rs485conf->delay_rts_after_send);
346 mode |= ATMEL_US_USMODE_RS485;
347 } else {
348 dev_dbg(port->dev, "Setting UART to RS232\n");
349 if (atmel_use_pdc_tx(port))
350 atmel_port->tx_done_mask = ATMEL_US_ENDTX |
351 ATMEL_US_TXBUFE;
352 else
353 atmel_port->tx_done_mask = ATMEL_US_TXRDY;
354 }
355 atmel_uart_writel(port, ATMEL_US_MR, mode);
356
357 /* Enable interrupts */
358 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
359
360 return 0;
361 }
362
363 /*
364  * Return TIOCSER_TEMT when the transmitter FIFO and shift register are empty.
365 */
366 static u_int atmel_tx_empty(struct uart_port *port)
367 {
368 return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
369 TIOCSER_TEMT :
370 0;
371 }
372
373 /*
374 * Set state of the modem control output lines
375 */
376 static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
377 {
378 unsigned int control = 0;
379 unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR);
380 unsigned int rts_paused, rts_ready;
381 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
382
383 /* override mode to RS485 if needed, otherwise keep the current mode */
384 if (port->rs485.flags & SER_RS485_ENABLED) {
385 atmel_uart_writel(port, ATMEL_US_TTGR,
386 port->rs485.delay_rts_after_send);
387 mode &= ~ATMEL_US_USMODE;
388 mode |= ATMEL_US_USMODE_RS485;
389 }
390
391 /* set the RTS line state according to the mode */
392 if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
393 /* force RTS line to high level */
394 rts_paused = ATMEL_US_RTSEN;
395
396 /* give the control of the RTS line back to the hardware */
397 rts_ready = ATMEL_US_RTSDIS;
398 } else {
399 /* force RTS line to high level */
400 rts_paused = ATMEL_US_RTSDIS;
401
402 /* force RTS line to low level */
403 rts_ready = ATMEL_US_RTSEN;
404 }
405
406 if (mctrl & TIOCM_RTS)
407 control |= rts_ready;
408 else
409 control |= rts_paused;
410
411 if (mctrl & TIOCM_DTR)
412 control |= ATMEL_US_DTREN;
413 else
414 control |= ATMEL_US_DTRDIS;
415
416 atmel_uart_writel(port, ATMEL_US_CR, control);
417
418 mctrl_gpio_set(atmel_port->gpios, mctrl);
419
420 /* Local loopback mode? */
421 mode &= ~ATMEL_US_CHMODE;
422 if (mctrl & TIOCM_LOOP)
423 mode |= ATMEL_US_CHMODE_LOC_LOOP;
424 else
425 mode |= ATMEL_US_CHMODE_NORMAL;
426
427 atmel_uart_writel(port, ATMEL_US_MR, mode);
428 }
429
430 /*
431 * Get state of the modem control input lines
432 */
433 static u_int atmel_get_mctrl(struct uart_port *port)
434 {
435 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
436 unsigned int ret = 0, status;
437
438 status = atmel_uart_readl(port, ATMEL_US_CSR);
439
440 /*
441 * The control signals are active low.
442 */
443 if (!(status & ATMEL_US_DCD))
444 ret |= TIOCM_CD;
445 if (!(status & ATMEL_US_CTS))
446 ret |= TIOCM_CTS;
447 if (!(status & ATMEL_US_DSR))
448 ret |= TIOCM_DSR;
449 if (!(status & ATMEL_US_RI))
450 ret |= TIOCM_RI;
451
452 return mctrl_gpio_get(atmel_port->gpios, &ret);
453 }
454
455 /*
456 * Stop transmitting.
457 */
458 static void atmel_stop_tx(struct uart_port *port)
459 {
460 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
461
462 if (atmel_use_pdc_tx(port)) {
463 /* disable PDC transmit */
464 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
465 }
466 /* Disable interrupts */
467 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
468
469 if ((port->rs485.flags & SER_RS485_ENABLED) &&
470 !(port->rs485.flags & SER_RS485_RX_DURING_TX))
471 atmel_start_rx(port);
472 }
473
474 /*
475 * Start transmitting.
476 */
477 static void atmel_start_tx(struct uart_port *port)
478 {
479 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
480
481 if (atmel_use_pdc_tx(port)) {
482 if (atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN)
483 /* The transmitter is already running. Yes, we
484 really need this.*/
485 return;
486
487 if ((port->rs485.flags & SER_RS485_ENABLED) &&
488 !(port->rs485.flags & SER_RS485_RX_DURING_TX))
489 atmel_stop_rx(port);
490
491 /* re-enable PDC transmit */
492 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
493 }
494 /* Enable interrupts */
495 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
496 }
497
498 /*
499 * start receiving - port is in process of being opened.
500 */
501 static void atmel_start_rx(struct uart_port *port)
502 {
503 /* reset status and receiver */
504 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
505
506 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);
507
508 if (atmel_use_pdc_rx(port)) {
509 /* enable PDC controller */
510 atmel_uart_writel(port, ATMEL_US_IER,
511 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
512 port->read_status_mask);
513 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
514 } else {
515 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
516 }
517 }
518
519 /*
520 * Stop receiving - port is in process of being closed.
521 */
522 static void atmel_stop_rx(struct uart_port *port)
523 {
524 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);
525
526 if (atmel_use_pdc_rx(port)) {
527 /* disable PDC receive */
528 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
529 atmel_uart_writel(port, ATMEL_US_IDR,
530 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
531 port->read_status_mask);
532 } else {
533 atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
534 }
535 }
536
537 /*
538 * Enable modem status interrupts
539 */
540 static void atmel_enable_ms(struct uart_port *port)
541 {
542 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
543 uint32_t ier = 0;
544
545 /*
546 * Interrupt should not be enabled twice
547 */
548 if (atmel_port->ms_irq_enabled)
549 return;
550
551 atmel_port->ms_irq_enabled = true;
552
553 if (atmel_port->gpio_irq[UART_GPIO_CTS] >= 0)
554 enable_irq(atmel_port->gpio_irq[UART_GPIO_CTS]);
555 else
556 ier |= ATMEL_US_CTSIC;
557
558 if (atmel_port->gpio_irq[UART_GPIO_DSR] >= 0)
559 enable_irq(atmel_port->gpio_irq[UART_GPIO_DSR]);
560 else
561 ier |= ATMEL_US_DSRIC;
562
563 if (atmel_port->gpio_irq[UART_GPIO_RI] >= 0)
564 enable_irq(atmel_port->gpio_irq[UART_GPIO_RI]);
565 else
566 ier |= ATMEL_US_RIIC;
567
568 if (atmel_port->gpio_irq[UART_GPIO_DCD] >= 0)
569 enable_irq(atmel_port->gpio_irq[UART_GPIO_DCD]);
570 else
571 ier |= ATMEL_US_DCDIC;
572
573 atmel_uart_writel(port, ATMEL_US_IER, ier);
574 }
575
576 /*
577 * Disable modem status interrupts
578 */
579 static void atmel_disable_ms(struct uart_port *port)
580 {
581 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
582 uint32_t idr = 0;
583
584 /*
585 * Interrupt should not be disabled twice
586 */
587 if (!atmel_port->ms_irq_enabled)
588 return;
589
590 atmel_port->ms_irq_enabled = false;
591
592 if (atmel_port->gpio_irq[UART_GPIO_CTS] >= 0)
593 disable_irq(atmel_port->gpio_irq[UART_GPIO_CTS]);
594 else
595 idr |= ATMEL_US_CTSIC;
596
597 if (atmel_port->gpio_irq[UART_GPIO_DSR] >= 0)
598 disable_irq(atmel_port->gpio_irq[UART_GPIO_DSR]);
599 else
600 idr |= ATMEL_US_DSRIC;
601
602 if (atmel_port->gpio_irq[UART_GPIO_RI] >= 0)
603 disable_irq(atmel_port->gpio_irq[UART_GPIO_RI]);
604 else
605 idr |= ATMEL_US_RIIC;
606
607 if (atmel_port->gpio_irq[UART_GPIO_DCD] >= 0)
608 disable_irq(atmel_port->gpio_irq[UART_GPIO_DCD]);
609 else
610 idr |= ATMEL_US_DCDIC;
611
612 atmel_uart_writel(port, ATMEL_US_IDR, idr);
613 }
614
615 /*
616 * Control the transmission of a break signal
617 */
618 static void atmel_break_ctl(struct uart_port *port, int break_state)
619 {
620 if (break_state != 0)
621 /* start break */
622 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK);
623 else
624 /* stop break */
625 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK);
626 }
627
628 /*
629 * Stores the incoming character in the ring buffer
630 */
631 static void
632 atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
633 unsigned int ch)
634 {
635 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
636 struct circ_buf *ring = &atmel_port->rx_ring;
637 struct atmel_uart_char *c;
638
639 if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
640 /* Buffer overflow, ignore char */
641 return;
642
643 c = &((struct atmel_uart_char *)ring->buf)[ring->head];
644 c->status = status;
645 c->ch = ch;
646
647 /* Make sure the character is stored before we update head. */
648 smp_wmb();
649
650 ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
651 }
652
653 /*
654 * Deal with parity, framing and overrun errors.
655 */
656 static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
657 {
658 /* clear error */
659 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
660
661 if (status & ATMEL_US_RXBRK) {
662 /* ignore side-effect */
663 status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
664 port->icount.brk++;
665 }
666 if (status & ATMEL_US_PARE)
667 port->icount.parity++;
668 if (status & ATMEL_US_FRAME)
669 port->icount.frame++;
670 if (status & ATMEL_US_OVRE)
671 port->icount.overrun++;
672 }
673
674 /*
675 * Characters received (called from interrupt handler)
676 */
677 static void atmel_rx_chars(struct uart_port *port)
678 {
679 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
680 unsigned int status, ch;
681
682 status = atmel_uart_readl(port, ATMEL_US_CSR);
683 while (status & ATMEL_US_RXRDY) {
684 ch = atmel_uart_read_char(port);
685
686 /*
687 * note that the error handling code is
688 * out of the main execution path
689 */
690 if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
691 | ATMEL_US_OVRE | ATMEL_US_RXBRK)
692 || atmel_port->break_active)) {
693
694 /* clear error */
695 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
696
697 if (status & ATMEL_US_RXBRK
698 && !atmel_port->break_active) {
699 atmel_port->break_active = 1;
700 atmel_uart_writel(port, ATMEL_US_IER,
701 ATMEL_US_RXBRK);
702 } else {
703 /*
704 * This is either the end-of-break
705 * condition or we've received at
706 * least one character without RXBRK
707 * being set. In both cases, the next
708 * RXBRK will indicate start-of-break.
709 */
710 atmel_uart_writel(port, ATMEL_US_IDR,
711 ATMEL_US_RXBRK);
712 status &= ~ATMEL_US_RXBRK;
713 atmel_port->break_active = 0;
714 }
715 }
716
717 atmel_buffer_rx_char(port, status, ch);
718 status = atmel_uart_readl(port, ATMEL_US_CSR);
719 }
720
721 tasklet_schedule(&atmel_port->tasklet);
722 }
723
724 /*
725 * Transmit characters (called from tasklet with TXRDY interrupt
726 * disabled)
727 */
728 static void atmel_tx_chars(struct uart_port *port)
729 {
730 struct circ_buf *xmit = &port->state->xmit;
731 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
732
733 if (port->x_char &&
734 (atmel_uart_readl(port, ATMEL_US_CSR) & atmel_port->tx_done_mask)) {
735 atmel_uart_write_char(port, port->x_char);
736 port->icount.tx++;
737 port->x_char = 0;
738 }
739 if (uart_circ_empty(xmit) || uart_tx_stopped(port))
740 return;
741
742 while (atmel_uart_readl(port, ATMEL_US_CSR) &
743 atmel_port->tx_done_mask) {
744 atmel_uart_write_char(port, xmit->buf[xmit->tail]);
745 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
746 port->icount.tx++;
747 if (uart_circ_empty(xmit))
748 break;
749 }
750
751 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
752 uart_write_wakeup(port);
753
754 if (!uart_circ_empty(xmit))
755 /* Enable interrupts */
756 atmel_uart_writel(port, ATMEL_US_IER,
757 atmel_port->tx_done_mask);
758 }
759
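/*
 * DMA TX completion callback: advance the circular buffer tail by the
 * amount just transmitted, ack the descriptor, wake up writers and
 * reschedule the tasklet if more data is pending.
 */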
760 static void atmel_complete_tx_dma(void *arg)
761 {
762 struct atmel_uart_port *atmel_port = arg;
763 struct uart_port *port = &atmel_port->uart;
764 struct circ_buf *xmit = &port->state->xmit;
765 struct dma_chan *chan = atmel_port->chan_tx;
766 unsigned long flags;
767
768 spin_lock_irqsave(&port->lock, flags);
769
770 if (chan)
771 dmaengine_terminate_all(chan);
772 xmit->tail += atmel_port->tx_len;
773 xmit->tail &= UART_XMIT_SIZE - 1;
774
775 port->icount.tx += atmel_port->tx_len;
776
777 spin_lock_irq(&atmel_port->lock_tx);
778 async_tx_ack(atmel_port->desc_tx);
779 atmel_port->cookie_tx = -EINVAL;
780 atmel_port->desc_tx = NULL;
781 spin_unlock_irq(&atmel_port->lock_tx);
782
783 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
784 uart_write_wakeup(port);
785
786 /*
787      * xmit is a circular buffer so, if we have just sent data from
788 * xmit->tail to the end of xmit->buf, now we have to transmit the
789 * remaining data from the beginning of xmit->buf to xmit->head.
790 */
791 if (!uart_circ_empty(xmit))
792 tasklet_schedule(&atmel_port->tasklet);
793
794 spin_unlock_irqrestore(&port->lock, flags);
795 }
796
797 static void atmel_release_tx_dma(struct uart_port *port)
798 {
799 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
800 struct dma_chan *chan = atmel_port->chan_tx;
801
802 if (chan) {
803 dmaengine_terminate_all(chan);
804 dma_release_channel(chan);
805 dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1,
806 DMA_TO_DEVICE);
807 }
808
809 atmel_port->desc_tx = NULL;
810 atmel_port->chan_tx = NULL;
811 atmel_port->cookie_tx = -EINVAL;
812 }
813
814 /*
815  * Called from the tasklet with the TXRDY interrupt disabled.
816 */
817 static void atmel_tx_dma(struct uart_port *port)
818 {
819 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
820 struct circ_buf *xmit = &port->state->xmit;
821 struct dma_chan *chan = atmel_port->chan_tx;
822 struct dma_async_tx_descriptor *desc;
823 struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx;
824 unsigned int tx_len, part1_len, part2_len, sg_len;
825 dma_addr_t phys_addr;
826
827 /* Make sure we have an idle channel */
828 if (atmel_port->desc_tx != NULL)
829 return;
830
831 if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
832 /*
833 * DMA is idle now.
834 * Port xmit buffer is already mapped,
835 * and it is one page... Just adjust
836 * offsets and lengths. Since it is a circular buffer,
837 * we have to transmit till the end, and then the rest.
838 * Take the port lock to get a
839 * consistent xmit buffer state.
840 */
841 tx_len = CIRC_CNT_TO_END(xmit->head,
842 xmit->tail,
843 UART_XMIT_SIZE);
844
845 if (atmel_port->fifo_size) {
846 /* multi data mode */
847 part1_len = (tx_len & ~0x3); /* DWORD access */
848 part2_len = (tx_len & 0x3); /* BYTE access */
849 } else {
850 /* single data (legacy) mode */
851 part1_len = 0;
852 part2_len = tx_len; /* BYTE access only */
853 }
854
855 sg_init_table(sgl, 2);
856 sg_len = 0;
857 phys_addr = sg_dma_address(sg_tx) + xmit->tail;
858 if (part1_len) {
859 sg = &sgl[sg_len++];
860 sg_dma_address(sg) = phys_addr;
861 sg_dma_len(sg) = part1_len;
862
863 phys_addr += part1_len;
864 }
865
866 if (part2_len) {
867 sg = &sgl[sg_len++];
868 sg_dma_address(sg) = phys_addr;
869 sg_dma_len(sg) = part2_len;
870 }
871
872 /*
873 * save tx_len so atmel_complete_tx_dma() will increase
874 * xmit->tail correctly
875 */
876 atmel_port->tx_len = tx_len;
877
878 desc = dmaengine_prep_slave_sg(chan,
879 sgl,
880 sg_len,
881 DMA_MEM_TO_DEV,
882 DMA_PREP_INTERRUPT |
883 DMA_CTRL_ACK);
884 if (!desc) {
885 dev_err(port->dev, "Failed to send via dma!\n");
886 return;
887 }
888
889 dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE);
890
891 atmel_port->desc_tx = desc;
892 desc->callback = atmel_complete_tx_dma;
893 desc->callback_param = atmel_port;
894 atmel_port->cookie_tx = dmaengine_submit(desc);
895
896 } else {
897 if (port->rs485.flags & SER_RS485_ENABLED) {
898 /* DMA done, stop TX, start RX for RS485 */
899 atmel_start_rx(port);
900 }
901 }
902
903 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
904 uart_write_wakeup(port);
905 }
906
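/*
 * Request and configure the slave DMA channel used for TX and map the
 * circular xmit buffer for DMA. On failure, fall back to PIO.
 */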
907 static int atmel_prepare_tx_dma(struct uart_port *port)
908 {
909 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
910 dma_cap_mask_t mask;
911 struct dma_slave_config config;
912 int ret, nent;
913
914 dma_cap_zero(mask);
915 dma_cap_set(DMA_SLAVE, mask);
916
917 atmel_port->chan_tx = dma_request_slave_channel(port->dev, "tx");
918 if (atmel_port->chan_tx == NULL)
919 goto chan_err;
920 dev_info(port->dev, "using %s for tx DMA transfers\n",
921 dma_chan_name(atmel_port->chan_tx));
922
923 spin_lock_init(&atmel_port->lock_tx);
924 sg_init_table(&atmel_port->sg_tx, 1);
925 /* UART circular tx buffer is an aligned page. */
926 BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf));
927 sg_set_page(&atmel_port->sg_tx,
928 virt_to_page(port->state->xmit.buf),
929 UART_XMIT_SIZE,
930 (unsigned long)port->state->xmit.buf & ~PAGE_MASK);
931 nent = dma_map_sg(port->dev,
932 &atmel_port->sg_tx,
933 1,
934 DMA_TO_DEVICE);
935
936 if (!nent) {
937 dev_dbg(port->dev, "need to release resource of dma\n");
938 goto chan_err;
939 } else {
940 dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
941 sg_dma_len(&atmel_port->sg_tx),
942 port->state->xmit.buf,
943 &sg_dma_address(&atmel_port->sg_tx));
944 }
945
946 /* Configure the slave DMA */
947 memset(&config, 0, sizeof(config));
948 config.direction = DMA_MEM_TO_DEV;
949 config.dst_addr_width = (atmel_port->fifo_size) ?
950 DMA_SLAVE_BUSWIDTH_4_BYTES :
951 DMA_SLAVE_BUSWIDTH_1_BYTE;
952 config.dst_addr = port->mapbase + ATMEL_US_THR;
953 config.dst_maxburst = 1;
954
955 ret = dmaengine_slave_config(atmel_port->chan_tx,
956 &config);
957 if (ret) {
958 dev_err(port->dev, "DMA tx slave configuration failed\n");
959 goto chan_err;
960 }
961
962 return 0;
963
964 chan_err:
965 dev_err(port->dev, "TX channel not available, switch to pio\n");
966 atmel_port->use_dma_tx = 0;
967 if (atmel_port->chan_tx)
968 atmel_release_tx_dma(port);
969 return -EINVAL;
970 }
971
972 static void atmel_complete_rx_dma(void *arg)
973 {
974 struct uart_port *port = arg;
975 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
976
977 tasklet_schedule(&atmel_port->tasklet);
978 }
979
980 static void atmel_release_rx_dma(struct uart_port *port)
981 {
982 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
983 struct dma_chan *chan = atmel_port->chan_rx;
984
985 if (chan) {
986 dmaengine_terminate_all(chan);
987 dma_release_channel(chan);
988 dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1,
989 DMA_FROM_DEVICE);
990 }
991
992 atmel_port->desc_rx = NULL;
993 atmel_port->chan_rx = NULL;
994 atmel_port->cookie_rx = -EINVAL;
995 }
996
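/*
 * Called from the tasklet: copy the data written by the cyclic RX DMA
 * transfer from the ring buffer into the tty flip buffer, then re-enable
 * the receive timeout interrupt.
 */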
997 static void atmel_rx_from_dma(struct uart_port *port)
998 {
999 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1000 struct tty_port *tport = &port->state->port;
1001 struct circ_buf *ring = &atmel_port->rx_ring;
1002 struct dma_chan *chan = atmel_port->chan_rx;
1003 struct dma_tx_state state;
1004 enum dma_status dmastat;
1005 size_t count;
1006
1007
1008 /* Reset the UART timeout early so that we don't miss one */
1009 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1010 dmastat = dmaengine_tx_status(chan,
1011 atmel_port->cookie_rx,
1012 &state);
1013         /* Reschedule the tasklet if the DMA engine reports an error */
1014 if (dmastat == DMA_ERROR) {
1015 dev_dbg(port->dev, "Get residue error, restart tasklet\n");
1016 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1017 tasklet_schedule(&atmel_port->tasklet);
1018 return;
1019 }
1020
1021 /* CPU claims ownership of RX DMA buffer */
1022 dma_sync_sg_for_cpu(port->dev,
1023 &atmel_port->sg_rx,
1024 1,
1025 DMA_FROM_DEVICE);
1026
1027 /*
1028 * ring->head points to the end of data already written by the DMA.
1029 * ring->tail points to the beginning of data to be read by the
1030 * framework.
1031 * The current transfer size should not be larger than the dma buffer
1032 * length.
1033 */
1034 ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue;
1035 BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx));
1036 /*
1037 * At this point ring->head may point to the first byte right after the
1038 * last byte of the dma buffer:
1039 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx)
1040 *
1041      * However, ring->tail must always point inside the dma buffer:
1042 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1
1043 *
1044 * Since we use a ring buffer, we have to handle the case
1045 * where head is lower than tail. In such a case, we first read from
1046 * tail to the end of the buffer then reset tail.
1047 */
1048 if (ring->head < ring->tail) {
1049 count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;
1050
1051 tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1052 ring->tail = 0;
1053 port->icount.rx += count;
1054 }
1055
1056 /* Finally we read data from tail to head */
1057 if (ring->tail < ring->head) {
1058 count = ring->head - ring->tail;
1059
1060 tty_insert_flip_string(tport, ring->buf + ring->tail, count);
1061 /* Wrap ring->head if needed */
1062 if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
1063 ring->head = 0;
1064 ring->tail = ring->head;
1065 port->icount.rx += count;
1066 }
1067
1068         /* USART retrieves ownership of RX DMA buffer */
1069 dma_sync_sg_for_device(port->dev,
1070 &atmel_port->sg_rx,
1071 1,
1072 DMA_FROM_DEVICE);
1073
1074 /*
1075 * Drop the lock here since it might end up calling
1076 * uart_start(), which takes the lock.
1077 */
1078 spin_unlock(&port->lock);
1079 tty_flip_buffer_push(tport);
1080 spin_lock(&port->lock);
1081
1082 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
1083 }
1084
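/*
 * Request and configure the slave DMA channel used for RX, map the ring
 * buffer and submit the cyclic transfer. On failure, fall back to PIO.
 */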
1085 static int atmel_prepare_rx_dma(struct uart_port *port)
1086 {
1087 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1088 struct dma_async_tx_descriptor *desc;
1089 dma_cap_mask_t mask;
1090 struct dma_slave_config config;
1091 struct circ_buf *ring;
1092 int ret, nent;
1093
1094 ring = &atmel_port->rx_ring;
1095
1096 dma_cap_zero(mask);
1097 dma_cap_set(DMA_CYCLIC, mask);
1098
1099 atmel_port->chan_rx = dma_request_slave_channel(port->dev, "rx");
1100 if (atmel_port->chan_rx == NULL)
1101 goto chan_err;
1102 dev_info(port->dev, "using %s for rx DMA transfers\n",
1103 dma_chan_name(atmel_port->chan_rx));
1104
1105 spin_lock_init(&atmel_port->lock_rx);
1106 sg_init_table(&atmel_port->sg_rx, 1);
1107 /* UART circular rx buffer is an aligned page. */
1108 BUG_ON(!PAGE_ALIGNED(ring->buf));
1109 sg_set_page(&atmel_port->sg_rx,
1110 virt_to_page(ring->buf),
1111 sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE,
1112 (unsigned long)ring->buf & ~PAGE_MASK);
1113 nent = dma_map_sg(port->dev,
1114 &atmel_port->sg_rx,
1115 1,
1116 DMA_FROM_DEVICE);
1117
1118 if (!nent) {
1119 dev_dbg(port->dev, "need to release resource of dma\n");
1120 goto chan_err;
1121 } else {
1122 dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
1123 sg_dma_len(&atmel_port->sg_rx),
1124 ring->buf,
1125 &sg_dma_address(&atmel_port->sg_rx));
1126 }
1127
1128 /* Configure the slave DMA */
1129 memset(&config, 0, sizeof(config));
1130 config.direction = DMA_DEV_TO_MEM;
1131 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1132 config.src_addr = port->mapbase + ATMEL_US_RHR;
1133 config.src_maxburst = 1;
1134
1135 ret = dmaengine_slave_config(atmel_port->chan_rx,
1136 &config);
1137 if (ret) {
1138 dev_err(port->dev, "DMA rx slave configuration failed\n");
1139 goto chan_err;
1140 }
1141 /*
1142          * Prepare a cyclic dma transfer split into 2 periods,
1143          * each one half the ring buffer size
1144 */
1145 desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
1146 sg_dma_address(&atmel_port->sg_rx),
1147 sg_dma_len(&atmel_port->sg_rx),
1148 sg_dma_len(&atmel_port->sg_rx)/2,
1149 DMA_DEV_TO_MEM,
1150 DMA_PREP_INTERRUPT);
1151 desc->callback = atmel_complete_rx_dma;
1152 desc->callback_param = port;
1153 atmel_port->desc_rx = desc;
1154 atmel_port->cookie_rx = dmaengine_submit(desc);
1155
1156 return 0;
1157
1158 chan_err:
1159 dev_err(port->dev, "RX channel not available, switch to pio\n");
1160 atmel_port->use_dma_rx = 0;
1161 if (atmel_port->chan_rx)
1162 atmel_release_rx_dma(port);
1163 return -EINVAL;
1164 }
1165
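/*
 * Polling timer used when the IP has no receive timeout (plain uart):
 * kick the tasklet and re-arm the timer.
 */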
1166 static void atmel_uart_timer_callback(unsigned long data)
1167 {
1168 struct uart_port *port = (void *)data;
1169 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1170
1171 tasklet_schedule(&atmel_port->tasklet);
1172 mod_timer(&atmel_port->uart_timer, jiffies + uart_poll_timeout(port));
1173 }
1174
1175 /*
1176 * receive interrupt handler.
1177 */
1178 static void
1179 atmel_handle_receive(struct uart_port *port, unsigned int pending)
1180 {
1181 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1182
1183 if (atmel_use_pdc_rx(port)) {
1184 /*
1185 * PDC receive. Just schedule the tasklet and let it
1186 * figure out the details.
1187 *
1188 * TODO: We're not handling error flags correctly at
1189 * the moment.
1190 */
1191 if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
1192 atmel_uart_writel(port, ATMEL_US_IDR,
1193 (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
1194 tasklet_schedule(&atmel_port->tasklet);
1195 }
1196
1197 if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
1198 ATMEL_US_FRAME | ATMEL_US_PARE))
1199 atmel_pdc_rxerr(port, pending);
1200 }
1201
1202 if (atmel_use_dma_rx(port)) {
1203 if (pending & ATMEL_US_TIMEOUT) {
1204 atmel_uart_writel(port, ATMEL_US_IDR,
1205 ATMEL_US_TIMEOUT);
1206 tasklet_schedule(&atmel_port->tasklet);
1207 }
1208 }
1209
1210 /* Interrupt receive */
1211 if (pending & ATMEL_US_RXRDY)
1212 atmel_rx_chars(port);
1213 else if (pending & ATMEL_US_RXBRK) {
1214 /*
1215 * End of break detected. If it came along with a
1216 * character, atmel_rx_chars will handle it.
1217 */
1218 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
1219 atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
1220 atmel_port->break_active = 0;
1221 }
1222 }
1223
1224 /*
1225 * transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
1226 */
1227 static void
1228 atmel_handle_transmit(struct uart_port *port, unsigned int pending)
1229 {
1230 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1231
1232 if (pending & atmel_port->tx_done_mask) {
1233 /* Either PDC or interrupt transmission */
1234 atmel_uart_writel(port, ATMEL_US_IDR,
1235 atmel_port->tx_done_mask);
1236 tasklet_schedule(&atmel_port->tasklet);
1237 }
1238 }
1239
1240 /*
1241 * status flags interrupt handler.
1242 */
1243 static void
1244 atmel_handle_status(struct uart_port *port, unsigned int pending,
1245 unsigned int status)
1246 {
1247 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1248
1249 if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
1250 | ATMEL_US_CTSIC)) {
1251 atmel_port->irq_status = status;
1252 atmel_port->status_change = atmel_port->irq_status ^
1253 atmel_port->irq_status_prev;
1254 atmel_port->irq_status_prev = status;
1255 tasklet_schedule(&atmel_port->tasklet);
1256 }
1257 }
1258
1259 /*
1260 * Interrupt handler
1261 */
1262 static irqreturn_t atmel_interrupt(int irq, void *dev_id)
1263 {
1264 struct uart_port *port = dev_id;
1265 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1266 unsigned int status, pending, mask, pass_counter = 0;
1267 bool gpio_handled = false;
1268
1269 spin_lock(&atmel_port->lock_suspended);
1270
1271 do {
1272 status = atmel_get_lines_status(port);
1273 mask = atmel_uart_readl(port, ATMEL_US_IMR);
1274 pending = status & mask;
1275 if (!gpio_handled) {
1276 /*
1277 * Dealing with GPIO interrupt
1278 */
1279 if (irq == atmel_port->gpio_irq[UART_GPIO_CTS])
1280 pending |= ATMEL_US_CTSIC;
1281
1282 if (irq == atmel_port->gpio_irq[UART_GPIO_DSR])
1283 pending |= ATMEL_US_DSRIC;
1284
1285 if (irq == atmel_port->gpio_irq[UART_GPIO_RI])
1286 pending |= ATMEL_US_RIIC;
1287
1288 if (irq == atmel_port->gpio_irq[UART_GPIO_DCD])
1289 pending |= ATMEL_US_DCDIC;
1290
1291 gpio_handled = true;
1292 }
1293 if (!pending)
1294 break;
1295
1296 if (atmel_port->suspended) {
1297 atmel_port->pending |= pending;
1298 atmel_port->pending_status = status;
1299 atmel_uart_writel(port, ATMEL_US_IDR, mask);
1300 pm_system_wakeup();
1301 break;
1302 }
1303
1304 atmel_handle_receive(port, pending);
1305 atmel_handle_status(port, pending, status);
1306 atmel_handle_transmit(port, pending);
1307 } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
1308
1309 spin_unlock(&atmel_port->lock_suspended);
1310
1311 return pass_counter ? IRQ_HANDLED : IRQ_NONE;
1312 }
1313
1314 static void atmel_release_tx_pdc(struct uart_port *port)
1315 {
1316 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1317 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1318
1319 dma_unmap_single(port->dev,
1320 pdc->dma_addr,
1321 pdc->dma_size,
1322 DMA_TO_DEVICE);
1323 }
1324
1325 /*
1326 * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
1327 */
1328 static void atmel_tx_pdc(struct uart_port *port)
1329 {
1330 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1331 struct circ_buf *xmit = &port->state->xmit;
1332 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1333 int count;
1334
1335 /* nothing left to transmit? */
1336 if (atmel_uart_readl(port, ATMEL_PDC_TCR))
1337 return;
1338
1339 xmit->tail += pdc->ofs;
1340 xmit->tail &= UART_XMIT_SIZE - 1;
1341
1342 port->icount.tx += pdc->ofs;
1343 pdc->ofs = 0;
1344
1345 /* more to transmit - setup next transfer */
1346
1347 /* disable PDC transmit */
1348 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
1349
1350 if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
1351 dma_sync_single_for_device(port->dev,
1352 pdc->dma_addr,
1353 pdc->dma_size,
1354 DMA_TO_DEVICE);
1355
1356 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
1357 pdc->ofs = count;
1358
1359 atmel_uart_writel(port, ATMEL_PDC_TPR,
1360 pdc->dma_addr + xmit->tail);
1361 atmel_uart_writel(port, ATMEL_PDC_TCR, count);
1362 /* re-enable PDC transmit */
1363 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1364 /* Enable interrupts */
1365 atmel_uart_writel(port, ATMEL_US_IER,
1366 atmel_port->tx_done_mask);
1367 } else {
1368 if ((port->rs485.flags & SER_RS485_ENABLED) &&
1369 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
1370 /* DMA done, stop TX, start RX for RS485 */
1371 atmel_start_rx(port);
1372 }
1373 }
1374
1375 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1376 uart_write_wakeup(port);
1377 }
1378
1379 static int atmel_prepare_tx_pdc(struct uart_port *port)
1380 {
1381 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1382 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
1383 struct circ_buf *xmit = &port->state->xmit;
1384
1385 pdc->buf = xmit->buf;
1386 pdc->dma_addr = dma_map_single(port->dev,
1387 pdc->buf,
1388 UART_XMIT_SIZE,
1389 DMA_TO_DEVICE);
1390 pdc->dma_size = UART_XMIT_SIZE;
1391 pdc->ofs = 0;
1392
1393 return 0;
1394 }
1395
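/*
 * Called from the tasklet: push the characters buffered by atmel_rx_chars()
 * from the software ring to the tty layer, accounting for break, parity,
 * framing and overrun errors.
 */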
1396 static void atmel_rx_from_ring(struct uart_port *port)
1397 {
1398 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1399 struct circ_buf *ring = &atmel_port->rx_ring;
1400 unsigned int flg;
1401 unsigned int status;
1402
1403 while (ring->head != ring->tail) {
1404 struct atmel_uart_char c;
1405
1406 /* Make sure c is loaded after head. */
1407 smp_rmb();
1408
1409 c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
1410
1411 ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
1412
1413 port->icount.rx++;
1414 status = c.status;
1415 flg = TTY_NORMAL;
1416
1417 /*
1418 * note that the error handling code is
1419 * out of the main execution path
1420 */
1421 if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
1422 | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
1423 if (status & ATMEL_US_RXBRK) {
1424 /* ignore side-effect */
1425 status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
1426
1427 port->icount.brk++;
1428 if (uart_handle_break(port))
1429 continue;
1430 }
1431 if (status & ATMEL_US_PARE)
1432 port->icount.parity++;
1433 if (status & ATMEL_US_FRAME)
1434 port->icount.frame++;
1435 if (status & ATMEL_US_OVRE)
1436 port->icount.overrun++;
1437
1438 status &= port->read_status_mask;
1439
1440 if (status & ATMEL_US_RXBRK)
1441 flg = TTY_BREAK;
1442 else if (status & ATMEL_US_PARE)
1443 flg = TTY_PARITY;
1444 else if (status & ATMEL_US_FRAME)
1445 flg = TTY_FRAME;
1446 }
1447
1448
1449 if (uart_handle_sysrq_char(port, c.ch))
1450 continue;
1451
1452 uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
1453 }
1454
1455 /*
1456 * Drop the lock here since it might end up calling
1457 * uart_start(), which takes the lock.
1458 */
1459 spin_unlock(&port->lock);
1460 tty_flip_buffer_push(&port->state->port);
1461 spin_lock(&port->lock);
1462 }
1463
1464 static void atmel_release_rx_pdc(struct uart_port *port)
1465 {
1466 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1467 int i;
1468
1469 for (i = 0; i < 2; i++) {
1470 struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1471
1472 dma_unmap_single(port->dev,
1473 pdc->dma_addr,
1474 pdc->dma_size,
1475 DMA_FROM_DEVICE);
1476 kfree(pdc->buf);
1477 }
1478 }
1479
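/*
 * Called from the tasklet: push the data received through the PDC
 * double-buffering scheme to the tty layer, recycling each buffer once it
 * has been fully consumed.
 */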
1480 static void atmel_rx_from_pdc(struct uart_port *port)
1481 {
1482 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1483 struct tty_port *tport = &port->state->port;
1484 struct atmel_dma_buffer *pdc;
1485 int rx_idx = atmel_port->pdc_rx_idx;
1486 unsigned int head;
1487 unsigned int tail;
1488 unsigned int count;
1489
1490 do {
1491 /* Reset the UART timeout early so that we don't miss one */
1492 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1493
1494 pdc = &atmel_port->pdc_rx[rx_idx];
1495 head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
1496 tail = pdc->ofs;
1497
1498 /* If the PDC has switched buffers, RPR won't contain
1499 * any address within the current buffer. Since head
1500 * is unsigned, we just need a one-way comparison to
1501 * find out.
1502 *
1503 * In this case, we just need to consume the entire
1504 * buffer and resubmit it for DMA. This will clear the
1505 * ENDRX bit as well, so that we can safely re-enable
1506 * all interrupts below.
1507 */
1508 head = min(head, pdc->dma_size);
1509
1510 if (likely(head != tail)) {
1511 dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
1512 pdc->dma_size, DMA_FROM_DEVICE);
1513
1514 /*
1515 * head will only wrap around when we recycle
1516 * the DMA buffer, and when that happens, we
1517 * explicitly set tail to 0. So head will
1518 * always be greater than tail.
1519 */
1520 count = head - tail;
1521
1522 tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
1523 count);
1524
1525 dma_sync_single_for_device(port->dev, pdc->dma_addr,
1526 pdc->dma_size, DMA_FROM_DEVICE);
1527
1528 port->icount.rx += count;
1529 pdc->ofs = head;
1530 }
1531
1532 /*
1533 * If the current buffer is full, we need to check if
1534 * the next one contains any additional data.
1535 */
1536 if (head >= pdc->dma_size) {
1537 pdc->ofs = 0;
1538 atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
1539 atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);
1540
1541 rx_idx = !rx_idx;
1542 atmel_port->pdc_rx_idx = rx_idx;
1543 }
1544 } while (head >= pdc->dma_size);
1545
1546 /*
1547 * Drop the lock here since it might end up calling
1548 * uart_start(), which takes the lock.
1549 */
1550 spin_unlock(&port->lock);
1551 tty_flip_buffer_push(tport);
1552 spin_lock(&port->lock);
1553
1554 atmel_uart_writel(port, ATMEL_US_IER,
1555 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1556 }
1557
1558 static int atmel_prepare_rx_pdc(struct uart_port *port)
1559 {
1560 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1561 int i;
1562
1563 for (i = 0; i < 2; i++) {
1564 struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
1565
1566 pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
1567 if (pdc->buf == NULL) {
1568 if (i != 0) {
1569 dma_unmap_single(port->dev,
1570 atmel_port->pdc_rx[0].dma_addr,
1571 PDC_BUFFER_SIZE,
1572 DMA_FROM_DEVICE);
1573 kfree(atmel_port->pdc_rx[0].buf);
1574 }
1575 atmel_port->use_pdc_rx = 0;
1576 return -ENOMEM;
1577 }
1578 pdc->dma_addr = dma_map_single(port->dev,
1579 pdc->buf,
1580 PDC_BUFFER_SIZE,
1581 DMA_FROM_DEVICE);
1582 pdc->dma_size = PDC_BUFFER_SIZE;
1583 pdc->ofs = 0;
1584 }
1585
1586 atmel_port->pdc_rx_idx = 0;
1587
1588 atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
1589 atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);
1590
1591 atmel_uart_writel(port, ATMEL_PDC_RNPR,
1592 atmel_port->pdc_rx[1].dma_addr);
1593 atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);
1594
1595 return 0;
1596 }
1597
1598 /*
1599 * tasklet handling tty stuff outside the interrupt handler.
1600 */
1601 static void atmel_tasklet_func(unsigned long data)
1602 {
1603 struct uart_port *port = (struct uart_port *)data;
1604 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1605 unsigned int status = atmel_port->irq_status;
1606 unsigned int status_change = atmel_port->status_change;
1607
1608 /* The interrupt handler does not take the lock */
1609 spin_lock(&port->lock);
1610
1611 atmel_port->schedule_tx(port);
1612
1613 if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
1614 | ATMEL_US_DCD | ATMEL_US_CTS)) {
1615 /* TODO: All reads to CSR will clear these interrupts! */
1616 if (status_change & ATMEL_US_RI)
1617 port->icount.rng++;
1618 if (status_change & ATMEL_US_DSR)
1619 port->icount.dsr++;
1620 if (status_change & ATMEL_US_DCD)
1621 uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
1622 if (status_change & ATMEL_US_CTS)
1623 uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
1624
1625 wake_up_interruptible(&port->state->port.delta_msr_wait);
1626
1627 atmel_port->status_change = 0;
1628 }
1629
1630 atmel_port->schedule_rx(port);
1631
1632 spin_unlock(&port->lock);
1633 }
1634
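/*
 * Decide, from the device tree or platform data, whether each direction
 * uses the DMA controller, the PDC or PIO.
 */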
1635 static void atmel_init_property(struct atmel_uart_port *atmel_port,
1636 struct platform_device *pdev)
1637 {
1638 struct device_node *np = pdev->dev.of_node;
1639 struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
1640
1641 if (np) {
1642 /* DMA/PDC usage specification */
1643 if (of_get_property(np, "atmel,use-dma-rx", NULL)) {
1644 if (of_get_property(np, "dmas", NULL)) {
1645 atmel_port->use_dma_rx = true;
1646 atmel_port->use_pdc_rx = false;
1647 } else {
1648 atmel_port->use_dma_rx = false;
1649 atmel_port->use_pdc_rx = true;
1650 }
1651 } else {
1652 atmel_port->use_dma_rx = false;
1653 atmel_port->use_pdc_rx = false;
1654 }
1655
1656 if (of_get_property(np, "atmel,use-dma-tx", NULL)) {
1657 if (of_get_property(np, "dmas", NULL)) {
1658 atmel_port->use_dma_tx = true;
1659 atmel_port->use_pdc_tx = false;
1660 } else {
1661 atmel_port->use_dma_tx = false;
1662 atmel_port->use_pdc_tx = true;
1663 }
1664 } else {
1665 atmel_port->use_dma_tx = false;
1666 atmel_port->use_pdc_tx = false;
1667 }
1668
1669 } else {
1670 atmel_port->use_pdc_rx = pdata->use_dma_rx;
1671 atmel_port->use_pdc_tx = pdata->use_dma_tx;
1672 atmel_port->use_dma_rx = false;
1673 atmel_port->use_dma_tx = false;
1674 }
1675
1676 }
1677
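/*
 * Initialize port->rs485 from the device tree properties or, failing that,
 * from the platform data.
 */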
1678 static void atmel_init_rs485(struct uart_port *port,
1679 struct platform_device *pdev)
1680 {
1681 struct device_node *np = pdev->dev.of_node;
1682 struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
1683
1684 if (np) {
1685 struct serial_rs485 *rs485conf = &port->rs485;
1686 u32 rs485_delay[2];
1687 /* rs485 properties */
1688 if (of_property_read_u32_array(np, "rs485-rts-delay",
1689 rs485_delay, 2) == 0) {
1690 rs485conf->delay_rts_before_send = rs485_delay[0];
1691 rs485conf->delay_rts_after_send = rs485_delay[1];
1692 rs485conf->flags = 0;
1693 }
1694
1695 if (of_get_property(np, "rs485-rx-during-tx", NULL))
1696 rs485conf->flags |= SER_RS485_RX_DURING_TX;
1697
1698 if (of_get_property(np, "linux,rs485-enabled-at-boot-time",
1699 NULL))
1700 rs485conf->flags |= SER_RS485_ENABLED;
1701 } else {
1702 port->rs485 = pdata->rs485;
1703 }
1704
1705 }
1706
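/*
 * Select the prepare/schedule/release callbacks matching the transfer
 * method (DMA, PDC or PIO) chosen for each direction.
 */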
1707 static void atmel_set_ops(struct uart_port *port)
1708 {
1709 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1710
1711 if (atmel_use_dma_rx(port)) {
1712 atmel_port->prepare_rx = &atmel_prepare_rx_dma;
1713 atmel_port->schedule_rx = &atmel_rx_from_dma;
1714 atmel_port->release_rx = &atmel_release_rx_dma;
1715 } else if (atmel_use_pdc_rx(port)) {
1716 atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
1717 atmel_port->schedule_rx = &atmel_rx_from_pdc;
1718 atmel_port->release_rx = &atmel_release_rx_pdc;
1719 } else {
1720 atmel_port->prepare_rx = NULL;
1721 atmel_port->schedule_rx = &atmel_rx_from_ring;
1722 atmel_port->release_rx = NULL;
1723 }
1724
1725 if (atmel_use_dma_tx(port)) {
1726 atmel_port->prepare_tx = &atmel_prepare_tx_dma;
1727 atmel_port->schedule_tx = &atmel_tx_dma;
1728 atmel_port->release_tx = &atmel_release_tx_dma;
1729 } else if (atmel_use_pdc_tx(port)) {
1730 atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
1731 atmel_port->schedule_tx = &atmel_tx_pdc;
1732 atmel_port->release_tx = &atmel_release_tx_pdc;
1733 } else {
1734 atmel_port->prepare_tx = NULL;
1735 atmel_port->schedule_tx = &atmel_tx_chars;
1736 atmel_port->release_tx = NULL;
1737 }
1738 }
1739
1740 /*
1741  * Get the IP name: usart or uart
1742 */
1743 static void atmel_get_ip_name(struct uart_port *port)
1744 {
1745 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1746 int name = atmel_uart_readl(port, ATMEL_US_NAME);
1747 u32 version;
1748 int usart, uart;
1749         /* NAME register values in ASCII: "USAR" (usart) and "DBGU" (uart) */
1750 usart = 0x55534152;
1751 uart = 0x44424755;
1752
1753 atmel_port->is_usart = false;
1754
1755 if (name == usart) {
1756 dev_dbg(port->dev, "This is usart\n");
1757 atmel_port->is_usart = true;
1758 } else if (name == uart) {
1759 dev_dbg(port->dev, "This is uart\n");
1760 atmel_port->is_usart = false;
1761 } else {
1762 /* fallback for older SoCs: use version field */
1763 version = atmel_uart_readl(port, ATMEL_US_VERSION);
1764 switch (version) {
1765 case 0x302:
1766 case 0x10213:
1767 dev_dbg(port->dev, "This version is usart\n");
1768 atmel_port->is_usart = true;
1769 break;
1770 case 0x203:
1771 case 0x10202:
1772 dev_dbg(port->dev, "This version is uart\n");
1773 atmel_port->is_usart = false;
1774 break;
1775 default:
1776 dev_err(port->dev, "Not supported ip name nor version, set to uart\n");
1777 }
1778 }
1779 }
1780
1781 static void atmel_free_gpio_irq(struct uart_port *port)
1782 {
1783 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1784 enum mctrl_gpio_idx i;
1785
1786 for (i = 0; i < UART_GPIO_MAX; i++)
1787 if (atmel_port->gpio_irq[i] >= 0)
1788 free_irq(atmel_port->gpio_irq[i], port);
1789 }
1790
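/*
 * Request an interrupt for each modem-control GPIO so that line changes
 * are reported; on failure, free the interrupts already requested.
 */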
1791 static int atmel_request_gpio_irq(struct uart_port *port)
1792 {
1793 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1794 int *irq = atmel_port->gpio_irq;
1795 enum mctrl_gpio_idx i;
1796 int err = 0;
1797
1798 for (i = 0; (i < UART_GPIO_MAX) && !err; i++) {
1799 if (irq[i] < 0)
1800 continue;
1801
1802 irq_set_status_flags(irq[i], IRQ_NOAUTOEN);
1803 err = request_irq(irq[i], atmel_interrupt, IRQ_TYPE_EDGE_BOTH,
1804 "atmel_serial", port);
1805 if (err)
1806 dev_err(port->dev, "atmel_startup - Can't get %d irq\n",
1807 irq[i]);
1808 }
1809
1810 /*
1811 * If something went wrong, rollback.
1812 */
1813 while (err && (--i >= 0))
1814 if (irq[i] >= 0)
1815 free_irq(irq[i], port);
1816
1817 return err;
1818 }
1819
1820 /*
1821 * Perform initialization and enable port for reception
1822 */
1823 static int atmel_startup(struct uart_port *port)
1824 {
1825 struct platform_device *pdev = to_platform_device(port->dev);
1826 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1827 struct tty_struct *tty = port->state->port.tty;
1828 int retval;
1829
1830 /*
1831 * Ensure that no interrupts are enabled otherwise when
1832 * request_irq() is called we could get stuck trying to
1833 * handle an unexpected interrupt
1834 */
1835 atmel_uart_writel(port, ATMEL_US_IDR, -1);
1836 atmel_port->ms_irq_enabled = false;
1837
1838 /*
1839 * Allocate the IRQ
1840 */
1841 retval = request_irq(port->irq, atmel_interrupt,
1842 IRQF_SHARED | IRQF_COND_SUSPEND,
1843 tty ? tty->name : "atmel_serial", port);
1844 if (retval) {
1845 dev_err(port->dev, "atmel_startup - Can't get irq\n");
1846 return retval;
1847 }
1848
1849 /*
1850 * Get the GPIO lines IRQ
1851 */
1852 retval = atmel_request_gpio_irq(port);
1853 if (retval)
1854 goto free_irq;
1855
1856 tasklet_enable(&atmel_port->tasklet);
1857
1858 /*
1859 * Initialize DMA (if necessary)
1860 */
1861 atmel_init_property(atmel_port, pdev);
1862 atmel_set_ops(port);
1863
1864 if (atmel_port->prepare_rx) {
1865 retval = atmel_port->prepare_rx(port);
1866 if (retval < 0)
1867 atmel_set_ops(port);
1868 }
1869
1870 if (atmel_port->prepare_tx) {
1871 retval = atmel_port->prepare_tx(port);
1872 if (retval < 0)
1873 atmel_set_ops(port);
1874 }
1875
1876 /*
1877 * Enable FIFO when available
1878 */
1879 if (atmel_port->fifo_size) {
1880 unsigned int txrdym = ATMEL_US_ONE_DATA;
1881 unsigned int rxrdym = ATMEL_US_ONE_DATA;
1882 unsigned int fmr;
1883
1884 atmel_uart_writel(port, ATMEL_US_CR,
1885 ATMEL_US_FIFOEN |
1886 ATMEL_US_RXFCLR |
1887 ATMEL_US_TXFLCLR);
1888
1889 if (atmel_use_dma_tx(port))
1890 txrdym = ATMEL_US_FOUR_DATA;
1891
1892 fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym);
1893 if (atmel_port->rts_high &&
1894 atmel_port->rts_low)
1895 fmr |= ATMEL_US_FRTSC |
1896 ATMEL_US_RXFTHRES(atmel_port->rts_high) |
1897 ATMEL_US_RXFTHRES2(atmel_port->rts_low);
1898
1899 atmel_uart_writel(port, ATMEL_US_FMR, fmr);
1900 }
1901
1902 /* Save current CSR for comparison in atmel_tasklet_func() */
1903 atmel_port->irq_status_prev = atmel_get_lines_status(port);
1904 atmel_port->irq_status = atmel_port->irq_status_prev;
1905
1906 /*
1907 * Finally, enable the serial port
1908 */
1909 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
1910 /* enable xmit & rcvr */
1911 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
1912
1913 setup_timer(&atmel_port->uart_timer,
1914 atmel_uart_timer_callback,
1915 (unsigned long)port);
1916
1917 if (atmel_use_pdc_rx(port)) {
1918 /* set UART timeout */
1919 if (!atmel_port->is_usart) {
1920 mod_timer(&atmel_port->uart_timer,
1921 jiffies + uart_poll_timeout(port));
1922 /* set USART timeout */
1923 } else {
1924 atmel_uart_writel(port, ATMEL_US_RTOR, PDC_RX_TIMEOUT);
1925 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1926
1927 atmel_uart_writel(port, ATMEL_US_IER,
1928 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
1929 }
1930 /* enable PDC controller */
1931 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1932 } else if (atmel_use_dma_rx(port)) {
1933 /* set UART timeout */
1934 if (!atmel_port->is_usart) {
1935 mod_timer(&atmel_port->uart_timer,
1936 jiffies + uart_poll_timeout(port));
1937 /* set USART timeout */
1938 } else {
1939 atmel_uart_writel(port, ATMEL_US_RTOR, PDC_RX_TIMEOUT);
1940 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
1941
1942 atmel_uart_writel(port, ATMEL_US_IER,
1943 ATMEL_US_TIMEOUT);
1944 }
1945 } else {
1946 /* enable receive only */
1947 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
1948 }
1949
1950 return 0;
1951
1952 free_irq:
1953 free_irq(port->irq, port);
1954
1955 return retval;
1956 }
1957
1958 /*
1959 * Flush any TX data submitted for DMA. Called when the TX circular
1960 * buffer is reset.
1961 */
1962 static void atmel_flush_buffer(struct uart_port *port)
1963 {
1964 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1965
1966 if (atmel_use_pdc_tx(port)) {
1967 atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
1968 atmel_port->pdc_tx.ofs = 0;
1969 }
1970 }
1971
1972 /*
1973 * Disable the port
1974 */
1975 static void atmel_shutdown(struct uart_port *port)
1976 {
1977 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1978
1979 /*
1980 * Prevent any tasklets being scheduled during
1981 * cleanup
1982 */
1983 del_timer_sync(&atmel_port->uart_timer);
1984
1985 /*
1986 * Clear out any scheduled tasklets before
1987 * we destroy the buffers
1988 */
1989 tasklet_disable(&atmel_port->tasklet);
1990 tasklet_kill(&atmel_port->tasklet);
1991
1992 /*
1993 * Ensure everything is stopped and
1994 * disable all interrupts, port and break condition.
1995 */
1996 atmel_stop_rx(port);
1997 atmel_stop_tx(port);
1998
1999 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
2000 atmel_uart_writel(port, ATMEL_US_IDR, -1);
2001
2002
2003 /*
2004 * Shut-down the DMA.
2005 */
2006 if (atmel_port->release_rx)
2007 atmel_port->release_rx(port);
2008 if (atmel_port->release_tx)
2009 atmel_port->release_tx(port);
2010
2011 /*
2012 * Reset ring buffer pointers
2013 */
2014 atmel_port->rx_ring.head = 0;
2015 atmel_port->rx_ring.tail = 0;
2016
2017 /*
2018 * Free the interrupts
2019 */
2020 free_irq(port->irq, port);
2021 atmel_free_gpio_irq(port);
2022
2023 atmel_port->ms_irq_enabled = false;
2024
2025 atmel_flush_buffer(port);
2026 }
2027
2028 /*
2029 * Power / Clock management.
2030 */
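/*
 * The serial core passes state 0 (UART_PM_STATE_ON) when the port is
 * opened or resumed, and 3 (UART_PM_STATE_OFF) when it is closed or
 * suspended; any other value is unexpected here.
 */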
2031 static void atmel_serial_pm(struct uart_port *port, unsigned int state,
2032 unsigned int oldstate)
2033 {
2034 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2035
2036 switch (state) {
2037 case 0:
2038 /*
2039 * Enable the peripheral clock for this serial port.
2040 * This is called on uart_open() or a resume event.
2041 */
2042 clk_prepare_enable(atmel_port->clk);
2043
2044 /* re-enable interrupts if we disabled some on suspend */
2045 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
2046 break;
2047 case 3:
2048 /* Back up the interrupt mask and disable all interrupts */
2049 atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
2050 atmel_uart_writel(port, ATMEL_US_IDR, -1);
2051
2052 /*
2053 * Disable the peripheral clock for this serial port.
2054 * This is called on uart_close() or a suspend event.
2055 */
2056 clk_disable_unprepare(atmel_port->clk);
2057 break;
2058 default:
2059 dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
2060 }
2061 }
2062
2063 /*
2064 * Change the port parameters
2065 */
2066 static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
2067 struct ktermios *old)
2068 {
2069 unsigned long flags;
2070 unsigned int old_mode, mode, imr, quot, baud;
2071
2072 /* save the current mode register */
2073 mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
2074
2075 /* reset the mode, clock divisor, parity, stop bits and data size */
2076 mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP |
2077 ATMEL_US_PAR | ATMEL_US_USMODE);
2078
2079 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
2080 quot = uart_get_divisor(port, baud);
2081
2082 if (quot > 65535) { /* BRGR is 16-bit, so switch to slower clock */
2083 quot /= 8;
2084 mode |= ATMEL_US_USCLKS_MCK_DIV8;
2085 }
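/*
 * Illustrative example: with a 66 MHz peripheral clock and 115200 baud
 * requested, uart_get_divisor() returns 66000000 / (16 * 115200) ~= 36,
 * i.e. an actual rate of about 114583 baud (roughly 0.5% error).  Only
 * very low rates overflow the 16-bit BRGR and need the MCK/8 selection.
 */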
2086
2087 /* byte size */
2088 switch (termios->c_cflag & CSIZE) {
2089 case CS5:
2090 mode |= ATMEL_US_CHRL_5;
2091 break;
2092 case CS6:
2093 mode |= ATMEL_US_CHRL_6;
2094 break;
2095 case CS7:
2096 mode |= ATMEL_US_CHRL_7;
2097 break;
2098 default:
2099 mode |= ATMEL_US_CHRL_8;
2100 break;
2101 }
2102
2103 /* stop bits */
2104 if (termios->c_cflag & CSTOPB)
2105 mode |= ATMEL_US_NBSTOP_2;
2106
2107 /* parity */
2108 if (termios->c_cflag & PARENB) {
2109 /* Mark or Space parity */
2110 if (termios->c_cflag & CMSPAR) {
2111 if (termios->c_cflag & PARODD)
2112 mode |= ATMEL_US_PAR_MARK;
2113 else
2114 mode |= ATMEL_US_PAR_SPACE;
2115 } else if (termios->c_cflag & PARODD)
2116 mode |= ATMEL_US_PAR_ODD;
2117 else
2118 mode |= ATMEL_US_PAR_EVEN;
2119 } else
2120 mode |= ATMEL_US_PAR_NONE;
2121
2122 spin_lock_irqsave(&port->lock, flags);
2123
2124 port->read_status_mask = ATMEL_US_OVRE;
2125 if (termios->c_iflag & INPCK)
2126 port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2127 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
2128 port->read_status_mask |= ATMEL_US_RXBRK;
2129
2130 if (atmel_use_pdc_rx(port))
2131 /* need to enable error interrupts */
2132 atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask);
2133
2134 /*
2135 * Characters to ignore
2136 */
2137 port->ignore_status_mask = 0;
2138 if (termios->c_iflag & IGNPAR)
2139 port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
2140 if (termios->c_iflag & IGNBRK) {
2141 port->ignore_status_mask |= ATMEL_US_RXBRK;
2142 /*
2143 * If we're ignoring parity and break indicators,
2144 * ignore overruns too (for real raw support).
2145 */
2146 if (termios->c_iflag & IGNPAR)
2147 port->ignore_status_mask |= ATMEL_US_OVRE;
2148 }
2149	/* TODO: Ignore all characters if CREAD is not set. */
2150
2151 /* update the per-port timeout */
2152 uart_update_timeout(port, termios->c_cflag, baud);
2153
2154 /*
2155 * save/disable interrupts. The tty layer will ensure that the
2156 * transmitter is empty if requested by the caller, so there's
2157 * no need to wait for it here.
2158 */
2159 imr = atmel_uart_readl(port, ATMEL_US_IMR);
2160 atmel_uart_writel(port, ATMEL_US_IDR, -1);
2161
2162 /* disable receiver and transmitter */
2163 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
2164
2165 /* mode */
2166 if (port->rs485.flags & SER_RS485_ENABLED) {
2167 atmel_uart_writel(port, ATMEL_US_TTGR,
2168 port->rs485.delay_rts_after_send);
2169 mode |= ATMEL_US_USMODE_RS485;
2170 } else if (termios->c_cflag & CRTSCTS) {
2171 /* RS232 with hardware handshake (RTS/CTS) */
2172 mode |= ATMEL_US_USMODE_HWHS;
2173 } else {
2174		/* RS232 without hardware handshake */
2175 mode |= ATMEL_US_USMODE_NORMAL;
2176 }
2177
2178 /* set the mode, clock divisor, parity, stop bits and data size */
2179 atmel_uart_writel(port, ATMEL_US_MR, mode);
2180
2181 /*
2182 * when switching the mode, set the RTS line state according to the
2183 * new mode, otherwise keep the former state
2184 */
2185 if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
2186 unsigned int rts_state;
2187
2188 if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
2189 /* let the hardware control the RTS line */
2190 rts_state = ATMEL_US_RTSDIS;
2191 } else {
2192 /* force RTS line to low level */
2193 rts_state = ATMEL_US_RTSEN;
2194 }
2195
2196 atmel_uart_writel(port, ATMEL_US_CR, rts_state);
2197 }
2198
2199 /* set the baud rate */
2200 atmel_uart_writel(port, ATMEL_US_BRGR, quot);
2201 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2202 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2203
2204 /* restore interrupts */
2205 atmel_uart_writel(port, ATMEL_US_IER, imr);
2206
2207 /* CTS flow-control and modem-status interrupts */
2208 if (UART_ENABLE_MS(port, termios->c_cflag))
2209 atmel_enable_ms(port);
2210 else
2211 atmel_disable_ms(port);
2212
2213 spin_unlock_irqrestore(&port->lock, flags);
2214 }
2215
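/*
 * Attaching the PPS line discipline (N_PPS) needs DCD transitions to be
 * reported, so modem-status interrupts are forced on and UPF_HARDPPS_CD
 * is set; both are dropped again when another discipline is chosen and
 * CRTSCTS does not require them.
 */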
2216 static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios)
2217 {
2218 if (termios->c_line == N_PPS) {
2219 port->flags |= UPF_HARDPPS_CD;
2220 spin_lock_irq(&port->lock);
2221 atmel_enable_ms(port);
2222 spin_unlock_irq(&port->lock);
2223 } else {
2224 port->flags &= ~UPF_HARDPPS_CD;
2225 if (!UART_ENABLE_MS(port, termios->c_cflag)) {
2226 spin_lock_irq(&port->lock);
2227 atmel_disable_ms(port);
2228 spin_unlock_irq(&port->lock);
2229 }
2230 }
2231 }
2232
2233 /*
2234 * Return string describing the specified port
2235 */
2236 static const char *atmel_type(struct uart_port *port)
2237 {
2238 return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
2239 }
2240
2241 /*
2242 * Release the memory region(s) being used by 'port'.
2243 */
2244 static void atmel_release_port(struct uart_port *port)
2245 {
2246 struct platform_device *pdev = to_platform_device(port->dev);
2247 int size = pdev->resource[0].end - pdev->resource[0].start + 1;
2248
2249 release_mem_region(port->mapbase, size);
2250
2251 if (port->flags & UPF_IOREMAP) {
2252 iounmap(port->membase);
2253 port->membase = NULL;
2254 }
2255 }
2256
2257 /*
2258 * Request the memory region(s) being used by 'port'.
2259 */
2260 static int atmel_request_port(struct uart_port *port)
2261 {
2262 struct platform_device *pdev = to_platform_device(port->dev);
2263 int size = pdev->resource[0].end - pdev->resource[0].start + 1;
2264
2265 if (!request_mem_region(port->mapbase, size, "atmel_serial"))
2266 return -EBUSY;
2267
2268 if (port->flags & UPF_IOREMAP) {
2269 port->membase = ioremap(port->mapbase, size);
2270 if (port->membase == NULL) {
2271 release_mem_region(port->mapbase, size);
2272 return -ENOMEM;
2273 }
2274 }
2275
2276 return 0;
2277 }
2278
2279 /*
2280 * Configure/autoconfigure the port.
2281 */
2282 static void atmel_config_port(struct uart_port *port, int flags)
2283 {
2284 if (flags & UART_CONFIG_TYPE) {
2285 port->type = PORT_ATMEL;
2286 atmel_request_port(port);
2287 }
2288 }
2289
2290 /*
2291 * Verify the new serial_struct (for TIOCSSERIAL).
2292 */
2293 static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
2294 {
2295 int ret = 0;
2296 if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
2297 ret = -EINVAL;
2298 if (port->irq != ser->irq)
2299 ret = -EINVAL;
2300 if (ser->io_type != SERIAL_IO_MEM)
2301 ret = -EINVAL;
2302 if (port->uartclk / 16 != ser->baud_base)
2303 ret = -EINVAL;
2304 if (port->mapbase != (unsigned long)ser->iomem_base)
2305 ret = -EINVAL;
2306 if (port->iobase != ser->port)
2307 ret = -EINVAL;
2308 if (ser->hub6 != 0)
2309 ret = -EINVAL;
2310 return ret;
2311 }
2312
2313 #ifdef CONFIG_CONSOLE_POLL
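/* Polled I/O hooks, used e.g. by kgdboc when interrupts are disabled */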
2314 static int atmel_poll_get_char(struct uart_port *port)
2315 {
2316 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY))
2317 cpu_relax();
2318
2319 return atmel_uart_read_char(port);
2320 }
2321
2322 static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
2323 {
2324 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2325 cpu_relax();
2326
2327 atmel_uart_write_char(port, ch);
2328 }
2329 #endif
2330
2331 static struct uart_ops atmel_pops = {
2332 .tx_empty = atmel_tx_empty,
2333 .set_mctrl = atmel_set_mctrl,
2334 .get_mctrl = atmel_get_mctrl,
2335 .stop_tx = atmel_stop_tx,
2336 .start_tx = atmel_start_tx,
2337 .stop_rx = atmel_stop_rx,
2338 .enable_ms = atmel_enable_ms,
2339 .break_ctl = atmel_break_ctl,
2340 .startup = atmel_startup,
2341 .shutdown = atmel_shutdown,
2342 .flush_buffer = atmel_flush_buffer,
2343 .set_termios = atmel_set_termios,
2344 .set_ldisc = atmel_set_ldisc,
2345 .type = atmel_type,
2346 .release_port = atmel_release_port,
2347 .request_port = atmel_request_port,
2348 .config_port = atmel_config_port,
2349 .verify_port = atmel_verify_port,
2350 .pm = atmel_serial_pm,
2351 #ifdef CONFIG_CONSOLE_POLL
2352 .poll_get_char = atmel_poll_get_char,
2353 .poll_put_char = atmel_poll_put_char,
2354 #endif
2355 };
2356
2357 /*
2358 * Configure the port from the platform device resource info.
2359 */
2360 static int atmel_init_port(struct atmel_uart_port *atmel_port,
2361 struct platform_device *pdev)
2362 {
2363 int ret;
2364 struct uart_port *port = &atmel_port->uart;
2365 struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
2366
2367 atmel_init_property(atmel_port, pdev);
2368 atmel_set_ops(port);
2369
2370 atmel_init_rs485(port, pdev);
2371
2372 port->iotype = UPIO_MEM;
2373 port->flags = UPF_BOOT_AUTOCONF;
2374 port->ops = &atmel_pops;
2375 port->fifosize = 1;
2376 port->dev = &pdev->dev;
2377 port->mapbase = pdev->resource[0].start;
2378 port->irq = pdev->resource[1].start;
2379 port->rs485_config = atmel_config_rs485;
2380
2381 tasklet_init(&atmel_port->tasklet, atmel_tasklet_func,
2382 (unsigned long)port);
2383 tasklet_disable(&atmel_port->tasklet);
2384
2385 memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
2386
2387 if (pdata && pdata->regs) {
2388 /* Already mapped by setup code */
2389 port->membase = pdata->regs;
2390 } else {
2391 port->flags |= UPF_IOREMAP;
2392 port->membase = NULL;
2393 }
2394
2395 /* for console, the clock could already be configured */
2396 if (!atmel_port->clk) {
2397 atmel_port->clk = clk_get(&pdev->dev, "usart");
2398 if (IS_ERR(atmel_port->clk)) {
2399 ret = PTR_ERR(atmel_port->clk);
2400 atmel_port->clk = NULL;
2401 return ret;
2402 }
2403 ret = clk_prepare_enable(atmel_port->clk);
2404 if (ret) {
2405 clk_put(atmel_port->clk);
2406 atmel_port->clk = NULL;
2407 return ret;
2408 }
2409 port->uartclk = clk_get_rate(atmel_port->clk);
2410 clk_disable_unprepare(atmel_port->clk);
2411 /* only enable clock when USART is in use */
2412 }
2413
2414 /* Use TXEMPTY for interrupt when rs485 else TXRDY or ENDTX|TXBUFE */
2415 if (port->rs485.flags & SER_RS485_ENABLED)
2416 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
2417 else if (atmel_use_pdc_tx(port)) {
2418 port->fifosize = PDC_BUFFER_SIZE;
2419 atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
2420 } else {
2421 atmel_port->tx_done_mask = ATMEL_US_TXRDY;
2422 }
2423
2424 return 0;
2425 }
2426
2427 struct platform_device *atmel_default_console_device; /* the serial console device */
2428
2429 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2430 static void atmel_console_putchar(struct uart_port *port, int ch)
2431 {
2432 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
2433 cpu_relax();
2434 atmel_uart_write_char(port, ch);
2435 }
2436
2437 /*
2438 * Interrupts are disabled on entering
2439 */
2440 static void atmel_console_write(struct console *co, const char *s, u_int count)
2441 {
2442 struct uart_port *port = &atmel_ports[co->index].uart;
2443 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2444 unsigned int status, imr;
2445 unsigned int pdc_tx;
2446
2447 /*
2448 * First, save IMR and then disable interrupts
2449 */
2450 imr = atmel_uart_readl(port, ATMEL_US_IMR);
2451 atmel_uart_writel(port, ATMEL_US_IDR,
2452 ATMEL_US_RXRDY | atmel_port->tx_done_mask);
2453
2454 /* Store PDC transmit status and disable it */
2455 pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
2456 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
2457
2458 uart_console_write(port, s, count, atmel_console_putchar);
2459
2460 /*
2461	 * Finally, wait for the transmitter to become ready again
2462	 * and restore the IMR
2463 */
2464 do {
2465 status = atmel_uart_readl(port, ATMEL_US_CSR);
2466 } while (!(status & ATMEL_US_TXRDY));
2467
2468 /* Restore PDC transmit status */
2469 if (pdc_tx)
2470 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
2471
2472 /* set interrupts back the way they were */
2473 atmel_uart_writel(port, ATMEL_US_IER, imr);
2474 }
2475
2476 /*
2477 * If the port was already initialised (eg, by a boot loader),
2478 * try to determine the current setup.
2479 */
2480 static void __init atmel_console_get_options(struct uart_port *port, int *baud,
2481 int *parity, int *bits)
2482 {
2483 unsigned int mr, quot;
2484
2485 /*
2486 * If the baud rate generator isn't running, the port wasn't
2487 * initialized by the boot loader.
2488 */
2489 quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD;
2490 if (!quot)
2491 return;
2492
2493 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL;
2494 if (mr == ATMEL_US_CHRL_8)
2495 *bits = 8;
2496 else
2497 *bits = 7;
2498
2499 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR;
2500 if (mr == ATMEL_US_PAR_EVEN)
2501 *parity = 'e';
2502 else if (mr == ATMEL_US_PAR_ODD)
2503 *parity = 'o';
2504
2505 /*
2506 * The serial core only rounds down when matching this to a
2507 * supported baud rate. Make sure we don't end up slightly
2508 * lower than one of those, as it would make us fall through
2509 * to a much lower baud rate than we really want.
2510 */
2511 *baud = port->uartclk / (16 * (quot - 1));
2512 }
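/*
 * Illustrative example of the (quot - 1) bias: with a 66 MHz clock and
 * CD == 36 the exact rate is ~114583 baud; dividing by (quot - 1)
 * reports ~117857, which the serial core rounds down to the intended
 * 115200 instead of falling through to 57600.
 */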
2513
2514 static int __init atmel_console_setup(struct console *co, char *options)
2515 {
2516 int ret;
2517 struct uart_port *port = &atmel_ports[co->index].uart;
2518 int baud = 115200;
2519 int bits = 8;
2520 int parity = 'n';
2521 int flow = 'n';
2522
2523 if (port->membase == NULL) {
2524 /* Port not initialized yet - delay setup */
2525 return -ENODEV;
2526 }
2527
2528 ret = clk_prepare_enable(atmel_ports[co->index].clk);
2529 if (ret)
2530 return ret;
2531
2532 atmel_uart_writel(port, ATMEL_US_IDR, -1);
2533 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
2534 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
2535
2536 if (options)
2537 uart_parse_options(options, &baud, &parity, &bits, &flow);
2538 else
2539 atmel_console_get_options(port, &baud, &parity, &bits);
2540
2541 return uart_set_options(port, co, baud, parity, bits, flow);
2542 }
2543
2544 static struct uart_driver atmel_uart;
2545
2546 static struct console atmel_console = {
2547 .name = ATMEL_DEVICENAME,
2548 .write = atmel_console_write,
2549 .device = uart_console_device,
2550 .setup = atmel_console_setup,
2551 .flags = CON_PRINTBUFFER,
2552 .index = -1,
2553 .data = &atmel_uart,
2554 };
2555
2556 #define ATMEL_CONSOLE_DEVICE (&atmel_console)
2557
2558 /*
2559 * Early console initialization (before VM subsystem initialized).
2560 */
2561 static int __init atmel_console_init(void)
2562 {
2563 int ret;
2564 if (atmel_default_console_device) {
2565 struct atmel_uart_data *pdata =
2566 dev_get_platdata(&atmel_default_console_device->dev);
2567 int id = pdata->num;
2568 struct atmel_uart_port *port = &atmel_ports[id];
2569
2570 port->backup_imr = 0;
2571 port->uart.line = id;
2572
2573 add_preferred_console(ATMEL_DEVICENAME, id, NULL);
2574 ret = atmel_init_port(port, atmel_default_console_device);
2575 if (ret)
2576 return ret;
2577 register_console(&atmel_console);
2578 }
2579
2580 return 0;
2581 }
2582
2583 console_initcall(atmel_console_init);
2584
2585 /*
2586 * Late console initialization.
2587 */
2588 static int __init atmel_late_console_init(void)
2589 {
2590 if (atmel_default_console_device
2591 && !(atmel_console.flags & CON_ENABLED))
2592 register_console(&atmel_console);
2593
2594 return 0;
2595 }
2596
2597 core_initcall(atmel_late_console_init);
2598
2599 static inline bool atmel_is_console_port(struct uart_port *port)
2600 {
2601 return port->cons && port->cons->index == port->line;
2602 }
2603
2604 #else
2605 #define ATMEL_CONSOLE_DEVICE NULL
2606
2607 static inline bool atmel_is_console_port(struct uart_port *port)
2608 {
2609 return false;
2610 }
2611 #endif
2612
2613 static struct uart_driver atmel_uart = {
2614 .owner = THIS_MODULE,
2615 .driver_name = "atmel_serial",
2616 .dev_name = ATMEL_DEVICENAME,
2617 .major = SERIAL_ATMEL_MAJOR,
2618 .minor = MINOR_START,
2619 .nr = ATMEL_MAX_UART,
2620 .cons = ATMEL_CONSOLE_DEVICE,
2621 };
2622
2623 #ifdef CONFIG_PM
2624 static bool atmel_serial_clk_will_stop(void)
2625 {
2626 #ifdef CONFIG_ARCH_AT91
2627 return at91_suspend_entering_slow_clock();
2628 #else
2629 return false;
2630 #endif
2631 }
2632
2633 static int atmel_serial_suspend(struct platform_device *pdev,
2634 pm_message_t state)
2635 {
2636 struct uart_port *port = platform_get_drvdata(pdev);
2637 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2638
2639 if (atmel_is_console_port(port) && console_suspend_enabled) {
2640 /* Drain the TX shifter */
2641 while (!(atmel_uart_readl(port, ATMEL_US_CSR) &
2642 ATMEL_US_TXEMPTY))
2643 cpu_relax();
2644 }
2645
2646	/* we cannot wake up if we're running on the slow clock */
2647 atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
2648 if (atmel_serial_clk_will_stop()) {
2649 unsigned long flags;
2650
2651 spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2652 atmel_port->suspended = true;
2653 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2654 device_set_wakeup_enable(&pdev->dev, 0);
2655 }
2656
2657 uart_suspend_port(&atmel_uart, port);
2658
2659 return 0;
2660 }
2661
2662 static int atmel_serial_resume(struct platform_device *pdev)
2663 {
2664 struct uart_port *port = platform_get_drvdata(pdev);
2665 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2666 unsigned long flags;
2667
2668 spin_lock_irqsave(&atmel_port->lock_suspended, flags);
2669 if (atmel_port->pending) {
2670 atmel_handle_receive(port, atmel_port->pending);
2671 atmel_handle_status(port, atmel_port->pending,
2672 atmel_port->pending_status);
2673 atmel_handle_transmit(port, atmel_port->pending);
2674 atmel_port->pending = 0;
2675 }
2676 atmel_port->suspended = false;
2677 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags);
2678
2679 uart_resume_port(&atmel_uart, port);
2680 device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
2681
2682 return 0;
2683 }
2684 #else
2685 #define atmel_serial_suspend NULL
2686 #define atmel_serial_resume NULL
2687 #endif
2688
2689 static int atmel_init_gpios(struct atmel_uart_port *p, struct device *dev)
2690 {
2691 enum mctrl_gpio_idx i;
2692 struct gpio_desc *gpiod;
2693
2694 p->gpios = mctrl_gpio_init_noauto(dev, 0);
2695 if (IS_ERR(p->gpios))
2696 return PTR_ERR(p->gpios);
2697
2698 for (i = 0; i < UART_GPIO_MAX; i++) {
2699 gpiod = mctrl_gpio_to_gpiod(p->gpios, i);
2700 if (gpiod && (gpiod_get_direction(gpiod) == GPIOF_DIR_IN))
2701 p->gpio_irq[i] = gpiod_to_irq(gpiod);
2702 else
2703 p->gpio_irq[i] = -EINVAL;
2704 }
2705
2706 return 0;
2707 }
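/*
 * Only modem-control GPIOs configured as inputs get a usable interrupt
 * number here; output lines and absent GPIOs are left at -EINVAL so the
 * GPIO IRQ request/free helpers can skip them.
 */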
2708
2709 static void atmel_serial_probe_fifos(struct atmel_uart_port *port,
2710 struct platform_device *pdev)
2711 {
2712 port->fifo_size = 0;
2713 port->rts_low = 0;
2714 port->rts_high = 0;
2715
2716 if (of_property_read_u32(pdev->dev.of_node,
2717 "atmel,fifo-size",
2718 &port->fifo_size))
2719 return;
2720
2721 if (!port->fifo_size)
2722 return;
2723
2724 if (port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
2725 port->fifo_size = 0;
2726 dev_err(&pdev->dev, "Invalid FIFO size\n");
2727 return;
2728 }
2729
2730 /*
2731 * 0 <= rts_low <= rts_high <= fifo_size
2732	 * Once their CTS line is asserted by the remote peer, some x86 UARTs tend
2733	 * to flush their internal TX FIFO, commonly up to 16 characters, before
2734	 * they actually stop sending new data. So we try to set the RTS High
2735	 * Threshold to a reasonably high value that respects this empirical
2736	 * 16-character rule when possible.
2737 */
2738 port->rts_high = max_t(int, port->fifo_size >> 1,
2739 port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
2740 port->rts_low = max_t(int, port->fifo_size >> 2,
2741 port->fifo_size - ATMEL_RTS_LOW_OFFSET);
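/*
 * Illustrative example: a 32-entry RX FIFO gives
 * rts_high = max(16, 32 - 16) = 16 and rts_low = max(8, 32 - 20) = 12,
 * leaving 16 free entries for a late burst once RTS is deasserted.
 */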
2742
2743 dev_info(&pdev->dev, "Using FIFO (%u data)\n",
2744 port->fifo_size);
2745 dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
2746 port->rts_high);
2747 dev_dbg(&pdev->dev, "RTS Low Threshold : %2u data\n",
2748 port->rts_low);
2749 }
2750
2751 static int atmel_serial_probe(struct platform_device *pdev)
2752 {
2753 struct atmel_uart_port *port;
2754 struct device_node *np = pdev->dev.of_node;
2755 struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
2756 void *data;
2757 int ret = -ENODEV;
2758 bool rs485_enabled;
2759
2760 BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
2761
2762 if (np)
2763 ret = of_alias_get_id(np, "serial");
2764 else
2765 if (pdata)
2766 ret = pdata->num;
2767
2768 if (ret < 0)
2769		/* port id found in neither platform data nor device-tree aliases:
2770		 * auto-enumerate it */
2771 ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
2772
2773 if (ret >= ATMEL_MAX_UART) {
2774 ret = -ENODEV;
2775 goto err;
2776 }
2777
2778 if (test_and_set_bit(ret, atmel_ports_in_use)) {
2779 /* port already in use */
2780 ret = -EBUSY;
2781 goto err;
2782 }
2783
2784 port = &atmel_ports[ret];
2785 port->backup_imr = 0;
2786 port->uart.line = ret;
2787 atmel_serial_probe_fifos(port, pdev);
2788
2789 spin_lock_init(&port->lock_suspended);
2790
2791 ret = atmel_init_gpios(port, &pdev->dev);
2792 if (ret < 0) {
2793 dev_err(&pdev->dev, "Failed to initialize GPIOs.");
2794 goto err_clear_bit;
2795 }
2796
2797 ret = atmel_init_port(port, pdev);
2798 if (ret)
2799 goto err_clear_bit;
2800
2801 if (!atmel_use_pdc_rx(&port->uart)) {
2802 ret = -ENOMEM;
2803 data = kmalloc(sizeof(struct atmel_uart_char)
2804 * ATMEL_SERIAL_RINGSIZE, GFP_KERNEL);
2805 if (!data)
2806 goto err_alloc_ring;
2807 port->rx_ring.buf = data;
2808 }
2809
2810 rs485_enabled = port->uart.rs485.flags & SER_RS485_ENABLED;
2811
2812 ret = uart_add_one_port(&atmel_uart, &port->uart);
2813 if (ret)
2814 goto err_add_port;
2815
2816 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
2817 if (atmel_is_console_port(&port->uart)
2818 && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
2819 /*
2820 * The serial core enabled the clock for us, so undo
2821 * the clk_prepare_enable() in atmel_console_setup()
2822 */
2823 clk_disable_unprepare(port->clk);
2824 }
2825 #endif
2826
2827 device_init_wakeup(&pdev->dev, 1);
2828 platform_set_drvdata(pdev, port);
2829
2830 /*
2831 * The peripheral clock has been disabled by atmel_init_port():
2832 * enable it before accessing I/O registers
2833 */
2834 clk_prepare_enable(port->clk);
2835
2836 if (rs485_enabled) {
2837 atmel_uart_writel(&port->uart, ATMEL_US_MR,
2838 ATMEL_US_USMODE_NORMAL);
2839 atmel_uart_writel(&port->uart, ATMEL_US_CR, ATMEL_US_RTSEN);
2840 }
2841
2842 /*
2843	 * Get the IP name of the port (USART or UART)
2844 */
2845 atmel_get_ip_name(&port->uart);
2846
2847 /*
2848 * The peripheral clock can now safely be disabled till the port
2849 * is used
2850 */
2851 clk_disable_unprepare(port->clk);
2852
2853 return 0;
2854
2855 err_add_port:
2856 kfree(port->rx_ring.buf);
2857 port->rx_ring.buf = NULL;
2858 err_alloc_ring:
2859 if (!atmel_is_console_port(&port->uart)) {
2860 clk_put(port->clk);
2861 port->clk = NULL;
2862 }
2863 err_clear_bit:
2864 clear_bit(port->uart.line, atmel_ports_in_use);
2865 err:
2866 return ret;
2867 }
2868
2869 static int atmel_serial_remove(struct platform_device *pdev)
2870 {
2871 struct uart_port *port = platform_get_drvdata(pdev);
2872 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2873 int ret = 0;
2874
2875 tasklet_kill(&atmel_port->tasklet);
2876
2877 device_init_wakeup(&pdev->dev, 0);
2878
2879 ret = uart_remove_one_port(&atmel_uart, port);
2880
2881 kfree(atmel_port->rx_ring.buf);
2882
2883 /* "port" is allocated statically, so we shouldn't free it */
2884
2885 clear_bit(port->line, atmel_ports_in_use);
2886
2887 clk_put(atmel_port->clk);
2888
2889 return ret;
2890 }
2891
2892 static struct platform_driver atmel_serial_driver = {
2893 .probe = atmel_serial_probe,
2894 .remove = atmel_serial_remove,
2895 .suspend = atmel_serial_suspend,
2896 .resume = atmel_serial_resume,
2897 .driver = {
2898 .name = "atmel_usart",
2899 .of_match_table = of_match_ptr(atmel_serial_dt_ids),
2900 },
2901 };
2902
2903 static int __init atmel_serial_init(void)
2904 {
2905 int ret;
2906
2907 ret = uart_register_driver(&atmel_uart);
2908 if (ret)
2909 return ret;
2910
2911 ret = platform_driver_register(&atmel_serial_driver);
2912 if (ret)
2913 uart_unregister_driver(&atmel_uart);
2914
2915 return ret;
2916 }
2917
2918 static void __exit atmel_serial_exit(void)
2919 {
2920 platform_driver_unregister(&atmel_serial_driver);
2921 uart_unregister_driver(&atmel_uart);
2922 }
2923
2924 module_init(atmel_serial_init);
2925 module_exit(atmel_serial_exit);
2926
2927 MODULE_AUTHOR("Rick Bronson");
2928 MODULE_DESCRIPTION("Atmel AT91 / AT32 serial port driver");
2929 MODULE_LICENSE("GPL");
2930 MODULE_ALIAS("platform:atmel_usart");