/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * SPDX-License-Identifier:	GPL-2.0
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include "linux-compat.h"
#include "usb-compat.h"

#include "musb_core.h"
#include "musb_host.h"
/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */
/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */
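/*
 * Illustrative sketch (added, not part of the driver): under the
 * "claimed until the queue empties" policy above, mapping a new URB to
 * hardware reduces to a transfer-type switch; hypothetical pseudo-helper
 * for orientation only -- the real logic lives in musb_schedule() below:
 *
 *	switch (usb_endpoint_type(epd)) {
 *	case USB_ENDPOINT_XFER_CONTROL:	// always ep0
 *	case USB_ENDPOINT_XFER_BULK:	// dedicated bulk endpoint(s)
 *	default:			// claim a free periodic endpoint
 *	}
 */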
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);
/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb	*musb = ep->musb;
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (WARN(retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}
/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}
static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled())
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh  = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}
static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}
/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			} s; }),
			epnum, buf + offset, len);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (frame >= urb->start_frame)) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			dev_dbg(musb->controller, "SOF for %d\n", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			musb_h_tx_dma_start(hw_ep);
	}
}
/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	dev_dbg(musb->controller,
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
	spin_lock(&musb->lock);
}
/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				    struct urb *urb)
{
	void __iomem		*epio = qh->hw_ep->regs;
	u16			csr;

	/*
	 * FIXME: the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */
	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}
/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;
		struct dma_controller	*dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	if (qh != NULL && qh->is_ready) {
		dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}
/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}
/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);
	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	if (musb->double_buffer_not_ok)
		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
	else
		musb_writew(ep->regs, MUSB_RXMAXP,
				qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;
	u8			mode;

#ifdef	CONFIG_USB_INVENTRA_DMA
	if (length > channel->max_len)
		length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (length > pkt_size) {
		mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		if (qh->hb_mult == 1)
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = mode;
	musb_writew(epio, MUSB_TXCSR, csr);
#else
	if (!is_cppi_enabled() && !tusb_dma_omap())
		return false;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif

	qh->segsize = length;

	/*
	 * Ensure the data reaches main memory before starting
	 * the DMA transfer.
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}
/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;

	dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb_readw(mbase, MUSB_INTRTXE);
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (usb_gettoggle(urb->dev, qh->epnum, 1))
				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
					| MUSB_TXCSR_H_DATATOGGLE;
			else
				csr |= MUSB_TXCSR_CLRDATATOG;

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
			/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (musb->double_buffer_not_ok)
				musb_writew(epio, MUSB_TXMAXP,
						hw_ep->max_packet_sz_tx);
			else if (can_bulk_split(musb, qh->type))
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((hw_ep->max_packet_sz_tx /
						packet_sz) - 1) << 11);
			else
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			musb_write_fifo(hw_ep, load_count, buf);
		}

		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/*
			 * Unless caller treats short RX transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}
/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			dev_dbg(musb->controller, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			dev_dbg(musb->controller, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			dev_dbg(musb->controller, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}
/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		dev_dbg(musb->controller, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		dev_dbg(musb->controller, "control NAK timeout\n");

		/* NOTE: this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		dev_dbg(musb->controller, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);
		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}
#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif
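/*
 * Sketch (added, simplified): in terms of the helpers in this file, the
 * sequence in the comment above is roughly
 *
 *	musb_tx_dma_program(...);	// sets DMAENAB (and DMAMODE/AUTOSET
 *					// for multi-packet mode 1)
 *	// ... DMA irq fires once the fifo has been loaded ...
 *	musb_host_tx(musb, epnum);	// clears DMAENAB before DMAMODE and,
 *					// for mode 0 or a short final packet,
 *					// sets TXPKTRDY itself
 */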
/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			length = 0;
	size_t			offset = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	struct urb		*urb = next_urb(qh);
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;
	bool			transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);

		/* NOTE: this code path would be a good place to PAUSE a
		 * transfer, if there's some other (nonperiodic) tx urb
		 * that could use this fifo.  (dma complicates it...)
		 * That's already done for bulk RX transfers.
		 *
		 * if (bulk && qh->ring.next != &musb->out_bulk), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR,
				MUSB_TXCSR_H_WZC_BITS
				| MUSB_TXCSR_TXPKTRDY);
		return;
	}

	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
			    "CSR %04x\n", tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled() || tusb_dma_omap())
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else	if (tx_csr & MUSB_TXCSR_DMAENAB) {
		dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	/* Unmap the buffer so that CPU can use it */
	usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
	musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	qh->segsize = length;

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}
#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |		else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */

#endif
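/*
 * Note (added): the upshot of the mode-1 discussion above is visible in
 * musb_host_rx() below -- dma->desired_mode stays 0 unless USE_MODE1 is
 * defined, and even then mode 1 is only chosen when URB_SHORT_NOT_OK
 * guarantees the transfer cannot legally end on a short packet.
 */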
/* Schedule next QH from musb->in_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr;

	musb_ep_select(mbase, ep->epnum);
	dma = is_dma_capable() ? ep->rx_channel : NULL;

	/* clear nak timeout bit */
	rx_csr = musb_readw(epio, MUSB_RXCSR);
	rx_csr |= MUSB_RXCSR_H_WZC_BITS;
	rx_csr &= ~MUSB_RXCSR_DATAERROR;
	musb_writew(epio, MUSB_RXCSR, rx_csr);

	cur_qh = first_qh(&musb->in_bulk);
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, 1, urb);

		/* move cur_qh to end of queue */
		list_move_tail(&cur_qh->ring, &musb->in_bulk);

		/* get the next qh from musb->in_bulk */
		next_qh = first_qh(&musb->in_bulk);

		/* set rx_reinit and schedule the next qh */
		ep->rx_reinit = 1;
		musb_start_urb(musb, 1, next_qh);
	}
}
/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	int			pipe;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
			musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	pipe = urb->pipe;

	dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
		epnum, rx_csr, urb->actual_length,
		dma ? dma->actual_len : 0);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		dev_dbg(musb->controller, "RX end %d STALL\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		dev_dbg(musb->controller, "end %d RX proto error\n", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);

			/* NOTE: NAKing is *NOT* an error, so we want to
			 * continue.  Except ... if there's a request for
			 * another QH, use that instead of starving it.
			 *
			 * Devices like Ethernet and serial adapters keep
			 * reads posted at all times, which will starve
			 * other devices without this logic.
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_rx_nak_timeout(musb, hw_ep);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
		dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
				epnum);
		status = -EPROTO;
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */

#ifndef CONFIG_USB_INVENTRA_DMA
	if (rx_csr & MUSB_RXCSR_H_REQPKT)  {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = xfer_len;

			/* even if there was an error, we did the dma
			 * for iso_frame_desc->length
			 */
			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
				d->status = 0;

			if (++qh->iso_idx >= urb->number_of_packets)
				done = true;
			else
				done = false;

		} else  {
			/* done if urb buffer is full or short packet is recd */
			done = (urb->actual_length + xfer_len >=
					urb->transfer_buffer_length
				|| dma->actual_len < qh->maxpacket);
		}

		/* send IN token for next packet, without AUTOREQ */
		if (!done) {
			val |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);
		}

		dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
			done ? "off" : "reset",
			musb_readw(epio, MUSB_RXCSR),
			musb_readw(epio, MUSB_RXCOUNT));
#else
		done = true;
#endif
	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma) {
			struct dma_controller	*c;
			u16			rx_count;
			int			ret, length;
			dma_addr_t		buf;

			rx_count = musb_readw(epio, MUSB_RXCOUNT);

			dev_dbg(musb->controller, "RX%d count %d, buffer 0x%x len %d/%d\n",
					epnum, rx_count,
					urb->transfer_dma
						+ urb->actual_length,
					qh->offset,
					urb->transfer_buffer_length);

			c = musb->dma_controller;

			if (usb_pipeisoc(pipe)) {
				int d_status = 0;
				struct usb_iso_packet_descriptor *d;

				d = urb->iso_frame_desc + qh->iso_idx;

				if (iso_err) {
					d_status = -EILSEQ;
					urb->error_count++;
				}
				if (rx_count > d->length) {
					if (d_status == 0) {
						d_status = -EOVERFLOW;
						urb->error_count++;
					}
					dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",
					    rx_count, d->length);

					length = d->length;
				} else
					length = rx_count;
				d->status = d_status;
				buf = urb->transfer_dma + d->offset;
			} else {
				length = rx_count;
				buf = urb->transfer_dma +
						urb->actual_length;
			}

			dma->desired_mode = 0;
#ifdef USE_MODE1
			/* because of the issue below, mode 1 will
			 * only rarely behave with correct semantics.
			 */
			if ((urb->transfer_flags &
						URB_SHORT_NOT_OK)
				&& (urb->transfer_buffer_length -
						urb->actual_length)
					> qh->maxpacket)
				dma->desired_mode = 1;
			if (rx_count < hw_ep->max_packet_sz_rx) {
				length = rx_count;
				dma->desired_mode = 0;
			} else {
				length = urb->transfer_buffer_length;
			}
#endif

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 *	Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */

			val = musb_readw(epio, MUSB_RXCSR);
			val &= ~MUSB_RXCSR_H_REQPKT;

			if (dma->desired_mode == 0)
				val &= ~MUSB_RXCSR_H_AUTOREQ;
			else
				val |= MUSB_RXCSR_H_AUTOREQ;
			val |= MUSB_RXCSR_DMAENAB;

			/* autoclear shouldn't be set in high bandwidth */
			if (qh->hb_mult == 1)
				val |= MUSB_RXCSR_AUTOCLEAR;

			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);

			/* REVISIT if when actual_length != 0,
			 * transfer_buffer_length needs to be
			 * adjusted first...
			 */
			ret = c->channel_program(
				dma, qh->maxpacket,
				dma->desired_mode, buf, length);

			if (!ret) {
				c->channel_release(dma);
				hw_ep->rx_channel = NULL;
				dma = NULL;
				val = musb_readw(epio, MUSB_RXCSR);
				val &= ~(MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_AUTOREQ
					| MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, val);
			}
		}
#endif	/* Mentor DMA */

		if (!dma) {
			/* Unmap the buffer so that CPU can use it */
			usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
			done = musb_host_packet_rx(musb, urb,
					epnum, iso_err);
			dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}
/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
	struct musb		*musb,
	struct musb_qh		*qh,
	int			is_in)
{
	int			idle;
	int			best_diff;
	int			best_end, epnum;
	struct musb_hw_ep	*hw_ep = NULL;
	struct list_head	*head = NULL;
	u8			toggle;
	u8			txtype;
	struct urb		*urb = next_urb(qh);

	/* use fixed hardware for control and bulk */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* else, periodic transfers get muxed to other endpoints */

	/*
	 * We know this qh hasn't been scheduled, so all we need to do
	 * is choose which hardware endpoint to put it on ...
	 *
	 * REVISIT what we really want here is a regular schedule tree
	 * like e.g. OHCI uses.
	 */
	best_diff = 4096;
	best_end = -1;

	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int	diff;

		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
			continue;

		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx;
		else
			diff = hw_ep->max_packet_sz_tx;
		diff -= (qh->maxpacket * qh->hb_mult);

		if (diff >= 0 && best_diff > diff) {

			/*
			 * Mentor controller has a bug in that if we schedule
			 * a BULK Tx transfer on an endpoint that had earlier
			 * handled ISOC then the BULK transfer has to start on
			 * a zero toggle.  If the BULK transfer starts on a 1
			 * toggle then this transfer will fail as the mentor
			 * controller starts the Bulk transfer on a 0 toggle
			 * irrespective of the programming of the toggle bits
			 * in the TXCSR register.  Check for this condition
			 * while allocating the EP for a Tx Bulk transfer.  If
			 * so skip this EP.
			 */
			hw_ep = musb->endpoints + epnum;
			toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
			txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
					>> 4) & 0x3;
			if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
				toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
				continue;

			best_diff = diff;
			best_end = epnum;
		}
	}
	/* use bulk reserved ep1 if no other ep is free */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;

		/* Enable bulk RX NAK timeout scheme when bulk requests are
		 * multiplexed.  This scheme doesn't work in high speed to full
		 * speed scenario as NAK interrupts are not coming from a
		 * full speed device connected to a high speed device.
		 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
		 * 4 (8 frame or 8ms) for FS device.
		 */
		if (is_in && qh->dev)
			qh->intv_reg =
				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
		goto success;
	} else if (best_end < 0) {
		return -ENOSPC;
	}

	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
success:
	if (head) {
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}
1901 static int tt_needed(struct musb
*musb
, struct usb_device
*dev
)
1903 if ((musb_readb(musb
->mregs
, MUSB_POWER
) & MUSB_POWER_HSMODE
) &&
1904 (dev
->speed
< USB_SPEED_HIGH
))
#ifndef __UBOOT__
static int musb_urb_enqueue(
#else
int musb_urb_enqueue(
#endif
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags)
{
	unsigned long			flags;
	struct musb			*musb = hcd_to_musb(hcd);
	struct usb_host_endpoint	*hep = urb->ep;
	struct musb_qh			*qh;
	struct usb_endpoint_descriptor	*epd = &hep->desc;
	int				ret;
	unsigned			type_reg;
	unsigned			interval;

	/* host role must be active */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	qh = ret ? NULL : hep->hcpriv;
	if (qh)
		urb->hcpriv = qh;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* DMA mapping was already done, if needed, and this urb is on
	 * hep->urb_list now ... so we're done, unless hep wasn't yet
	 * scheduled onto a live qh.
	 *
	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
	 * disabled, testing for empty qh->ring and avoiding qh setup costs
	 * except for the first urb queued after a config change.
	 */
	if (qh || ret)
		return ret;

	/* Allocate and initialize qh, minimizing the work done each time
	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
	 *
	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
	 * for bugs in other kernel code to break this driver...
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = usb_endpoint_maxp(epd);
	qh->type = usb_endpoint_type(epd);

	/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
	 * Some musb cores don't support high bandwidth ISO transfers; and
	 * we don't (yet!) support high bandwidth interrupt transfers.
	 */
	qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
	if (qh->hb_mult > 1) {
		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);

		if (ok)
			ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
				|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
		if (!ok) {
			ret = -EMSGSIZE;
			goto done;
		}
		qh->maxpacket &= 0x7ff;
	}

	qh->epnum = usb_endpoint_num(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;

	/* Precompute RXINTERVAL/TXINTERVAL register */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/*
		 * Full/low speeds use the linear encoding,
		 * high speed uses the logarithmic encoding.
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/* REVISIT we actually want to use NAK limits, hinting to the
		 * transfer scheduling logic to try some other qh, e.g. try
		 * for 2 msec first:
		 *
		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
		 *
		 * The downside of disabling this is that transfer scheduling
		 * gets VERY unfair for nonperiodic transfers; a misbehaving
		 * peripheral could make that hurt.  That's perfectly normal
		 * for reads from network or serial adapters ... so we have
		 * partial NAKlimit support for bulk RX.
		 *
		 * The upside of disabling it is simpler transfer scheduling.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;

	/* precompute addressing for external hub/tt ports */
	if (musb->is_multipoint) {
#ifndef __UBOOT__
		struct usb_device	*parent = urb->dev->parent;
#else
		struct usb_device	*parent = usb_dev_get_parent(urb->dev);
#endif

#ifndef __UBOOT__
		if (parent != hcd->self.root_hub) {
#else
		if (parent) {
#endif
			qh->h_addr_reg = (u8) parent->devnum;

#ifndef __UBOOT__
			/* set up tt info if needed */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
#else
			if (tt_needed(musb, urb->dev)) {
				uint8_t portnr = 0;
				uint8_t hubaddr = 0;
				usb_find_usb2_hub_address_port(urb->dev,
							       &hubaddr,
							       &portnr);
				qh->h_addr_reg = hubaddr;
				qh->h_port_reg = portnr;
			}
#endif
		}
	}

	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
	 * until we get real dma queues (with an entry for each urb/buffer),
	 * we only have work to do in the former case.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv) {
		/* some concurrent activity submitted another urb to hep...
		 * odd, rare, error prone, but legal.
		 */
		kfree(qh);
		qh = NULL;
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* FIXME set urb->start_frame for iso/intr, it's tested in
		 * musb_start_urb(), but otherwise only konicawc cares ...
		 */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}
/*
 * abort a transfer that's at the head of a hardware queue.
 * called with controller locked, irqs blocked
 * that hardware queue advances to the next transfer, unless prevented
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	void __iomem		*epio = ep->regs;
	unsigned		hw_end = ep->epnum;
	void __iomem		*regs = ep->musb->mregs;
	int			is_in = usb_pipein(urb->pipe);
	int			status = 0;
	u16			csr;

	musb_ep_select(regs, hw_end);

	if (is_dma_capable()) {
		struct dma_channel	*dma;

		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			dev_dbg(musb->controller,
				"abort %cX%d DMA for urb %p --> %d\n",
				is_in ? 'R' : 'T', ep->epnum,
				urb, status);
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (ep->epnum && is_in) {
		/* giveback saves bulk toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* REVISIT we still get an irq; should likely clear the
		 * endpoint's irq status here to avoid bogus irqs.
		 * clearing that status is platform-specific...
		 */
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, csr);
		/* flush cpu writebuffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else  {
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}
#ifndef __UBOOT__
static int musb_urb_dequeue(
#else
int musb_urb_dequeue(
#endif
	struct usb_hcd *hcd,
	struct urb *urb,
	int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	unsigned long		flags;
	int			is_in  = usb_pipein(urb->pipe);
	int			ret;

	dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			is_in ? "in" : "out");

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/*
	 * Any URB not actively programmed into endpoint hardware can be
	 * immediately given back; that's any URB not at the head of an
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * if it's at the head, it might not be known to the hardware...
	 *
	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
	 * has already been updated.  This is a synchronous abort; it'd be
	 * OK to hold off until after some IRQ, though.
	 *
	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
	 */
	if (!qh->is_ready
			|| urb->urb_list.prev != &qh->hep->urb_list
			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
		int	ready = qh->is_ready;

		qh->is_ready = 0;
		musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else (usually musb_giveback) is using it
		 * and its URB list has emptied, recycle this qh.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}
/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct urb		*urb;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

	/* Kick the first URB off the hardware, if needed */
	qh->is_ready = 0;
	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* cleanup */
		musb_cleanup_urb(urb, qh);

		/* Then nuke all the others ... and advance the
		 * queue on hw_ep (e.g. bulk ring) when we're done.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* Just empty the queue; the hardware is busy with
		 * other transfers, and since !qh->is_ready nothing
		 * will activate any of these as it advances.
		 */
		while (!list_empty(&hep->urb_list))
			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}
static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	return musb_readw(musb->mregs, MUSB_FRAME);
}
static int musb_h_start(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	/* NOTE: musb_start() is called when the hub driver turns
	 * on port power, or when (OTG) peripheral starts.
	 */
	hcd->state = HC_STATE_RUNNING;
	musb->port1_status = 0;
	return 0;
}

static void musb_h_stop(struct usb_hcd *hcd)
{
	musb_stop(hcd_to_musb(hcd));
	hcd->state = HC_STATE_HALT;
}
static int musb_bus_suspend(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);
	u8		devctl;

	if (!is_host_active(musb))
		return 0;

	switch (musb->xceiv->state) {
	case OTG_STATE_A_SUSPEND:
		return 0;
	case OTG_STATE_A_WAIT_VRISE:
		/* ID could be grounded even if there's no device
		 * on the other end of the cable.  NOTE that the
		 * A_WAIT_VRISE timers are messy with MUSB...
		 */
		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
		break;
	default:
		break;
	}

	if (musb->is_active) {
		WARNING("trying to suspend as %s while active\n",
				otg_state_string(musb->xceiv->state));
		return -EBUSY;
	} else
		return 0;
}

static int musb_bus_resume(struct usb_hcd *hcd)
{
	/* resuming child port does the work */
	return 0;
}
const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	.hcd_priv_size		= sizeof(struct musb),
	.flags			= HCD_USB2 | HCD_MEMORY,

	/* not using irq handler or reset hooks from usbcore, since
	 * those must be shared with peripheral code for OTG configs
	 */

	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

	.hub_status_data	= musb_hub_status_data,
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
	/* .start_port_reset	= NULL, */
	/* .hub_irq_enable	= NULL, */
};