/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include "linux-compat.h"
#include "usb-compat.h"

#include "musb_core.h"
#include "musb_host.h"
/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */
/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);
/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
    struct musb *musb = ep->musb;
    void __iomem *epio = ep->regs;
    u16 csr;
    int retries = 1000;

    csr = musb_readw(epio, MUSB_TXCSR);
    while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
        dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
        csr |= MUSB_TXCSR_FLUSHFIFO;
        musb_writew(epio, MUSB_TXCSR, csr);
        csr = musb_readw(epio, MUSB_TXCSR);
        if (WARN(retries-- < 1,
                "Could not flush host TX%d fifo: csr: %04x\n",
                ep->epnum, csr))
            return;
        mdelay(1);
    }
}
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
    void __iomem *epio = ep->regs;
    u16 csr;
    int retries = 5;

    /* scrub any data left in the fifo */
    do {
        csr = musb_readw(epio, MUSB_TXCSR);
        if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
            break;
        musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
        csr = musb_readw(epio, MUSB_TXCSR);
        udelay(10);
    } while (--retries);

    WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
            ep->epnum, csr);

    /* and reset for the next transfer */
    musb_writew(epio, MUSB_TXCSR, 0);
}
/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
    u16 txcsr;

    /* NOTE: no locks here; caller should lock and select EP */
    if (ep->epnum) {
        txcsr = musb_readw(ep->regs, MUSB_TXCSR);
        txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
        musb_writew(ep->regs, MUSB_TXCSR, txcsr);
    } else {
        txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
        musb_writew(ep->regs, MUSB_CSR0, txcsr);
    }
}
static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
    u16 txcsr;

    /* NOTE: no locks here; caller should lock and select EP */
    txcsr = musb_readw(ep->regs, MUSB_TXCSR);
    txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
    if (is_cppi_enabled())
        txcsr |= MUSB_TXCSR_DMAMODE;
    musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
    if (is_in != 0 || ep->is_shared_fifo)
        ep->in_qh = qh;
    if (is_in == 0 || ep->is_shared_fifo)
        ep->out_qh = qh;
}
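
/*
 * For example (illustrative note, not from the original source): with
 * ep->is_shared_fifo set, both conditions above hold regardless of
 * direction, so a single call updates in_qh and out_qh together -- a
 * shared FIFO can only serve one qh at a time, whichever direction it
 * is later asked for.
 */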
static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
    return is_in ? ep->in_qh : ep->out_qh;
}
/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
    u16 frame;
    u32 len;
    void __iomem *mbase = musb->mregs;
    struct urb *urb = next_urb(qh);
    void *buf = urb->transfer_buffer;
    u32 offset = 0;
    struct musb_hw_ep *hw_ep = qh->hw_ep;
    unsigned pipe = urb->pipe;
    u8 address = usb_pipedevice(pipe);
    int epnum = hw_ep->epnum;

    /* initialize software qh state */
    qh->offset = 0;
    qh->segsize = 0;

    /* gather right source of data */
    switch (qh->type) {
    case USB_ENDPOINT_XFER_CONTROL:
        /* control transfers always start with SETUP */
        is_in = 0;
        musb->ep0_stage = MUSB_EP0_START;
        buf = urb->setup_packet;
        len = 8;
        break;
    case USB_ENDPOINT_XFER_ISOC:
        qh->iso_idx = 0;
        qh->frame = 0;
        offset = urb->iso_frame_desc[0].offset;
        len = urb->iso_frame_desc[0].length;
        break;
    default:        /* bulk, interrupt */
        /* actual_length may be nonzero on retry paths */
        buf = urb->transfer_buffer + urb->actual_length;
        len = urb->transfer_buffer_length - urb->actual_length;
    }

    dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
            qh, urb, address, qh->epnum,
            is_in ? "in" : "out",
            ({char *s; switch (qh->type) {
            case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
            case USB_ENDPOINT_XFER_BULK:    s = "-bulk"; break;
            case USB_ENDPOINT_XFER_ISOC:    s = "-iso"; break;
            default:                        s = "-intr"; break;
            } s; }),
            epnum, buf + offset, len);

    /* Configure endpoint */
    musb_ep_set_qh(hw_ep, is_in, qh);
    musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

    /* transmit may have more work: start it when it is time */
    if (is_in)
        return;

    /* determine if the time is right for a periodic transfer */
    switch (qh->type) {
    case USB_ENDPOINT_XFER_ISOC:
    case USB_ENDPOINT_XFER_INT:
        dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
        frame = musb_readw(mbase, MUSB_FRAME);
        /* FIXME this doesn't implement that scheduling policy ...
         * or handle framecounter wrapping
         */
        if ((urb->transfer_flags & URB_ISO_ASAP)
                || (frame >= urb->start_frame)) {
            /* REVISIT the SOF irq handler shouldn't duplicate
             * this code; and we don't init urb->start_frame...
             */
            qh->frame = 0;
            goto start;
        } else {
            qh->frame = urb->start_frame;
            /* enable SOF interrupt so we can count down */
            dev_dbg(musb->controller, "SOF for %d\n", epnum);
#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
            musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
        }
        break;
    default:
start:
        dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
            hw_ep->tx_channel ? "dma" : "pio");

        if (!hw_ep->tx_channel)
            musb_h_tx_start(hw_ep);
        else if (is_cppi_enabled() || tusb_dma_omap())
            musb_h_tx_dma_start(hw_ep);
    }
}
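
/*
 * Worked example of the "gather right source of data" switch above
 * (numbers illustrative): a control URB always begins with its 8-byte
 * SETUP packet, so buf points at urb->setup_packet with len = 8; a bulk
 * retry resumes mid-buffer, e.g. with actual_length == 512 of a
 * 1024-byte transfer, buf/len become transfer_buffer + 512 and 512.
 */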
/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
    dev_dbg(musb->controller,
            "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
            urb, urb->complete, status,
            usb_pipedevice(urb->pipe),
            usb_pipeendpoint(urb->pipe),
            usb_pipein(urb->pipe) ? "in" : "out",
            urb->actual_length, urb->transfer_buffer_length
            );

    usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
    spin_unlock(&musb->lock);
    usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
    spin_lock(&musb->lock);
}
/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
                    struct urb *urb)
{
    void __iomem *epio = qh->hw_ep->regs;
    u16 csr;

    /*
     * FIXME: the current Mentor DMA code seems to have
     * problems getting toggle correct.
     */

    if (is_in)
        csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
    else
        csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

    usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}
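
/*
 * The toggle saved here is restored the next time the endpoint is
 * programmed: musb_ep_program() calls usb_gettoggle() and, for TX, sets
 * H_WR_DATATOGGLE | H_DATATOGGLE (or CLRDATATOG when the saved toggle
 * is 0), so DATA0/DATA1 sequencing survives a qh being descheduled and
 * later rescheduled, possibly on a different hardware endpoint.
 */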
/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
                  struct musb_hw_ep *hw_ep, int is_in)
{
    struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
    struct musb_hw_ep *ep = qh->hw_ep;
    int ready = qh->is_ready;
    int status;

    status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

    /* save toggle eagerly, for paranoia */
    switch (qh->type) {
    case USB_ENDPOINT_XFER_BULK:
    case USB_ENDPOINT_XFER_INT:
        musb_save_toggle(qh, is_in, urb);
        break;
    case USB_ENDPOINT_XFER_ISOC:
        if (status == 0 && urb->error_count)
            status = -EXDEV;
        break;
    }

    qh->is_ready = 0;
    musb_giveback(musb, urb, status);
    qh->is_ready = ready;

    /* reclaim resources (and bandwidth) ASAP; deschedule it, and
     * invalidate qh as soon as list_empty(&hep->urb_list)
     */
    if (list_empty(&qh->hep->urb_list)) {
        struct list_head *head;
        struct dma_controller *dma = musb->dma_controller;

        if (is_in) {
            ep->rx_reinit = 1;
            if (ep->rx_channel) {
                dma->channel_release(ep->rx_channel);
                ep->rx_channel = NULL;
            }
        } else {
            ep->tx_reinit = 1;
            if (ep->tx_channel) {
                dma->channel_release(ep->tx_channel);
                ep->tx_channel = NULL;
            }
        }

        /* Clobber old pointers to this qh */
        musb_ep_set_qh(ep, is_in, NULL);
        qh->hep->hcpriv = NULL;

        switch (qh->type) {

        case USB_ENDPOINT_XFER_CONTROL:
        case USB_ENDPOINT_XFER_BULK:
            /* fifo policy for these lists, except that NAKing
             * should rotate a qh to the end (for fairness).
             */
            if (qh->mux == 1) {
                head = qh->ring.prev;
                list_del(&qh->ring);
                kfree(qh);
                qh = first_qh(head);
                break;
            }

        case USB_ENDPOINT_XFER_ISOC:
        case USB_ENDPOINT_XFER_INT:
            /* this is where periodic bandwidth should be
             * de-allocated if it's tracked and allocated;
             * and where we'd update the schedule tree...
             */
            kfree(qh);
            qh = NULL;
            break;
        }
    }

    if (qh != NULL && qh->is_ready) {
        dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
            hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
        musb_start_urb(musb, is_in, qh);
    }
}
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
    /* we don't want fifo to fill itself again;
     * ignore dma (various models),
     * leave toggle alone (may not have been saved yet)
     */
    csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
    csr &= ~(MUSB_RXCSR_H_REQPKT
        | MUSB_RXCSR_H_AUTOREQ
        | MUSB_RXCSR_AUTOCLEAR);

    /* write 2x to allow double buffering */
    musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
    musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

    /* flush writebuffer */
    return musb_readw(hw_ep->regs, MUSB_RXCSR);
}
/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
    u16 rx_count;
    u8 *buf;
    u16 csr;
    bool done = false;
    u32 length;
    int do_flush = 0;
    struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
    void __iomem *epio = hw_ep->regs;
    struct musb_qh *qh = hw_ep->in_qh;
    int pipe = urb->pipe;
    void *buffer = urb->transfer_buffer;

    /* musb_ep_select(mbase, epnum); */
    rx_count = musb_readw(epio, MUSB_RXCOUNT);
    dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
            urb->transfer_buffer, qh->offset,
            urb->transfer_buffer_length);

    /* unload FIFO */
    if (usb_pipeisoc(pipe)) {
        int status = 0;
        struct usb_iso_packet_descriptor *d;

        if (iso_err) {
            status = -EILSEQ;
            urb->error_count++;
        }

        d = urb->iso_frame_desc + qh->iso_idx;
        buf = buffer + d->offset;
        length = d->length;
        if (rx_count > length) {
            if (status == 0) {
                status = -EOVERFLOW;
                urb->error_count++;
            }
            dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
            do_flush = 1;
        } else
            length = rx_count;
        urb->actual_length += length;
        d->actual_length = length;

        d->status = status;

        /* see if we are done */
        done = (++qh->iso_idx >= urb->number_of_packets);
    } else {
        /* non-isoch */
        buf = buffer + qh->offset;
        length = urb->transfer_buffer_length - qh->offset;
        if (rx_count > length) {
            if (urb->status == -EINPROGRESS)
                urb->status = -EOVERFLOW;
            dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
            do_flush = 1;
        } else
            length = rx_count;
        urb->actual_length += length;
        qh->offset += length;

        /* see if we are done */
        done = (urb->actual_length == urb->transfer_buffer_length)
            || (rx_count < qh->maxpacket)
            || (urb->status != -EINPROGRESS);
        if (done
                && (urb->status == -EINPROGRESS)
                && (urb->transfer_flags & URB_SHORT_NOT_OK)
                && (urb->actual_length
                    < urb->transfer_buffer_length))
            urb->status = -EREMOTEIO;
    }

    musb_read_fifo(hw_ep, length, buf);

    csr = musb_readw(epio, MUSB_RXCSR);
    csr |= MUSB_RXCSR_H_WZC_BITS;
    if (unlikely(do_flush))
        musb_h_flush_rxfifo(hw_ep, csr);
    else {
        /* REVISIT this assumes AUTOCLEAR is never set */
        csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
        if (!done)
            csr |= MUSB_RXCSR_H_REQPKT;
        musb_writew(epio, MUSB_RXCSR, csr);
    }

    return done;
}
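
/*
 * Example of the non-isochronous "done" test above (numbers
 * illustrative): reading into a 1024-byte buffer with maxpacket 64, a
 * 40-byte packet arriving after 512 bytes terminates the URB early
 * (rx_count < maxpacket); with URB_SHORT_NOT_OK set and only 552 bytes
 * accumulated, that early termination is then reported as -EREMOTEIO.
 */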
/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
    u16 csr;

    /* NOTE: we know the "rx" fifo reinit never triggers for ep0.
     * That always uses tx_reinit since ep0 repurposes TX register
     * offsets; the initial SETUP packet is also a kind of OUT.
     */

    /* if programmed for Tx, put it in RX mode */
    if (ep->is_shared_fifo) {
        csr = musb_readw(ep->regs, MUSB_TXCSR);
        if (csr & MUSB_TXCSR_MODE) {
            musb_h_tx_flush_fifo(ep);
            csr = musb_readw(ep->regs, MUSB_TXCSR);
            musb_writew(ep->regs, MUSB_TXCSR,
                    csr | MUSB_TXCSR_FRCDATATOG);
        }

        /*
         * Clear the MODE bit (and everything else) to enable Rx.
         * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
         */
        if (csr & MUSB_TXCSR_DMAMODE)
            musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
        musb_writew(ep->regs, MUSB_TXCSR, 0);

    /* scrub all previous state, clearing toggle */
    } else {
        csr = musb_readw(ep->regs, MUSB_RXCSR);
        if (csr & MUSB_RXCSR_RXPKTRDY)
            WARNING("rx%d, packet/%d ready?\n", ep->epnum,
                musb_readw(ep->regs, MUSB_RXCOUNT));

        musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
    }

    /* target addr and (for multipoint) hub addr/port */
    if (musb->is_multipoint) {
        musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
        musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
        musb_write_rxhubport(ep->target_regs, qh->h_port_reg);
    } else
        musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

    /* protocol/endpoint, interval/NAKlimit, i/o size */
    musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
    musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
    /* NOTE: bulk combining rewrites high bits of maxpacket */
    /* Set RXMAXP with the FIFO size of the endpoint
     * to disable double buffer mode.
     */
    if (musb->double_buffer_not_ok)
        musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
    else
        musb_writew(ep->regs, MUSB_RXMAXP,
                qh->maxpacket | ((qh->hb_mult - 1) << 11));

    ep->rx_reinit = 0;
}
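
/*
 * RXMAXP packing example (values illustrative): a high-bandwidth ISO
 * endpoint with maxpacket 1024 and hb_mult 3 programs
 * 1024 | ((3 - 1) << 11) = 0x1400, i.e. bits 10:0 carry the packet
 * size and bits 12:11 the "multiplier - 1" count of packets per
 * microframe.
 */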
static bool musb_tx_dma_program(struct dma_controller *dma,
        struct musb_hw_ep *hw_ep, struct musb_qh *qh,
        struct urb *urb, u32 offset, u32 length)
{
    struct dma_channel *channel = hw_ep->tx_channel;
    void __iomem *epio = hw_ep->regs;
    u16 pkt_size = qh->maxpacket;
    u16 csr;
    u8 mode;

#ifdef CONFIG_USB_INVENTRA_DMA
    if (length > channel->max_len)
        length = channel->max_len;

    csr = musb_readw(epio, MUSB_TXCSR);
    if (length > pkt_size) {
        mode = 1;
        csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
        /* autoset shouldn't be set in high bandwidth */
        if (qh->hb_mult == 1)
            csr |= MUSB_TXCSR_AUTOSET;
    } else {
        mode = 0;
        csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
        csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
    }
    channel->desired_mode = mode;
    musb_writew(epio, MUSB_TXCSR, csr);
#else
    if (!is_cppi_enabled() && !tusb_dma_omap())
        return false;

    channel->actual_len = 0;

    /*
     * TX uses "RNDIS" mode automatically but needs help
     * to identify the zero-length-final-packet case.
     */
    mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif

    qh->segsize = length;

    /*
     * Ensure the data reaches to main memory before starting
     * DMA transfer
     */
    wmb();

    if (!dma->channel_program(channel, pkt_size, mode,
            urb->transfer_dma + offset, length)) {
        dma->channel_release(channel);
        hw_ep->tx_channel = NULL;

        csr = musb_readw(epio, MUSB_TXCSR);
        csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
        musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
        return false;
    }
    return true;
}
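
/*
 * Mode selection sketch for the Inventra path above (numbers
 * illustrative): a 3 KB transfer on a 512-byte endpoint takes mode 1
 * (multi-packet DMA, DMAMODE + DMAENAB, plus AUTOSET unless high
 * bandwidth), while a 100-byte transfer fits in one packet and takes
 * mode 0 with only DMAENAB set.
 */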
/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
            struct urb *urb, int is_out,
            u8 *buf, u32 offset, u32 len)
{
    struct dma_controller *dma_controller;
    struct dma_channel *dma_channel;
    u8 dma_ok;
    void __iomem *mbase = musb->mregs;
    struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
    void __iomem *epio = hw_ep->regs;
    struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
    u16 packet_sz = qh->maxpacket;

    dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
                "h_addr%02x h_port%02x bytes %d\n",
            is_out ? "-->" : "<--",
            epnum, urb, urb->dev->speed,
            qh->addr_reg, qh->epnum, is_out ? "out" : "in",
            qh->h_addr_reg, qh->h_port_reg,
            len);

    musb_ep_select(mbase, epnum);

    /* candidate for DMA? */
    dma_controller = musb->dma_controller;
    if (is_dma_capable() && epnum && dma_controller) {
        dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
        if (!dma_channel) {
            dma_channel = dma_controller->channel_alloc(
                    dma_controller, hw_ep, is_out);
            if (is_out)
                hw_ep->tx_channel = dma_channel;
            else
                hw_ep->rx_channel = dma_channel;
        }
    } else
        dma_channel = NULL;

    /* make sure we clear DMAEnab, autoSet bits from previous run */

    /* OUT/transmit/EP0 or IN/receive? */
    if (is_out) {
        u16 csr;
        u16 int_txe;
        u16 load_count;

        csr = musb_readw(epio, MUSB_TXCSR);

        /* disable interrupt in case we flush */
        int_txe = musb_readw(mbase, MUSB_INTRTXE);
        musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

        /* general endpoint setup */
        if (epnum) {
            /* flush all old state, set default */
            musb_h_tx_flush_fifo(hw_ep);

            /*
             * We must not clear the DMAMODE bit before or in
             * the same cycle with the DMAENAB bit, so we clear
             * the latter first...
             */
            csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
                    | MUSB_TXCSR_AUTOSET
                    | MUSB_TXCSR_DMAENAB
                    | MUSB_TXCSR_FRCDATATOG
                    | MUSB_TXCSR_H_RXSTALL
                    | MUSB_TXCSR_H_ERROR
                    | MUSB_TXCSR_TXPKTRDY
                    );
            csr |= MUSB_TXCSR_MODE;

            if (usb_gettoggle(urb->dev, qh->epnum, 1))
                csr |= MUSB_TXCSR_H_WR_DATATOGGLE
                    | MUSB_TXCSR_H_DATATOGGLE;
            else
                csr |= MUSB_TXCSR_CLRDATATOG;

            musb_writew(epio, MUSB_TXCSR, csr);
            /* REVISIT may need to clear FLUSHFIFO ... */
            csr &= ~MUSB_TXCSR_DMAMODE;
            musb_writew(epio, MUSB_TXCSR, csr);
            csr = musb_readw(epio, MUSB_TXCSR);
        } else {
            /* endpoint 0: just flush */
            musb_h_ep0_flush_fifo(hw_ep);
        }

        /* target addr and (for multipoint) hub addr/port */
        if (musb->is_multipoint) {
            musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
            musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
            musb_write_txhubport(mbase, epnum, qh->h_port_reg);
            /* FIXME if !epnum, do the same for RX ... */
        } else
            musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

        /* protocol/endpoint/interval/NAKlimit */
        if (epnum) {
            musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
            if (musb->double_buffer_not_ok)
                musb_writew(epio, MUSB_TXMAXP,
                        hw_ep->max_packet_sz_tx);
            else if (can_bulk_split(musb, qh->type))
                musb_writew(epio, MUSB_TXMAXP, packet_sz
                    | ((hw_ep->max_packet_sz_tx /
                        packet_sz) - 1) << 11);
            else
                musb_writew(epio, MUSB_TXMAXP,
                        qh->maxpacket |
                        ((qh->hb_mult - 1) << 11));
            musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
        } else {
            musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
            if (musb->is_multipoint)
                musb_writeb(epio, MUSB_TYPE0,
                        qh->type_reg);
        }

        if (can_bulk_split(musb, qh->type))
            load_count = min((u32) hw_ep->max_packet_sz_tx,
                        len);
        else
            load_count = min((u32) packet_sz, len);

        if (dma_channel && musb_tx_dma_program(dma_controller,
                    hw_ep, qh, urb, offset, len))
            load_count = 0;

        if (load_count) {
            /* PIO to load FIFO */
            qh->segsize = load_count;
            musb_write_fifo(hw_ep, load_count, buf);
        }

        /* re-enable interrupt */
        musb_writew(mbase, MUSB_INTRTXE, int_txe);

    /* IN/receive */
    } else {
        u16 csr;

        if (hw_ep->rx_reinit) {
            musb_rx_reinit(musb, qh, hw_ep);

            /* init new state: toggle and NYET, maybe DMA later */
            if (usb_gettoggle(urb->dev, qh->epnum, 0))
                csr = MUSB_RXCSR_H_WR_DATATOGGLE
                    | MUSB_RXCSR_H_DATATOGGLE;
            else
                csr = 0;
            if (qh->type == USB_ENDPOINT_XFER_INT)
                csr |= MUSB_RXCSR_DISNYET;

        } else {
            csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

            if (csr & (MUSB_RXCSR_RXPKTRDY
                    | MUSB_RXCSR_DMAENAB
                    | MUSB_RXCSR_H_REQPKT))
                ERR("broken !rx_reinit, ep%d csr %04x\n",
                        hw_ep->epnum, csr);

            /* scrub any stale state, leaving toggle alone */
            csr &= MUSB_RXCSR_DISNYET;
        }

        /* kick things off */

        if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
            /* Candidate for DMA */
            dma_channel->actual_len = 0L;
            qh->segsize = len;

            /* AUTOREQ is in a DMA register */
            musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
            csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

            /*
             * Unless caller treats short RX transfers as
             * errors, we dare not queue multiple transfers.
             */
            dma_ok = dma_controller->channel_program(dma_channel,
                    packet_sz, !(urb->transfer_flags &
                             URB_SHORT_NOT_OK),
                    urb->transfer_dma + offset,
                    qh->segsize);
            if (!dma_ok) {
                dma_controller->channel_release(dma_channel);
                hw_ep->rx_channel = dma_channel = NULL;
            } else
                csr |= MUSB_RXCSR_DMAENAB;
        }

        csr |= MUSB_RXCSR_H_REQPKT;
        dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
        musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
        csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
    }
}
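
/*
 * TXMAXP bulk-split example (numbers illustrative): with packet_sz 512
 * and a 4096-byte TX FIFO, can_bulk_split() programs
 * 512 | ((4096/512 - 1) << 11), letting the controller split one FIFO
 * load into eight 512-byte bulk packets.
 */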
/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
    bool more = false;
    u8 *fifo_dest = NULL;
    u16 fifo_count = 0;
    struct musb_hw_ep *hw_ep = musb->control_ep;
    struct musb_qh *qh = hw_ep->in_qh;
    struct usb_ctrlrequest *request;

    switch (musb->ep0_stage) {
    case MUSB_EP0_IN:
        fifo_dest = urb->transfer_buffer + urb->actual_length;
        fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
                   urb->actual_length);
        if (fifo_count < len)
            urb->status = -EOVERFLOW;

        musb_read_fifo(hw_ep, fifo_count, fifo_dest);

        urb->actual_length += fifo_count;
        if (len < qh->maxpacket) {
            /* always terminate on short read; it's
             * rarely reported as an error.
             */
        } else if (urb->actual_length <
                urb->transfer_buffer_length)
            more = true;
        break;
    case MUSB_EP0_START:
        request = (struct usb_ctrlrequest *) urb->setup_packet;

        if (!request->wLength) {
            dev_dbg(musb->controller, "start no-DATA\n");
            break;
        } else if (request->bRequestType & USB_DIR_IN) {
            dev_dbg(musb->controller, "start IN-DATA\n");
            musb->ep0_stage = MUSB_EP0_IN;
            more = true;
            break;
        } else {
            dev_dbg(musb->controller, "start OUT-DATA\n");
            musb->ep0_stage = MUSB_EP0_OUT;
            more = true;
        }
        /* FALLTHROUGH */
    case MUSB_EP0_OUT:
        fifo_count = min_t(size_t, qh->maxpacket,
                   urb->transfer_buffer_length -
                   urb->actual_length);
        if (fifo_count) {
            fifo_dest = (u8 *) (urb->transfer_buffer
                    + urb->actual_length);
            dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
                    fifo_count,
                    (fifo_count == 1) ? "" : "s",
                    fifo_dest);
            musb_write_fifo(hw_ep, fifo_count, fifo_dest);

            urb->actual_length += fifo_count;
            more = true;
        }
        break;
    default:
        ERR("bogus ep0 stage %d\n", musb->ep0_stage);
        break;
    }

    return more;
}
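
/*
 * Stage walkthrough (illustrative): a GET_DESCRIPTOR request enters as
 * MUSB_EP0_START, whose nonzero wLength and IN direction move the
 * state to MUSB_EP0_IN; each subsequent call drains COUNT0 bytes from
 * the FIFO until a short packet (len < maxpacket) or a full buffer
 * returns false, at which point the caller flags MUSB_EP0_STATUS.
 */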
/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
    struct urb *urb;
    u16 csr, len;
    int status = 0;
    void __iomem *mbase = musb->mregs;
    struct musb_hw_ep *hw_ep = musb->control_ep;
    void __iomem *epio = hw_ep->regs;
    struct musb_qh *qh = hw_ep->in_qh;
    bool complete = false;
    irqreturn_t retval = IRQ_NONE;

    /* ep0 only has one queue, "in" */
    urb = next_urb(qh);

    musb_ep_select(mbase, 0);
    csr = musb_readw(epio, MUSB_CSR0);
    len = (csr & MUSB_CSR0_RXPKTRDY)
            ? musb_readb(epio, MUSB_COUNT0)
            : 0;

    dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
        csr, qh, len, urb, musb->ep0_stage);

    /* if we just did status stage, we are done */
    if (MUSB_EP0_STATUS == musb->ep0_stage) {
        retval = IRQ_HANDLED;
        complete = true;
    }

    /* prepare status */
    if (csr & MUSB_CSR0_H_RXSTALL) {
        dev_dbg(musb->controller, "STALLING ENDPOINT\n");
        status = -EPIPE;

    } else if (csr & MUSB_CSR0_H_ERROR) {
        dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
        status = -EPROTO;

    } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
        dev_dbg(musb->controller, "control NAK timeout\n");

        /* NOTE: this code path would be a good place to PAUSE a
         * control transfer, if another one is queued, so that
         * ep0 is more likely to stay busy.  That's already done
         * for bulk RX transfers.
         *
         * if (qh->ring.next != &musb->control), then
         * we have a candidate... NAKing is *NOT* an error
         */
        musb_writew(epio, MUSB_CSR0, 0);
        retval = IRQ_HANDLED;
    }

    if (status) {
        dev_dbg(musb->controller, "aborting\n");
        retval = IRQ_HANDLED;
        if (urb)
            urb->status = status;
        complete = true;

        /* use the proper sequence to abort the transfer */
        if (csr & MUSB_CSR0_H_REQPKT) {
            csr &= ~MUSB_CSR0_H_REQPKT;
            musb_writew(epio, MUSB_CSR0, csr);
            csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
            musb_writew(epio, MUSB_CSR0, csr);
        } else {
            musb_h_ep0_flush_fifo(hw_ep);
        }

        musb_writeb(epio, MUSB_NAKLIMIT0, 0);

        /* clear it */
        musb_writew(epio, MUSB_CSR0, 0);
    }

    if (unlikely(!urb)) {
        /* stop endpoint since we have no place for its data, this
         * SHOULD NEVER HAPPEN! */
        ERR("no URB for end 0\n");

        musb_h_ep0_flush_fifo(hw_ep);
        goto done;
    }

    if (!complete) {
        /* call common logic and prepare response */
        if (musb_h_ep0_continue(musb, len, urb)) {
            /* more packets required */
            csr = (MUSB_EP0_IN == musb->ep0_stage)
                ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
        } else {
            /* data transfer complete; perform status phase */
            if (usb_pipeout(urb->pipe)
                    || !urb->transfer_buffer_length)
                csr = MUSB_CSR0_H_STATUSPKT
                    | MUSB_CSR0_H_REQPKT;
            else
                csr = MUSB_CSR0_H_STATUSPKT
                    | MUSB_CSR0_TXPKTRDY;

            /* flag status stage */
            musb->ep0_stage = MUSB_EP0_STATUS;

            dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);
        }
        musb_writew(epio, MUSB_CSR0, csr);
        retval = IRQ_HANDLED;
    } else
        musb->ep0_stage = MUSB_EP0_IDLE;

    /* call completion handler if done */
    if (complete)
        musb_advance_schedule(musb, urb, hw_ep, 1);
done:
    return retval;
}
#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif

/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
    int pipe;
    bool done = false;
    u16 tx_csr;
    size_t length = 0;
    size_t offset = 0;
    struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
    void __iomem *epio = hw_ep->regs;
    struct musb_qh *qh = hw_ep->out_qh;
    struct urb *urb = next_urb(qh);
    u32 status = 0;
    void __iomem *mbase = musb->mregs;
    struct dma_channel *dma;
    bool transfer_pending = false;

    musb_ep_select(mbase, epnum);
    tx_csr = musb_readw(epio, MUSB_TXCSR);

    /* with CPPI, DMA sometimes triggers "extra" irqs */
    if (!urb) {
        dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
        return;
    }

    pipe = urb->pipe;
    dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
    dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
            dma ? ", dma" : "");

    /* check for errors */
    if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
        /* dma was disabled, fifo flushed */
        dev_dbg(musb->controller, "TX end %d stall\n", epnum);

        /* stall; record URB status */
        status = -EPIPE;

    } else if (tx_csr & MUSB_TXCSR_H_ERROR) {
        /* (NON-ISO) dma was disabled, fifo flushed */
        dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);

        status = -ETIMEDOUT;

    } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
        dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);

        /* NOTE: this code path would be a good place to PAUSE a
         * transfer, if there's some other (nonperiodic) tx urb
         * that could use this fifo.  (dma complicates it...)
         * That's already done for bulk RX transfers.
         *
         * if (bulk && qh->ring.next != &musb->out_bulk), then
         * we have a candidate... NAKing is *NOT* an error
         */
        musb_ep_select(mbase, epnum);
        musb_writew(epio, MUSB_TXCSR,
                MUSB_TXCSR_H_WZC_BITS
                | MUSB_TXCSR_TXPKTRDY);
        return;
    }

    if (status) {
        if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
            dma->status = MUSB_DMA_STATUS_CORE_ABORT;
            (void) musb->dma_controller->channel_abort(dma);
        }

        /* do the proper sequence to abort the transfer in the
         * usb core; the dma engine should already be stopped.
         */
        musb_h_tx_flush_fifo(hw_ep);
        tx_csr &= ~(MUSB_TXCSR_AUTOSET
                | MUSB_TXCSR_DMAENAB
                | MUSB_TXCSR_H_ERROR
                | MUSB_TXCSR_H_RXSTALL
                | MUSB_TXCSR_H_NAKTIMEOUT
                );

        musb_ep_select(mbase, epnum);
        musb_writew(epio, MUSB_TXCSR, tx_csr);
        /* REVISIT may need to clear FLUSHFIFO ... */
        musb_writew(epio, MUSB_TXCSR, tx_csr);
        musb_writeb(epio, MUSB_TXINTERVAL, 0);

        done = true;
    }

    /* second cppi case */
    if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
        dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
        return;
    }

    if (is_dma_capable() && dma && !status) {
        /*
         * DMA has completed.  But if we're using DMA mode 1 (multi
         * packet DMA), we need a terminal TXPKTRDY interrupt before
         * we can consider this transfer completed, lest we trash
         * its last packet when writing the next URB's data.  So we
         * switch back to mode 0 to get that interrupt; we'll come
         * back here once it happens.
         */
        if (tx_csr & MUSB_TXCSR_DMAMODE) {
            /*
             * We shouldn't clear DMAMODE with DMAENAB set; so
             * clear them in a safe order.  That should be OK
             * once TXPKTRDY has been set (and I've never seen
             * it being 0 at this moment -- DMA interrupt latency
             * is significant) but if it hasn't been then we have
             * no choice but to stop being polite and ignore the
             * programmer's guide... :-)
             *
             * Note that we must write TXCSR with TXPKTRDY cleared
             * in order not to re-trigger the packet send (this bit
             * can't be cleared by CPU), and there's another caveat:
             * TXPKTRDY may be set shortly and then cleared in the
             * double-buffered FIFO mode, so we do an extra TXCSR
             * read for debouncing...
             */
            tx_csr &= musb_readw(epio, MUSB_TXCSR);
            if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
                tx_csr &= ~(MUSB_TXCSR_DMAENAB |
                        MUSB_TXCSR_TXPKTRDY);
                musb_writew(epio, MUSB_TXCSR,
                        tx_csr | MUSB_TXCSR_H_WZC_BITS);
            }
            tx_csr &= ~(MUSB_TXCSR_DMAMODE |
                    MUSB_TXCSR_TXPKTRDY);
            musb_writew(epio, MUSB_TXCSR,
                    tx_csr | MUSB_TXCSR_H_WZC_BITS);

            /*
             * There is no guarantee that we'll get an interrupt
             * after clearing DMAMODE as we might have done this
             * too late (after TXPKTRDY was cleared by controller).
             * Re-read TXCSR as we have spoiled its previous value.
             */
            tx_csr = musb_readw(epio, MUSB_TXCSR);
        }

        /*
         * We may get here from a DMA completion or TXPKTRDY interrupt.
         * In any case, we must check the FIFO status here and bail out
         * only if the FIFO still has data -- that should prevent the
         * "missed" TXPKTRDY interrupts and deal with double-buffered
         * FIFO mode too...
         */
        if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
            dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
                "CSR %04x\n", tx_csr);
            return;
        }
    }

    if (!status || dma || usb_pipeisoc(pipe)) {
        if (dma)
            length = dma->actual_len;
        else
            length = qh->segsize;
        qh->offset += length;

        if (usb_pipeisoc(pipe)) {
            struct usb_iso_packet_descriptor *d;

            d = urb->iso_frame_desc + qh->iso_idx;
            d->actual_length = length;
            d->status = status;
            if (++qh->iso_idx >= urb->number_of_packets) {
                done = true;
            } else {
                d++;
                offset = d->offset;
                length = d->length;
            }
        } else if (dma && urb->transfer_buffer_length == qh->offset) {
            done = true;
        } else {
            /* see if we need to send more data, or ZLP */
            if (qh->segsize < qh->maxpacket)
                done = true;
            else if (qh->offset == urb->transfer_buffer_length
                    && !(urb->transfer_flags
                        & URB_ZERO_PACKET))
                done = true;
            if (!done) {
                offset = qh->offset;
                length = urb->transfer_buffer_length - offset;
                transfer_pending = true;
            }
        }
    }

    /* urb->status != -EINPROGRESS means request has been faulted,
     * so we must abort this transfer after cleanup
     */
    if (urb->status != -EINPROGRESS) {
        done = true;
        if (status == 0)
            status = urb->status;
    }

    if (done) {
        /* set status */
        urb->status = status;
        urb->actual_length = qh->offset;
        musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
        return;
    } else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
        if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
                offset, length)) {
            if (is_cppi_enabled() || tusb_dma_omap())
                musb_h_tx_dma_start(hw_ep);
            return;
        }
    } else if (tx_csr & MUSB_TXCSR_DMAENAB) {
        dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
        return;
    }

    /*
     * PIO: start next packet in this URB.
     *
     * REVISIT: some docs say that when hw_ep->tx_double_buffered,
     * (and presumably, FIFO is not half-full) we should write *two*
     * packets before updating TXCSR; other docs disagree...
     */
    if (length > qh->maxpacket)
        length = qh->maxpacket;
    /* Unmap the buffer so that CPU can use it */
    usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
    musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
    qh->segsize = length;

    musb_ep_select(mbase, epnum);
    musb_writew(epio, MUSB_TXCSR,
            MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}
#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |		else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */

#endif

/* Schedule next QH from musb->in_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
{
    struct dma_channel *dma;
    struct urb *urb;
    void __iomem *mbase = musb->mregs;
    void __iomem *epio = ep->regs;
    struct musb_qh *cur_qh, *next_qh;
    u16 rx_csr;

    musb_ep_select(mbase, ep->epnum);
    dma = is_dma_capable() ? ep->rx_channel : NULL;

    /* clear nak timeout bit */
    rx_csr = musb_readw(epio, MUSB_RXCSR);
    rx_csr |= MUSB_RXCSR_H_WZC_BITS;
    rx_csr &= ~MUSB_RXCSR_DATAERROR;
    musb_writew(epio, MUSB_RXCSR, rx_csr);

    cur_qh = first_qh(&musb->in_bulk);
    if (cur_qh) {
        urb = next_urb(cur_qh);
        if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
            dma->status = MUSB_DMA_STATUS_CORE_ABORT;
            musb->dma_controller->channel_abort(dma);
            urb->actual_length += dma->actual_len;
            dma->actual_len = 0L;
        }
        musb_save_toggle(cur_qh, 1, urb);

        /* move cur_qh to end of queue */
        list_move_tail(&cur_qh->ring, &musb->in_bulk);

        /* get the next qh from musb->in_bulk */
        next_qh = first_qh(&musb->in_bulk);

        /* set rx_reinit and schedule the next qh */
        ep->rx_reinit = 1;
        musb_start_urb(musb, 1, next_qh);
    }
}
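
/*
 * Rotation example (illustrative): with three bulk-IN qhs multiplexed
 * on the reserved bulk endpoint, a NAK timeout on the head qh moves it
 * to the tail of musb->in_bulk and restarts the new head, so a device
 * that NAKs indefinitely (e.g. an idle usb-serial adapter) cannot
 * starve the others.
 */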
/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
    struct urb *urb;
    struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
    void __iomem *epio = hw_ep->regs;
    struct musb_qh *qh = hw_ep->in_qh;
    size_t xfer_len;
    void __iomem *mbase = musb->mregs;
    int pipe;
    u16 rx_csr, val;
    bool iso_err = false;
    bool done = false;
    u32 status;
    struct dma_channel *dma;

    musb_ep_select(mbase, epnum);

    urb = next_urb(qh);
    dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
    status = 0;
    xfer_len = 0;

    rx_csr = musb_readw(epio, MUSB_RXCSR);
    val = rx_csr;

    if (unlikely(!urb)) {
        /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
         * usbtest #11 (unlinks) triggers it regularly, sometimes
         * with fifo full.  (Only with DMA??)
         */
        dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
            musb_readw(epio, MUSB_RXCOUNT));
        musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
        return;
    }

    pipe = urb->pipe;

    dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
        epnum, rx_csr, urb->actual_length,
        dma ? dma->actual_len : 0);

    /* check for errors, concurrent stall & unlink is not really
     * handled yet! */
    if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
        dev_dbg(musb->controller, "RX end %d STALL\n", epnum);

        /* stall; record URB status */
        status = -EPIPE;

    } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
        dev_dbg(musb->controller, "end %d RX proto error\n", epnum);

        status = -EPROTO;
        musb_writeb(epio, MUSB_RXINTERVAL, 0);

    } else if (rx_csr & MUSB_RXCSR_DATAERROR) {

        if (USB_ENDPOINT_XFER_ISOC != qh->type) {
            dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);

            /* NOTE: NAKing is *NOT* an error, so we want to
             * continue.  Except ... if there's a request for
             * another QH, use that instead of starving it.
             *
             * Devices like Ethernet and serial adapters keep
             * reads posted at all times, which will starve
             * other devices without this logic.
             */
            if (usb_pipebulk(urb->pipe)
                    && qh->mux == 1
                    && !list_is_singular(&musb->in_bulk)) {
                musb_bulk_rx_nak_timeout(musb, hw_ep);
                return;
            }
            musb_ep_select(mbase, epnum);
            rx_csr |= MUSB_RXCSR_H_WZC_BITS;
            rx_csr &= ~MUSB_RXCSR_DATAERROR;
            musb_writew(epio, MUSB_RXCSR, rx_csr);

            goto finish;
        } else {
            dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
            /* packet error reported later */
            iso_err = true;
        }
    } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
        dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
                epnum);
        status = -EPROTO;
    }

    /* faults abort the transfer */
    if (status) {
        /* clean up dma and collect transfer count */
        if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
            dma->status = MUSB_DMA_STATUS_CORE_ABORT;
            (void) musb->dma_controller->channel_abort(dma);
            xfer_len = dma->actual_len;
        }
        musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
        musb_writeb(epio, MUSB_RXINTERVAL, 0);
        done = true;
        goto finish;
    }

    if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
        /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
        ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
        goto finish;
    }

    /* thorough shutdown for now ... given more precise fault handling
     * and better queueing support, we might keep a DMA pipeline going
     * while processing this irq for earlier completions.
     */

    /* FIXME this is _way_ too much in-line logic for Mentor DMA */

#ifndef CONFIG_USB_INVENTRA_DMA
    if (rx_csr & MUSB_RXCSR_H_REQPKT) {
        /* REVISIT this happened for a while on some short reads...
         * the cleanup still needs investigation... looks bad...
         * and also duplicates dma cleanup code above ... plus,
         * shouldn't this be the "half full" double buffer case?
         */
        if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
            dma->status = MUSB_DMA_STATUS_CORE_ABORT;
            (void) musb->dma_controller->channel_abort(dma);
            xfer_len = dma->actual_len;
            done = true;
        }

        dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
                xfer_len, dma ? ", dma" : "");
        rx_csr &= ~MUSB_RXCSR_H_REQPKT;

        musb_ep_select(mbase, epnum);
        musb_writew(epio, MUSB_RXCSR,
                MUSB_RXCSR_H_WZC_BITS | rx_csr);
    }
#endif
    if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
        xfer_len = dma->actual_len;

        val &= ~(MUSB_RXCSR_DMAENAB
            | MUSB_RXCSR_H_AUTOREQ
            | MUSB_RXCSR_AUTOCLEAR
            | MUSB_RXCSR_RXPKTRDY);
        musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#ifdef CONFIG_USB_INVENTRA_DMA
        if (usb_pipeisoc(pipe)) {
            struct usb_iso_packet_descriptor *d;

            d = urb->iso_frame_desc + qh->iso_idx;
            d->actual_length = xfer_len;

            /* even if there was an error, we did the dma
             * for iso_frame_desc->length
             */
            if (d->status != -EILSEQ && d->status != -EOVERFLOW)
                d->status = 0;

            if (++qh->iso_idx >= urb->number_of_packets)
                done = true;
            else
                done = false;

        } else {
            /* done if urb buffer is full or short packet is recd */
            done = (urb->actual_length + xfer_len >=
                    urb->transfer_buffer_length
                || dma->actual_len < qh->maxpacket);
        }

        /* send IN token for next packet, without AUTOREQ */
        if (!done) {
            val |= MUSB_RXCSR_H_REQPKT;
            musb_writew(epio, MUSB_RXCSR,
                MUSB_RXCSR_H_WZC_BITS | val);
        }

        dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
            done ? "off" : "reset",
            musb_readw(epio, MUSB_RXCSR),
            musb_readw(epio, MUSB_RXCOUNT));
#else
        done = true;
#endif
    } else if (urb->status == -EINPROGRESS) {
        /* if no errors, be sure a packet is ready for unloading */
        if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
            status = -EPROTO;
            ERR("Rx interrupt with no errors or packet!\n");

            /* FIXME this is another "SHOULD NEVER HAPPEN" */

            /* do the proper sequence to abort the transfer */
            musb_ep_select(mbase, epnum);
            val &= ~MUSB_RXCSR_H_REQPKT;
            musb_writew(epio, MUSB_RXCSR, val);
            goto finish;
        }

        /* we are expecting IN packets */
#ifdef CONFIG_USB_INVENTRA_DMA
        if (dma) {
            struct dma_controller *c;
            u16 rx_count;
            int ret, length;
            dma_addr_t buf;

            rx_count = musb_readw(epio, MUSB_RXCOUNT);

            dev_dbg(musb->controller, "RX%d count %d, buffer 0x%x len %d/%d\n",
                    epnum, rx_count,
                    urb->transfer_dma
                        + urb->actual_length,
                    qh->offset,
                    urb->transfer_buffer_length);

            c = musb->dma_controller;

            if (usb_pipeisoc(pipe)) {
                int d_status = 0;
                struct usb_iso_packet_descriptor *d;

                d = urb->iso_frame_desc + qh->iso_idx;

                if (iso_err) {
                    d_status = -EILSEQ;
                    urb->error_count++;
                }
                if (rx_count > d->length) {
                    if (d_status == 0) {
                        d_status = -EOVERFLOW;
                        urb->error_count++;
                    }
                    dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",\
                        rx_count, d->length);

                    length = d->length;
                } else
                    length = rx_count;
                d->status = d_status;
                buf = urb->transfer_dma + d->offset;
            } else {
                length = rx_count;
                buf = urb->transfer_dma +
                        urb->actual_length;
            }

            dma->desired_mode = 0;
#ifdef USE_MODE1
            /* because of the issue below, mode 1 will
             * only rarely behave with correct semantics.
             */
            if ((urb->transfer_flags &
                        URB_SHORT_NOT_OK)
                && (urb->transfer_buffer_length -
                        urb->actual_length)
                    > qh->maxpacket)
                dma->desired_mode = 1;
            if (rx_count < hw_ep->max_packet_sz_rx) {
                length = rx_count;
                dma->desired_mode = 0;
            } else {
                length = urb->transfer_buffer_length;
            }
#endif

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 *	Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */

            val = musb_readw(epio, MUSB_RXCSR);
            val &= ~MUSB_RXCSR_H_REQPKT;

            if (dma->desired_mode == 0)
                val &= ~MUSB_RXCSR_H_AUTOREQ;
            else
                val |= MUSB_RXCSR_H_AUTOREQ;
            val |= MUSB_RXCSR_DMAENAB;

            /* autoclear shouldn't be set in high bandwidth */
            if (qh->hb_mult == 1)
                val |= MUSB_RXCSR_AUTOCLEAR;

            musb_writew(epio, MUSB_RXCSR,
                MUSB_RXCSR_H_WZC_BITS | val);

            /* REVISIT if when actual_length != 0,
             * transfer_buffer_length needs to be
             * adjusted first...
             */
            ret = c->channel_program(
                dma, qh->maxpacket,
                dma->desired_mode, buf, length);

            if (!ret) {
                c->channel_release(dma);
                hw_ep->rx_channel = NULL;
                dma = NULL;
                val = musb_readw(epio, MUSB_RXCSR);
                val &= ~(MUSB_RXCSR_DMAENAB
                    | MUSB_RXCSR_H_AUTOREQ
                    | MUSB_RXCSR_AUTOCLEAR);
                musb_writew(epio, MUSB_RXCSR, val);
            }
        }
#endif	/* Mentor DMA */

        if (!dma) {
            /* Unmap the buffer so that CPU can use it */
            usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
            done = musb_host_packet_rx(musb, urb,
                    epnum, iso_err);
            dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
        }
    }

finish:
    urb->actual_length += xfer_len;
    qh->offset += xfer_len;
    if (done) {
        if (urb->status == -EINPROGRESS)
            urb->status = status;
        musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
    }
}
/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
    struct musb *musb,
    struct musb_qh *qh,
    int is_in)
{
    int idle;
    int best_diff;
    int best_end, epnum;
    struct musb_hw_ep *hw_ep = NULL;
    struct list_head *head = NULL;
    u8 toggle;
    u8 txtype;
    struct urb *urb = next_urb(qh);

    /* use fixed hardware for control and bulk */
    if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
        head = &musb->control;
        hw_ep = musb->control_ep;
        goto success;
    }

    /* else, periodic transfers get muxed to other endpoints */

    /*
     * We know this qh hasn't been scheduled, so all we need to do
     * is choose which hardware endpoint to put it on ...
     *
     * REVISIT what we really want here is a regular schedule tree
     * like e.g. OHCI uses.
     */
    best_diff = 4096;
    best_end = -1;

    for (epnum = 1, hw_ep = musb->endpoints + 1;
            epnum < musb->nr_endpoints;
            epnum++, hw_ep++) {
        int diff;

        if (musb_ep_get_qh(hw_ep, is_in) != NULL)
            continue;

        if (hw_ep == musb->bulk_ep)
            continue;

        if (is_in)
            diff = hw_ep->max_packet_sz_rx;
        else
            diff = hw_ep->max_packet_sz_tx;
        diff -= (qh->maxpacket * qh->hb_mult);

        if (diff >= 0 && best_diff > diff) {

            /*
             * Mentor controller has a bug in that if we schedule
             * a BULK Tx transfer on an endpoint that had earlier
             * handled ISOC then the BULK transfer has to start on
             * a zero toggle.  If the BULK transfer starts on a 1
             * toggle then this transfer will fail as the mentor
             * controller starts the Bulk transfer on a 0 toggle
             * irrespective of the programming of the toggle bits
             * in the TXCSR register.  Check for this condition
             * while allocating the EP for a Tx Bulk transfer.  If
             * so skip this EP.
             */
            hw_ep = musb->endpoints + epnum;
            toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
            txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
                    >> 4) & 0x3;
            if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
                toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
                continue;

            best_diff = diff;
            best_end = epnum;
        }
    }
    /* use bulk reserved ep1 if no other ep is free */
    if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
        hw_ep = musb->bulk_ep;
        if (is_in)
            head = &musb->in_bulk;
        else
            head = &musb->out_bulk;

        /* Enable bulk RX NAK timeout scheme when bulk requests are
         * multiplexed.  This scheme doesn't work in high speed to full
         * speed scenario as NAK interrupts are not coming from a
         * full speed device connected to a high speed device.
         * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
         * 4 (8 frame or 8ms) for FS device.
         */
        if (is_in && qh->dev)
            qh->intv_reg =
                (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
        goto success;
    } else if (best_end < 0) {
        return -ENOSPC;
    }

    idle = 1;
    qh->mux = 0;
    hw_ep = musb->endpoints + best_end;
    dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
success:
    if (head) {
        idle = list_empty(head);
        list_add_tail(&qh->ring, head);
        qh->mux = 1;
    }
    qh->hw_ep = hw_ep;
    qh->hep->hcpriv = qh;
    if (idle)
        musb_start_urb(musb, is_in, qh);
    return 0;
}
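
/*
 * Best-fit example for the scan above (numbers illustrative): a 64-byte
 * interrupt qh offered endpoints with 512- and 128-byte FIFOs yields
 * diffs of 448 and 64; the 128-byte endpoint wins, keeping larger FIFOs
 * free for qhs that actually need them.
 */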
/* check if transaction translator is needed for device */
static int tt_needed(struct musb *musb, struct usb_device *dev)
{
    if ((musb_readb(musb->mregs, MUSB_POWER) & MUSB_POWER_HSMODE) &&
            (dev->speed < USB_SPEED_HIGH))
        return 1;
    return 0;
}
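
/*
 * In other words, a transaction translator is needed exactly when the
 * controller is running in high-speed mode (MUSB_POWER_HSMODE set) but
 * the target device enumerated at full or low speed, i.e. it sits
 * behind a high-speed hub doing split transactions.
 */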
#ifndef __UBOOT__
static int musb_urb_enqueue(
#else
int musb_urb_enqueue(
#endif
    struct usb_hcd *hcd,
    struct urb *urb,
    gfp_t mem_flags)
{
    unsigned long flags;
    struct musb *musb = hcd_to_musb(hcd);
    struct usb_host_endpoint *hep = urb->ep;
    struct musb_qh *qh;
    struct usb_endpoint_descriptor *epd = &hep->desc;
    int ret;
    unsigned type_reg;
    unsigned interval;

    /* host role must be active */
    if (!is_host_active(musb) || !musb->is_active)
        return -ENODEV;

    spin_lock_irqsave(&musb->lock, flags);
    ret = usb_hcd_link_urb_to_ep(hcd, urb);
    qh = ret ? NULL : hep->hcpriv;
    if (qh)
        urb->hcpriv = qh;
    spin_unlock_irqrestore(&musb->lock, flags);

    /* DMA mapping was already done, if needed, and this urb is on
     * hep->urb_list now ... so we're done, unless hep wasn't yet
     * scheduled onto a live qh.
     *
     * REVISIT best to keep hep->hcpriv valid until the endpoint gets
     * disabled, testing for empty qh->ring and avoiding qh setup costs
     * except for the first urb queued after a config change.
     */
    if (qh || ret)
        return ret;

    /* Allocate and initialize qh, minimizing the work done each time
     * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
     *
     * REVISIT consider a dedicated qh kmem_cache, so it's harder
     * for bugs in other kernel code to break this driver...
     */
    qh = kzalloc(sizeof *qh, mem_flags);
    if (!qh) {
        spin_lock_irqsave(&musb->lock, flags);
        usb_hcd_unlink_urb_from_ep(hcd, urb);
        spin_unlock_irqrestore(&musb->lock, flags);
        return -ENOMEM;
    }

    qh->hep = hep;
    qh->dev = urb->dev;
    INIT_LIST_HEAD(&qh->ring);
    qh->is_ready = 1;

    qh->maxpacket = usb_endpoint_maxp(epd);
    qh->type = usb_endpoint_type(epd);

    /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
     * Some musb cores don't support high bandwidth ISO transfers; and
     * we don't (yet!) support high bandwidth interrupt transfers.
     */
    qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
    if (qh->hb_mult > 1) {
        int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);

        if (ok)
            ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
                || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
        if (!ok) {
            ret = -EMSGSIZE;
            goto done;
        }
        qh->maxpacket &= 0x7ff;
    }

    qh->epnum = usb_endpoint_num(epd);

    /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
    qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

    /* precompute rxtype/txtype/type0 register */
    type_reg = (qh->type << 4) | qh->epnum;
    switch (urb->dev->speed) {
    case USB_SPEED_LOW:
        type_reg |= 0xc0;
        break;
    case USB_SPEED_FULL:
        type_reg |= 0x80;
        break;
    default:
        type_reg |= 0x40;
    }
    qh->type_reg = type_reg;

    /* Precompute RXINTERVAL/TXINTERVAL register */
    switch (qh->type) {
    case USB_ENDPOINT_XFER_INT:
        /*
         * Full/low speeds use the linear encoding,
         * high speed uses the logarithmic encoding.
         */
        if (urb->dev->speed <= USB_SPEED_FULL) {
            interval = max_t(u8, epd->bInterval, 1);
            break;
        }
        /* FALLTHROUGH */
    case USB_ENDPOINT_XFER_ISOC:
        /* ISO always uses logarithmic encoding */
        interval = min_t(u8, epd->bInterval, 16);
        break;
    default:
        /* REVISIT we actually want to use NAK limits, hinting to the
         * transfer scheduling logic to try some other qh, e.g. try
         * for 2 msec first:
         *
         * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
         *
         * The downside of disabling this is that transfer scheduling
         * gets VERY unfair for nonperiodic transfers; a misbehaving
         * peripheral could make that hurt.  That's perfectly normal
         * for reads from network or serial adapters ... so we have
         * partial NAKlimit support for bulk RX.
         *
         * The upside of disabling it is simpler transfer scheduling.
         */
        interval = 0;
    }
    qh->intv_reg = interval;

    /* precompute addressing for external hub/tt ports */
    if (musb->is_multipoint) {
#ifndef __UBOOT__
        struct usb_device *parent = urb->dev->parent;
#else
        struct usb_device *parent = usb_dev_get_parent(urb->dev);
#endif

#ifndef __UBOOT__
        if (parent != hcd->self.root_hub) {
#else
        if (parent) {
#endif
            qh->h_addr_reg = (u8) parent->devnum;

#ifndef __UBOOT__
            /* set up tt info if needed */
            if (urb->dev->tt) {
                qh->h_port_reg = (u8) urb->dev->ttport;
                if (urb->dev->tt->hub)
                    qh->h_addr_reg =
                        (u8) urb->dev->tt->hub->devnum;
                if (urb->dev->tt->multi)
                    qh->h_addr_reg |= 0x80;
            }
#else
            if (tt_needed(musb, urb->dev)) {
                u16 hub_port = find_tt(urb->dev);
                qh->h_addr_reg = (u8) (hub_port >> 8);
                qh->h_port_reg = (u8) (hub_port & 0xff);
            }
#endif
        }
    }

    /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
     * until we get real dma queues (with an entry for each urb/buffer),
     * we only have work to do in the former case.
     */
    spin_lock_irqsave(&musb->lock, flags);
    if (hep->hcpriv) {
        /* some concurrent activity submitted another urb to hep...
         * odd, rare, error prone, but legal.
         */
        kfree(qh);
        qh = NULL;
        ret = 0;
    } else
        ret = musb_schedule(musb, qh,
                epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

    if (ret == 0) {
        urb->hcpriv = qh;
        /* FIXME set urb->start_frame for iso/intr, it's tested in
         * musb_start_urb(), but otherwise only konicawc cares ...
         */
    }
    spin_unlock_irqrestore(&musb->lock, flags);

done:
    if (ret != 0) {
        spin_lock_irqsave(&musb->lock, flags);
        usb_hcd_unlink_urb_from_ep(hcd, urb);
        spin_unlock_irqrestore(&musb->lock, flags);
        kfree(qh);
    }
    return ret;
}
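
/*
 * Interval encoding example (illustrative): a full-speed interrupt
 * endpoint with bInterval 10 stores 10 directly (linear, in frames),
 * while the high-speed logarithmic form means 2^(interval-1)
 * microframes; ISO endpoints are clamped to at most 16 either way.
 */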
/*
 * abort a transfer that's at the head of a hardware queue.
 * called with controller locked, irqs blocked
 * that hardware queue advances to the next transfer, unless prevented
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
    struct musb_hw_ep *ep = qh->hw_ep;
    struct musb *musb = ep->musb;
    void __iomem *epio = ep->regs;
    unsigned hw_end = ep->epnum;
    void __iomem *regs = ep->musb->mregs;
    int is_in = usb_pipein(urb->pipe);
    int status = 0;
    u16 csr;

    musb_ep_select(regs, hw_end);

    if (is_dma_capable()) {
        struct dma_channel *dma;

        dma = is_in ? ep->rx_channel : ep->tx_channel;
        if (dma) {
            status = ep->musb->dma_controller->channel_abort(dma);
            dev_dbg(musb->controller,
                "abort %cX%d DMA for urb %p --> %d\n",
                is_in ? 'R' : 'T', ep->epnum,
                urb, status);
            urb->actual_length += dma->actual_len;
        }
    }

    /* turn off DMA requests, discard state, stop polling ... */
    if (ep->epnum && is_in) {
        /* giveback saves bulk toggle */
        csr = musb_h_flush_rxfifo(ep, 0);

        /* REVISIT we still get an irq; should likely clear the
         * endpoint's irq status here to avoid bogus irqs.
         * clearing that status is platform-specific...
         */
    } else if (ep->epnum) {
        musb_h_tx_flush_fifo(ep);
        csr = musb_readw(epio, MUSB_TXCSR);
        csr &= ~(MUSB_TXCSR_AUTOSET
            | MUSB_TXCSR_DMAENAB
            | MUSB_TXCSR_H_RXSTALL
            | MUSB_TXCSR_H_NAKTIMEOUT
            | MUSB_TXCSR_H_ERROR
            | MUSB_TXCSR_TXPKTRDY);
        musb_writew(epio, MUSB_TXCSR, csr);
        /* REVISIT may need to clear FLUSHFIFO ... */
        musb_writew(epio, MUSB_TXCSR, csr);
        /* flush cpu writebuffer */
        csr = musb_readw(epio, MUSB_TXCSR);
    } else {
        musb_h_ep0_flush_fifo(ep);
    }
    if (status == 0)
        musb_advance_schedule(ep->musb, urb, ep, is_in);
    return status;
}
#ifndef __UBOOT__
static int musb_urb_dequeue(
#else
int musb_urb_dequeue(
#endif
    struct usb_hcd *hcd,
    struct urb *urb,
    int status)
{
    struct musb *musb = hcd_to_musb(hcd);
    struct musb_qh *qh;
    unsigned long flags;
    int is_in = usb_pipein(urb->pipe);
    int ret;

    dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
            usb_pipedevice(urb->pipe),
            usb_pipeendpoint(urb->pipe),
            is_in ? "in" : "out");

    spin_lock_irqsave(&musb->lock, flags);
    ret = usb_hcd_check_unlink_urb(hcd, urb, status);
    if (ret)
        goto done;

    qh = urb->hcpriv;
    if (!qh)
        goto done;

    /*
     * Any URB not actively programmed into endpoint hardware can be
     * immediately given back; that's any URB not at the head of an
     * endpoint queue, unless someday we get real DMA queues.  And even
     * if it's at the head, it might not be known to the hardware...
     *
     * Otherwise abort current transfer, pending DMA, etc.; urb->status
     * has already been updated.  This is a synchronous abort; it'd be
     * OK to hold off until after some IRQ, though.
     *
     * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
     */
    if (!qh->is_ready
            || urb->urb_list.prev != &qh->hep->urb_list
            || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
        int ready = qh->is_ready;

        qh->is_ready = 0;
        musb_giveback(musb, urb, 0);
        qh->is_ready = ready;

        /* If nothing else (usually musb_giveback) is using it
         * and its URB list has emptied, recycle this qh.
         */
        if (ready && list_empty(&qh->hep->urb_list)) {
            qh->hep->hcpriv = NULL;
            list_del(&qh->ring);
            kfree(qh);
        }
    } else
        ret = musb_cleanup_urb(urb, qh);
done:
    spin_unlock_irqrestore(&musb->lock, flags);
    return ret;
}
/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
    u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
    unsigned long flags;
    struct musb *musb = hcd_to_musb(hcd);
    struct musb_qh *qh;
    struct urb *urb;

    spin_lock_irqsave(&musb->lock, flags);

    qh = hep->hcpriv;
    if (qh == NULL)
        goto exit;

    /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

    /* Kick the first URB off the hardware, if needed */
    qh->is_ready = 0;
    if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
        urb = next_urb(qh);

        /* make software (then hardware) stop ASAP */
        if (!urb->unlinked)
            urb->status = -ESHUTDOWN;

        /* cleanup */
        musb_cleanup_urb(urb, qh);

        /* Then nuke all the others ... and advance the
         * queue on hw_ep (e.g. bulk ring) when we're done.
         */
        while (!list_empty(&hep->urb_list)) {
            urb = next_urb(qh);
            urb->status = -ESHUTDOWN;
            musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
        }
    } else {
        /* Just empty the queue; the hardware is busy with
         * other transfers, and since !qh->is_ready nothing
         * will activate any of these as it advances.
         */
        while (!list_empty(&hep->urb_list))
            musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

        hep->hcpriv = NULL;
        list_del(&qh->ring);
        kfree(qh);
    }
exit:
    spin_unlock_irqrestore(&musb->lock, flags);
}
static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
    struct musb *musb = hcd_to_musb(hcd);

    return musb_readw(musb->mregs, MUSB_FRAME);
}
static int musb_h_start(struct usb_hcd *hcd)
{
    struct musb *musb = hcd_to_musb(hcd);

    /* NOTE: musb_start() is called when the hub driver turns
     * on port power, or when (OTG) peripheral starts.
     */
    hcd->state = HC_STATE_RUNNING;
    musb->port1_status = 0;
    return 0;
}
static void musb_h_stop(struct usb_hcd *hcd)
{
    musb_stop(hcd_to_musb(hcd));
    hcd->state = HC_STATE_HALT;
}
static int musb_bus_suspend(struct usb_hcd *hcd)
{
    struct musb *musb = hcd_to_musb(hcd);
    u8 devctl;

    if (!is_host_active(musb))
        return 0;

    switch (musb->xceiv->state) {
    case OTG_STATE_A_SUSPEND:
        return 0;
    case OTG_STATE_A_WAIT_VRISE:
        /* ID could be grounded even if there's no device
         * on the other end of the cable.  NOTE that the
         * A_WAIT_VRISE timers are messy with MUSB...
         */
        devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
        if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
            musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
        break;
    default:
        break;
    }

    if (musb->is_active) {
        WARNING("trying to suspend as %s while active\n",
                otg_state_string(musb->xceiv->state));
        return -EBUSY;
    } else
        return 0;
}
static int musb_bus_resume(struct usb_hcd *hcd)
{
    /* resuming child port does the work */
    return 0;
}
const struct hc_driver musb_hc_driver = {
    .description        = "musb-hcd",
    .product_desc       = "MUSB HDRC host driver",
    .hcd_priv_size      = sizeof(struct musb),
    .flags              = HCD_USB2 | HCD_MEMORY,

    /* not using irq handler or reset hooks from usbcore, since
     * those must be shared with peripheral code for OTG configs
     */

    .start              = musb_h_start,
    .stop               = musb_h_stop,

    .get_frame_number   = musb_h_get_frame_number,

    .urb_enqueue        = musb_urb_enqueue,
    .urb_dequeue        = musb_urb_dequeue,
    .endpoint_disable   = musb_h_disable,

    .hub_status_data    = musb_hub_status_data,
    .hub_control        = musb_hub_control,
    .bus_suspend        = musb_bus_suspend,
    .bus_resume         = musb_bus_resume,
    /* .start_port_reset    = NULL, */
    /* .hub_irq_enable      = NULL, */
};