/*
 * Copyright 2011, Marvell Semiconductor Inc.
 * Lei Wen <leiwen@marvell.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 * Back ported to the 8xx platform (from the 8260 platform) by
 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
 */
16 #include <asm/byteorder.h>
17 #include <asm/errno.h>
19 #include <asm/unaligned.h>
20 #include <linux/types.h>
21 #include <linux/usb/ch9.h>
22 #include <linux/usb/gadget.h>
23 #include <usb/ci_udc.h>
24 #include "../host/ehci.h"
/*
 * Check if the system has too long cachelines. If the cachelines are
 * longer then 128b, the driver will not be able flush/invalidate data
 * cache over separate QH entries. We use 128b because one QH entry is
 * 64b long and there are always two QH list entries for each endpoint.
 */
#if ARCH_DMA_MINALIGN > 128
#error This driver can not work on systems with caches longer than 128b
#endif

/*
 * Debug helpers. DBG() is compiled out unless DEBUG is defined;
 * reqname() is only needed by the debug prints, so it lives in the
 * DEBUG branch to avoid an unused-function warning otherwise.
 */
#ifndef DEBUG
#define DBG(x...) do {} while (0)
#else
#define DBG(x...) printf(x)
/* Map a standard USB request code to a printable name for DBG(). */
static const char *reqname(unsigned r)
{
	switch (r) {
	case USB_REQ_GET_STATUS: return "GET_STATUS";
	case USB_REQ_CLEAR_FEATURE: return "CLEAR_FEATURE";
	case USB_REQ_SET_FEATURE: return "SET_FEATURE";
	case USB_REQ_SET_ADDRESS: return "SET_ADDRESS";
	case USB_REQ_GET_DESCRIPTOR: return "GET_DESCRIPTOR";
	case USB_REQ_SET_DESCRIPTOR: return "SET_DESCRIPTOR";
	case USB_REQ_GET_CONFIGURATION: return "GET_CONFIGURATION";
	case USB_REQ_SET_CONFIGURATION: return "SET_CONFIGURATION";
	case USB_REQ_GET_INTERFACE: return "GET_INTERFACE";
	case USB_REQ_SET_INTERFACE: return "SET_INTERFACE";
	default: return "*UNKNOWN*";
	}
}
#endif
59 static struct usb_endpoint_descriptor ep0_desc
= {
60 .bLength
= sizeof(struct usb_endpoint_descriptor
),
61 .bDescriptorType
= USB_DT_ENDPOINT
,
62 .bEndpointAddress
= USB_DIR_IN
,
63 .bmAttributes
= USB_ENDPOINT_XFER_CONTROL
,
/* Forward declarations of the gadget/endpoint operation callbacks. */
static int ci_pullup(struct usb_gadget *gadget, int is_on);
static int ci_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc);
static int ci_ep_disable(struct usb_ep *ep);
static int ci_ep_queue(struct usb_ep *ep,
		struct usb_request *req, gfp_t gfp_flags);
static struct usb_request *
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags);
static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *_req);
76 static struct usb_gadget_ops ci_udc_ops
= {
80 static struct usb_ep_ops ci_ep_ops
= {
81 .enable
= ci_ep_enable
,
82 .disable
= ci_ep_disable
,
84 .alloc_request
= ci_ep_alloc_request
,
85 .free_request
= ci_ep_free_request
,
88 /* Init values for USB endpoints. */
89 static const struct usb_ep ci_ep_init
[2] = {
102 static struct ci_drv controller
= {
111 * ci_get_qh() - return queue head for endpoint
112 * @ep_num: Endpoint number
113 * @dir_in: Direction of the endpoint (IN = 1, OUT = 0)
115 * This function returns the QH associated with particular endpoint
116 * and it's direction.
118 static struct ept_queue_head
*ci_get_qh(int ep_num
, int dir_in
)
120 return &controller
.epts
[(ep_num
* 2) + dir_in
];
124 * ci_get_qtd() - return queue item for endpoint
125 * @ep_num: Endpoint number
126 * @dir_in: Direction of the endpoint (IN = 1, OUT = 0)
128 * This function returns the QH associated with particular endpoint
129 * and it's direction.
131 static struct ept_queue_item
*ci_get_qtd(int ep_num
, int dir_in
)
133 return controller
.items
[(ep_num
* 2) + dir_in
];
137 * ci_flush_qh - flush cache over queue head
138 * @ep_num: Endpoint number
140 * This function flushes cache over QH for particular endpoint.
142 static void ci_flush_qh(int ep_num
)
144 struct ept_queue_head
*head
= ci_get_qh(ep_num
, 0);
145 const uint32_t start
= (uint32_t)head
;
146 const uint32_t end
= start
+ 2 * sizeof(*head
);
148 flush_dcache_range(start
, end
);
152 * ci_invalidate_qh - invalidate cache over queue head
153 * @ep_num: Endpoint number
155 * This function invalidates cache over QH for particular endpoint.
157 static void ci_invalidate_qh(int ep_num
)
159 struct ept_queue_head
*head
= ci_get_qh(ep_num
, 0);
160 uint32_t start
= (uint32_t)head
;
161 uint32_t end
= start
+ 2 * sizeof(*head
);
163 invalidate_dcache_range(start
, end
);
167 * ci_flush_qtd - flush cache over queue item
168 * @ep_num: Endpoint number
170 * This function flushes cache over qTD pair for particular endpoint.
172 static void ci_flush_qtd(int ep_num
)
174 struct ept_queue_item
*item
= ci_get_qtd(ep_num
, 0);
175 const uint32_t start
= (uint32_t)item
;
176 const uint32_t end_raw
= start
+ 2 * sizeof(*item
);
177 const uint32_t end
= roundup(end_raw
, ARCH_DMA_MINALIGN
);
179 flush_dcache_range(start
, end
);
183 * ci_invalidate_qtd - invalidate cache over queue item
184 * @ep_num: Endpoint number
186 * This function invalidates cache over qTD pair for particular endpoint.
188 static void ci_invalidate_qtd(int ep_num
)
190 struct ept_queue_item
*item
= ci_get_qtd(ep_num
, 0);
191 const uint32_t start
= (uint32_t)item
;
192 const uint32_t end_raw
= start
+ 2 * sizeof(*item
);
193 const uint32_t end
= roundup(end_raw
, ARCH_DMA_MINALIGN
);
195 invalidate_dcache_range(start
, end
);
198 static struct usb_request
*
199 ci_ep_alloc_request(struct usb_ep
*ep
, unsigned int gfp_flags
)
201 struct ci_ep
*ci_ep
= container_of(ep
, struct ci_ep
, ep
);
203 struct ci_req
*ci_req
;
205 num
= ci_ep
->desc
->bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK
;
206 if (num
== 0 && controller
.ep0_req
)
207 return &controller
.ep0_req
->req
;
209 ci_req
= memalign(ARCH_DMA_MINALIGN
, sizeof(*ci_req
));
213 INIT_LIST_HEAD(&ci_req
->queue
);
217 controller
.ep0_req
= ci_req
;
222 static void ci_ep_free_request(struct usb_ep
*ep
, struct usb_request
*req
)
224 struct ci_ep
*ci_ep
= container_of(ep
, struct ci_ep
, ep
);
225 struct ci_req
*ci_req
= container_of(req
, struct ci_req
, req
);
228 num
= ci_ep
->desc
->bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK
;
230 if (!controller
.ep0_req
)
232 controller
.ep0_req
= 0;
240 static void ep_enable(int num
, int in
, int maxpacket
)
242 struct ci_udc
*udc
= (struct ci_udc
*)controller
.ctrl
->hcor
;
245 n
= readl(&udc
->epctrl
[num
]);
247 n
|= (CTRL_TXE
| CTRL_TXR
| CTRL_TXT_BULK
);
249 n
|= (CTRL_RXE
| CTRL_RXR
| CTRL_RXT_BULK
);
252 struct ept_queue_head
*head
= ci_get_qh(num
, in
);
254 head
->config
= CONFIG_MAX_PKT(maxpacket
) | CONFIG_ZLT
;
257 writel(n
, &udc
->epctrl
[num
]);
260 static int ci_ep_enable(struct usb_ep
*ep
,
261 const struct usb_endpoint_descriptor
*desc
)
263 struct ci_ep
*ci_ep
= container_of(ep
, struct ci_ep
, ep
);
265 num
= desc
->bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK
;
266 in
= (desc
->bEndpointAddress
& USB_DIR_IN
) != 0;
270 int max
= get_unaligned_le16(&desc
->wMaxPacketSize
);
272 if ((max
> 64) && (controller
.gadget
.speed
== USB_SPEED_FULL
))
274 if (ep
->maxpacket
!= max
) {
275 DBG("%s: from %d to %d\n", __func__
,
280 ep_enable(num
, in
, ep
->maxpacket
);
281 DBG("%s: num=%d maxpacket=%d\n", __func__
, num
, ep
->maxpacket
);
285 static int ci_ep_disable(struct usb_ep
*ep
)
287 struct ci_ep
*ci_ep
= container_of(ep
, struct ci_ep
, ep
);
293 static int ci_bounce(struct ci_req
*ci_req
, int in
)
295 struct usb_request
*req
= &ci_req
->req
;
296 uint32_t addr
= (uint32_t)req
->buf
;
298 uint32_t aligned_used_len
;
300 /* Input buffer address is not aligned. */
301 if (addr
& (ARCH_DMA_MINALIGN
- 1))
304 /* Input buffer length is not aligned. */
305 if (req
->length
& (ARCH_DMA_MINALIGN
- 1))
308 /* The buffer is well aligned, only flush cache. */
309 ci_req
->hw_len
= req
->length
;
310 ci_req
->hw_buf
= req
->buf
;
314 if (ci_req
->b_buf
&& req
->length
> ci_req
->b_len
) {
318 if (!ci_req
->b_buf
) {
319 ci_req
->b_len
= roundup(req
->length
, ARCH_DMA_MINALIGN
);
320 ci_req
->b_buf
= memalign(ARCH_DMA_MINALIGN
, ci_req
->b_len
);
324 ci_req
->hw_len
= ci_req
->b_len
;
325 ci_req
->hw_buf
= ci_req
->b_buf
;
328 memcpy(ci_req
->hw_buf
, req
->buf
, req
->length
);
331 hwaddr
= (uint32_t)ci_req
->hw_buf
;
332 aligned_used_len
= roundup(req
->length
, ARCH_DMA_MINALIGN
);
333 flush_dcache_range(hwaddr
, hwaddr
+ aligned_used_len
);
338 static void ci_debounce(struct ci_req
*ci_req
, int in
)
340 struct usb_request
*req
= &ci_req
->req
;
341 uint32_t addr
= (uint32_t)req
->buf
;
342 uint32_t hwaddr
= (uint32_t)ci_req
->hw_buf
;
343 uint32_t aligned_used_len
;
348 aligned_used_len
= roundup(req
->actual
, ARCH_DMA_MINALIGN
);
349 invalidate_dcache_range(hwaddr
, hwaddr
+ aligned_used_len
);
352 return; /* not a bounce */
354 memcpy(req
->buf
, ci_req
->hw_buf
, req
->actual
);
357 static void ci_ep_submit_next_request(struct ci_ep
*ci_ep
)
359 struct ci_udc
*udc
= (struct ci_udc
*)controller
.ctrl
->hcor
;
360 struct ept_queue_item
*item
;
361 struct ept_queue_head
*head
;
362 int bit
, num
, len
, in
;
363 struct ci_req
*ci_req
;
365 ci_ep
->req_primed
= true;
367 num
= ci_ep
->desc
->bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK
;
368 in
= (ci_ep
->desc
->bEndpointAddress
& USB_DIR_IN
) != 0;
369 item
= ci_get_qtd(num
, in
);
370 head
= ci_get_qh(num
, in
);
372 ci_req
= list_first_entry(&ci_ep
->queue
, struct ci_req
, queue
);
373 len
= ci_req
->req
.length
;
375 item
->info
= INFO_BYTES(len
) | INFO_ACTIVE
;
376 item
->page0
= (uint32_t)ci_req
->hw_buf
;
377 item
->page1
= ((uint32_t)ci_req
->hw_buf
& 0xfffff000) + 0x1000;
378 item
->page2
= ((uint32_t)ci_req
->hw_buf
& 0xfffff000) + 0x2000;
379 item
->page3
= ((uint32_t)ci_req
->hw_buf
& 0xfffff000) + 0x3000;
380 item
->page4
= ((uint32_t)ci_req
->hw_buf
& 0xfffff000) + 0x4000;
382 head
->next
= (unsigned) item
;
386 * When sending the data for an IN transaction, the attached host
387 * knows that all data for the IN is sent when one of the following
389 * a) A zero-length packet is transmitted.
390 * b) A packet with length that isn't an exact multiple of the ep's
391 * maxpacket is transmitted.
392 * c) Enough data is sent to exactly fill the host's maximum expected
393 * IN transaction size.
395 * One of these conditions MUST apply at the end of an IN transaction,
396 * or the transaction will not be considered complete by the host. If
397 * none of (a)..(c) already applies, then we must force (a) to apply
398 * by explicitly sending an extra zero-length packet.
401 if (in
&& len
&& !(len
% ci_ep
->ep
.maxpacket
) && ci_req
->req
.zero
) {
403 * Each endpoint has 2 items allocated, even though typically
404 * only 1 is used at a time since either an IN or an OUT but
405 * not both is queued. For an IN transaction, item currently
406 * points at the second of these items, so we know that we
407 * can use the other to transmit the extra zero-length packet.
409 struct ept_queue_item
*other_item
= ci_get_qtd(num
, 0);
410 item
->next
= (unsigned)other_item
;
412 item
->info
= INFO_ACTIVE
;
415 item
->next
= TERMINATE
;
416 item
->info
|= INFO_IOC
;
420 DBG("ept%d %s queue len %x, req %p, buffer %p\n",
421 num
, in
? "in" : "out", len
, ci_req
, ci_req
->hw_buf
);
429 writel(bit
, &udc
->epprime
);
432 static int ci_ep_queue(struct usb_ep
*ep
,
433 struct usb_request
*req
, gfp_t gfp_flags
)
435 struct ci_ep
*ci_ep
= container_of(ep
, struct ci_ep
, ep
);
436 struct ci_req
*ci_req
= container_of(req
, struct ci_req
, req
);
438 int __maybe_unused num
;
440 num
= ci_ep
->desc
->bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK
;
441 in
= (ci_ep
->desc
->bEndpointAddress
& USB_DIR_IN
) != 0;
443 if (!num
&& ci_ep
->req_primed
) {
445 * The flipping of ep0 between IN and OUT relies on
446 * ci_ep_queue consuming the current IN/OUT setting
447 * immediately. If this is deferred to a later point when the
448 * req is pulled out of ci_req->queue, then the IN/OUT setting
449 * may have been changed since the req was queued, and state
450 * will get out of sync. This condition doesn't occur today,
451 * but could if bugs were introduced later, and this error
452 * check will save a lot of debugging time.
454 printf("%s: ep0 transaction already in progress\n", __func__
);
458 ret
= ci_bounce(ci_req
, in
);
462 DBG("ept%d %s pre-queue req %p, buffer %p\n",
463 num
, in
? "in" : "out", ci_req
, ci_req
->hw_buf
);
464 list_add_tail(&ci_req
->queue
, &ci_ep
->queue
);
466 if (!ci_ep
->req_primed
)
467 ci_ep_submit_next_request(ci_ep
);
472 static void flip_ep0_direction(void)
474 if (ep0_desc
.bEndpointAddress
== USB_DIR_IN
) {
475 DBG("%s: Flipping ep0 to OUT\n", __func__
);
476 ep0_desc
.bEndpointAddress
= 0;
478 DBG("%s: Flipping ep0 to IN\n", __func__
);
479 ep0_desc
.bEndpointAddress
= USB_DIR_IN
;
483 static void handle_ep_complete(struct ci_ep
*ep
)
485 struct ept_queue_item
*item
;
487 struct ci_req
*ci_req
;
489 num
= ep
->desc
->bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK
;
490 in
= (ep
->desc
->bEndpointAddress
& USB_DIR_IN
) != 0;
491 item
= ci_get_qtd(num
, in
);
492 ci_invalidate_qtd(num
);
494 len
= (item
->info
>> 16) & 0x7fff;
495 if (item
->info
& 0xff)
496 printf("EP%d/%s FAIL info=%x pg0=%x\n",
497 num
, in
? "in" : "out", item
->info
, item
->page0
);
499 ci_req
= list_first_entry(&ep
->queue
, struct ci_req
, queue
);
500 list_del_init(&ci_req
->queue
);
501 ep
->req_primed
= false;
503 if (!list_empty(&ep
->queue
))
504 ci_ep_submit_next_request(ep
);
506 ci_req
->req
.actual
= ci_req
->req
.length
- len
;
507 ci_debounce(ci_req
, in
);
509 DBG("ept%d %s req %p, complete %x\n",
510 num
, in
? "in" : "out", ci_req
, len
);
511 if (num
!= 0 || controller
.ep0_data_phase
)
512 ci_req
->req
.complete(&ep
->ep
, &ci_req
->req
);
513 if (num
== 0 && controller
.ep0_data_phase
) {
515 * Data Stage is complete, so flip ep0 dir for Status Stage,
516 * which always transfers a packet in the opposite direction.
518 DBG("%s: flip ep0 dir for Status Stage\n", __func__
);
519 flip_ep0_direction();
520 controller
.ep0_data_phase
= false;
521 ci_req
->req
.length
= 0;
522 usb_ep_queue(&ep
->ep
, &ci_req
->req
, 0);
526 #define SETUP(type, request) (((type) << 8) | (request))
528 static void handle_setup(void)
530 struct ci_ep
*ci_ep
= &controller
.ep
[0];
531 struct ci_req
*ci_req
;
532 struct usb_request
*req
;
533 struct ci_udc
*udc
= (struct ci_udc
*)controller
.ctrl
->hcor
;
534 struct ept_queue_head
*head
;
535 struct usb_ctrlrequest r
;
537 int num
, in
, _num
, _in
, i
;
540 ci_req
= controller
.ep0_req
;
542 head
= ci_get_qh(0, 0); /* EP0 OUT */
545 memcpy(&r
, head
->setup_data
, sizeof(struct usb_ctrlrequest
));
546 #ifdef CONFIG_CI_UDC_HAS_HOSTPC
547 writel(EPT_RX(0), &udc
->epsetupstat
);
549 writel(EPT_RX(0), &udc
->epstat
);
551 DBG("handle setup %s, %x, %x index %x value %x length %x\n",
552 reqname(r
.bRequest
), r
.bRequestType
, r
.bRequest
, r
.wIndex
,
553 r
.wValue
, r
.wLength
);
555 /* Set EP0 dir for Data Stage based on Setup Stage data */
556 if (r
.bRequestType
& USB_DIR_IN
) {
557 DBG("%s: Set ep0 to IN for Data Stage\n", __func__
);
558 ep0_desc
.bEndpointAddress
= USB_DIR_IN
;
560 DBG("%s: Set ep0 to OUT for Data Stage\n", __func__
);
561 ep0_desc
.bEndpointAddress
= 0;
564 controller
.ep0_data_phase
= true;
566 /* 0 length -> no Data Stage. Flip dir for Status Stage */
567 DBG("%s: 0 length: flip ep0 dir for Status Stage\n", __func__
);
568 flip_ep0_direction();
569 controller
.ep0_data_phase
= false;
572 list_del_init(&ci_req
->queue
);
573 ci_ep
->req_primed
= false;
575 switch (SETUP(r
.bRequestType
, r
.bRequest
)) {
576 case SETUP(USB_RECIP_ENDPOINT
, USB_REQ_CLEAR_FEATURE
):
577 _num
= r
.wIndex
& 15;
578 _in
= !!(r
.wIndex
& 0x80);
580 if ((r
.wValue
== 0) && (r
.wLength
== 0)) {
582 for (i
= 0; i
< NUM_ENDPOINTS
; i
++) {
583 struct ci_ep
*ep
= &controller
.ep
[i
];
587 num
= ep
->desc
->bEndpointAddress
588 & USB_ENDPOINT_NUMBER_MASK
;
589 in
= (ep
->desc
->bEndpointAddress
591 if ((num
== _num
) && (in
== _in
)) {
592 ep_enable(num
, in
, ep
->ep
.maxpacket
);
593 usb_ep_queue(controller
.gadget
.ep0
,
601 case SETUP(USB_RECIP_DEVICE
, USB_REQ_SET_ADDRESS
):
603 * write address delayed (will take effect
604 * after the next IN txn)
606 writel((r
.wValue
<< 25) | (1 << 24), &udc
->devaddr
);
608 usb_ep_queue(controller
.gadget
.ep0
, req
, 0);
611 case SETUP(USB_DIR_IN
| USB_RECIP_DEVICE
, USB_REQ_GET_STATUS
):
613 buf
= (char *)req
->buf
;
614 buf
[0] = 1 << USB_DEVICE_SELF_POWERED
;
616 usb_ep_queue(controller
.gadget
.ep0
, req
, 0);
619 /* pass request up to the gadget driver */
620 if (controller
.driver
)
621 status
= controller
.driver
->setup(&controller
.gadget
, &r
);
627 DBG("STALL reqname %s type %x value %x, index %x\n",
628 reqname(r
.bRequest
), r
.bRequestType
, r
.wValue
, r
.wIndex
);
629 writel((1<<16) | (1 << 0), &udc
->epctrl
[0]);
632 static void stop_activity(void)
635 struct ept_queue_head
*head
;
636 struct ci_udc
*udc
= (struct ci_udc
*)controller
.ctrl
->hcor
;
637 writel(readl(&udc
->epcomp
), &udc
->epcomp
);
638 #ifdef CONFIG_CI_UDC_HAS_HOSTPC
639 writel(readl(&udc
->epsetupstat
), &udc
->epsetupstat
);
641 writel(readl(&udc
->epstat
), &udc
->epstat
);
642 writel(0xffffffff, &udc
->epflush
);
644 /* error out any pending reqs */
645 for (i
= 0; i
< NUM_ENDPOINTS
; i
++) {
647 writel(0, &udc
->epctrl
[i
]);
648 if (controller
.ep
[i
].desc
) {
649 num
= controller
.ep
[i
].desc
->bEndpointAddress
650 & USB_ENDPOINT_NUMBER_MASK
;
651 in
= (controller
.ep
[i
].desc
->bEndpointAddress
653 head
= ci_get_qh(num
, in
);
654 head
->info
= INFO_ACTIVE
;
662 struct ci_udc
*udc
= (struct ci_udc
*)controller
.ctrl
->hcor
;
663 unsigned n
= readl(&udc
->usbsts
);
664 writel(n
, &udc
->usbsts
);
667 n
&= (STS_SLI
| STS_URI
| STS_PCI
| STS_UI
| STS_UEI
);
672 DBG("-- reset --\n");
676 DBG("-- suspend --\n");
680 int speed
= USB_SPEED_FULL
;
682 #ifdef CONFIG_CI_UDC_HAS_HOSTPC
683 bit
= (readl(&udc
->hostpc1_devlc
) >> 25) & 3;
685 bit
= (readl(&udc
->portsc
) >> 26) & 3;
687 DBG("-- portchange %x %s\n", bit
, (bit
== 2) ? "High" : "Full");
689 speed
= USB_SPEED_HIGH
;
692 controller
.gadget
.speed
= speed
;
693 for (i
= 1; i
< NUM_ENDPOINTS
; i
++) {
694 if (controller
.ep
[i
].ep
.maxpacket
> max
)
695 controller
.ep
[i
].ep
.maxpacket
= max
;
700 printf("<UEI %x>\n", readl(&udc
->epcomp
));
702 if ((n
& STS_UI
) || (n
& STS_UEI
)) {
703 #ifdef CONFIG_CI_UDC_HAS_HOSTPC
704 n
= readl(&udc
->epsetupstat
);
706 n
= readl(&udc
->epstat
);
711 n
= readl(&udc
->epcomp
);
713 writel(n
, &udc
->epcomp
);
715 for (i
= 0; i
< NUM_ENDPOINTS
&& n
; i
++) {
716 if (controller
.ep
[i
].desc
) {
717 num
= controller
.ep
[i
].desc
->bEndpointAddress
718 & USB_ENDPOINT_NUMBER_MASK
;
719 in
= (controller
.ep
[i
].desc
->bEndpointAddress
721 bit
= (in
) ? EPT_TX(num
) : EPT_RX(num
);
723 handle_ep_complete(&controller
.ep
[i
]);
729 int usb_gadget_handle_interrupts(void)
732 struct ci_udc
*udc
= (struct ci_udc
*)controller
.ctrl
->hcor
;
734 value
= readl(&udc
->usbsts
);
741 void udc_disconnect(void)
743 struct ci_udc
*udc
= (struct ci_udc
*)controller
.ctrl
->hcor
;
746 writel(USBCMD_FS2
, &udc
->usbcmd
);
748 if (controller
.driver
)
749 controller
.driver
->disconnect(&controller
.gadget
);
752 static int ci_pullup(struct usb_gadget
*gadget
, int is_on
)
754 struct ci_udc
*udc
= (struct ci_udc
*)controller
.ctrl
->hcor
;
757 writel(USBCMD_ITC(MICRO_8FRAME
) | USBCMD_RST
, &udc
->usbcmd
);
760 writel((unsigned)controller
.epts
, &udc
->epinitaddr
);
762 /* select DEVICE mode */
763 writel(USBMODE_DEVICE
, &udc
->usbmode
);
765 writel(0xffffffff, &udc
->epflush
);
767 /* Turn on the USB connection by enabling the pullup resistor */
768 writel(USBCMD_ITC(MICRO_8FRAME
) | USBCMD_RUN
, &udc
->usbcmd
);
776 static int ci_udc_probe(void)
778 struct ept_queue_head
*head
;
782 const int num
= 2 * NUM_ENDPOINTS
;
784 const int eplist_min_align
= 4096;
785 const int eplist_align
= roundup(eplist_min_align
, ARCH_DMA_MINALIGN
);
786 const int eplist_raw_sz
= num
* sizeof(struct ept_queue_head
);
787 const int eplist_sz
= roundup(eplist_raw_sz
, ARCH_DMA_MINALIGN
);
789 const int ilist_align
= roundup(ARCH_DMA_MINALIGN
, 32);
790 const int ilist_ent_raw_sz
= 2 * sizeof(struct ept_queue_item
);
791 const int ilist_ent_sz
= roundup(ilist_ent_raw_sz
, ARCH_DMA_MINALIGN
);
792 const int ilist_sz
= NUM_ENDPOINTS
* ilist_ent_sz
;
794 /* The QH list must be aligned to 4096 bytes. */
795 controller
.epts
= memalign(eplist_align
, eplist_sz
);
796 if (!controller
.epts
)
798 memset(controller
.epts
, 0, eplist_sz
);
801 * Each qTD item must be 32-byte aligned, each qTD touple must be
802 * cacheline aligned. There are two qTD items for each endpoint and
803 * only one of them is used for the endpoint at time, so we can group
806 controller
.items_mem
= memalign(ilist_align
, ilist_sz
);
807 if (!controller
.items_mem
) {
808 free(controller
.epts
);
811 memset(controller
.items_mem
, 0, ilist_sz
);
813 for (i
= 0; i
< 2 * NUM_ENDPOINTS
; i
++) {
815 * Configure QH for each endpoint. The structure of the QH list
816 * is such that each two subsequent fields, N and N+1 where N is
817 * even, in the QH list represent QH for one endpoint. The Nth
818 * entry represents OUT configuration and the N+1th entry does
819 * represent IN configuration of the endpoint.
821 head
= controller
.epts
+ i
;
823 head
->config
= CONFIG_MAX_PKT(EP0_MAX_PACKET_SIZE
)
824 | CONFIG_ZLT
| CONFIG_IOS
;
826 head
->config
= CONFIG_MAX_PKT(EP_MAX_PACKET_SIZE
)
828 head
->next
= TERMINATE
;
831 imem
= controller
.items_mem
+ ((i
>> 1) * ilist_ent_sz
);
833 imem
+= sizeof(struct ept_queue_item
);
835 controller
.items
[i
] = (struct ept_queue_item
*)imem
;
843 INIT_LIST_HEAD(&controller
.gadget
.ep_list
);
846 memcpy(&controller
.ep
[0].ep
, &ci_ep_init
[0], sizeof(*ci_ep_init
));
847 controller
.ep
[0].desc
= &ep0_desc
;
848 INIT_LIST_HEAD(&controller
.ep
[0].queue
);
849 controller
.ep
[0].req_primed
= false;
850 controller
.gadget
.ep0
= &controller
.ep
[0].ep
;
851 INIT_LIST_HEAD(&controller
.gadget
.ep0
->ep_list
);
854 for (i
= 1; i
< NUM_ENDPOINTS
; i
++) {
855 memcpy(&controller
.ep
[i
].ep
, &ci_ep_init
[1],
856 sizeof(*ci_ep_init
));
857 INIT_LIST_HEAD(&controller
.ep
[i
].queue
);
858 controller
.ep
[i
].req_primed
= false;
859 list_add_tail(&controller
.ep
[i
].ep
.ep_list
,
860 &controller
.gadget
.ep_list
);
863 ci_ep_alloc_request(&controller
.ep
[0].ep
, 0);
864 if (!controller
.ep0_req
) {
865 free(controller
.items_mem
);
866 free(controller
.epts
);
873 int usb_gadget_register_driver(struct usb_gadget_driver
*driver
)
879 if (!driver
->bind
|| !driver
->setup
|| !driver
->disconnect
)
881 if (driver
->speed
!= USB_SPEED_FULL
&& driver
->speed
!= USB_SPEED_HIGH
)
884 ret
= usb_lowlevel_init(0, USB_INIT_DEVICE
, (void **)&controller
.ctrl
);
888 ret
= ci_udc_probe();
889 #if defined(CONFIG_USB_EHCI_MX6) || defined(CONFIG_USB_EHCI_MXS)
891 * FIXME: usb_lowlevel_init()->ehci_hcd_init() should be doing all
892 * HW-specific initialization, e.g. ULPI-vs-UTMI PHY selection
895 struct ci_udc
*udc
= (struct ci_udc
*)controller
.ctrl
->hcor
;
897 /* select ULPI phy */
898 writel(PTS(PTS_ENABLE
) | PFSC
, &udc
->portsc
);
902 ret
= driver
->bind(&controller
.gadget
);
904 DBG("driver->bind() returned %d\n", ret
);
907 controller
.driver
= driver
;
912 int usb_gadget_unregister_driver(struct usb_gadget_driver
*driver
)
916 driver
->unbind(&controller
.gadget
);
917 controller
.driver
= NULL
;
919 ci_ep_free_request(&controller
.ep
[0].ep
, &controller
.ep0_req
->req
);
920 free(controller
.items_mem
);
921 free(controller
.epts
);