// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2011, Marvell Semiconductor Inc.
 * Lei Wen <leiwen@marvell.com>
 *
 * Back ported to the 8xx platform (from the 8260 platform) by
 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
 */

#include <common.h>
#include <command.h>
#include <config.h>
#include <cpu_func.h>
#include <net.h>
#include <malloc.h>
#include <asm/byteorder.h>
#include <asm/cache.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/unaligned.h>
#include <linux/types.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <usb/ci_udc.h>
#include "../host/ehci.h"
#include "ci_udc.h"

/*
 * Check if the system has too long cachelines. If the cachelines are
 * longer than 128 bytes, the driver will not be able to flush/invalidate
 * the data cache over separate QH entries. We use 128 bytes because one
 * QH entry is 64 bytes long and there are always two QH list entries for
 * each endpoint.
 */
#if ARCH_DMA_MINALIGN > 128
#error This driver cannot work on systems with caches longer than 128 bytes
#endif

/*
 * Every QTD must be individually aligned, since we can program any
 * QTD's address into HW. Cache flushing requires ARCH_DMA_MINALIGN,
 * and the USB HW requires 32-byte alignment. Align to both:
 */
#define ILIST_ALIGN		roundup(ARCH_DMA_MINALIGN, 32)
/* Each QTD is this size */
#define ILIST_ENT_RAW_SZ	sizeof(struct ept_queue_item)
/*
 * Align the size of the QTD too, so we can add this value to each
 * QTD's address to get another aligned address.
 */
#define ILIST_ENT_SZ		roundup(ILIST_ENT_RAW_SZ, ILIST_ALIGN)
/* For each endpoint, we need 2 QTDs, one for each of IN and OUT */
#define ILIST_SZ		(NUM_ENDPOINTS * 2 * ILIST_ENT_SZ)

#define EP_MAX_LENGTH_TRANSFER	0x4000

#ifndef DEBUG
#define DBG(x...) do {} while (0)
#else
#define DBG(x...) printf(x)
static const char *reqname(unsigned r)
{
	switch (r) {
	case USB_REQ_GET_STATUS: return "GET_STATUS";
	case USB_REQ_CLEAR_FEATURE: return "CLEAR_FEATURE";
	case USB_REQ_SET_FEATURE: return "SET_FEATURE";
	case USB_REQ_SET_ADDRESS: return "SET_ADDRESS";
	case USB_REQ_GET_DESCRIPTOR: return "GET_DESCRIPTOR";
	case USB_REQ_SET_DESCRIPTOR: return "SET_DESCRIPTOR";
	case USB_REQ_GET_CONFIGURATION: return "GET_CONFIGURATION";
	case USB_REQ_SET_CONFIGURATION: return "SET_CONFIGURATION";
	case USB_REQ_GET_INTERFACE: return "GET_INTERFACE";
	case USB_REQ_SET_INTERFACE: return "SET_INTERFACE";
	default: return "*UNKNOWN*";
	}
}
#endif

static struct usb_endpoint_descriptor ep0_desc = {
	.bLength = sizeof(struct usb_endpoint_descriptor),
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
};

static int ci_pullup(struct usb_gadget *gadget, int is_on);
static int ci_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc);
static int ci_ep_disable(struct usb_ep *ep);
static int ci_ep_queue(struct usb_ep *ep,
		struct usb_request *req, gfp_t gfp_flags);
static int ci_ep_dequeue(struct usb_ep *ep, struct usb_request *req);
static struct usb_request *
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags);
static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *_req);

static struct usb_gadget_ops ci_udc_ops = {
	.pullup = ci_pullup,
};

static struct usb_ep_ops ci_ep_ops = {
	.enable = ci_ep_enable,
	.disable = ci_ep_disable,
	.queue = ci_ep_queue,
	.dequeue = ci_ep_dequeue,
	.alloc_request = ci_ep_alloc_request,
	.free_request = ci_ep_free_request,
};

__weak void ci_init_after_reset(struct ehci_ctrl *ctrl)
{
}

/* Init values for USB endpoints. */
static const struct usb_ep ci_ep_init[5] = {
	[0] = {	/* EP 0 */
		.maxpacket = 64,
		.name = "ep0",
		.ops = &ci_ep_ops,
	},
	[1] = {
		.maxpacket = 512,
		.name = "ep1in-bulk",
		.ops = &ci_ep_ops,
	},
	[2] = {
		.maxpacket = 512,
		.name = "ep2out-bulk",
		.ops = &ci_ep_ops,
	},
	[3] = {
		.maxpacket = 512,
		.name = "ep3in-int",
		.ops = &ci_ep_ops,
	},
	[4] = {
		.maxpacket = 512,
		.name = "ep-",
		.ops = &ci_ep_ops,
	},
};

static struct ci_drv controller = {
	.gadget	= {
		.name = "ci_udc",
		.ops = &ci_udc_ops,
		.is_dualspeed = 1,
	},
};

/**
 * ci_get_qh() - return queue head for endpoint
 * @ep_num:	Endpoint number
 * @dir_in:	Direction of the endpoint (IN = 1, OUT = 0)
 *
 * This function returns the QH associated with a particular endpoint
 * and its direction.
 */
static struct ept_queue_head *ci_get_qh(int ep_num, int dir_in)
{
	return &controller.epts[(ep_num * 2) + dir_in];
}

/**
 * ci_get_qtd() - return queue item for endpoint
 * @ep_num:	Endpoint number
 * @dir_in:	Direction of the endpoint (IN = 1, OUT = 0)
 *
 * This function returns the queue item (qTD) associated with a
 * particular endpoint and its direction.
 */
static struct ept_queue_item *ci_get_qtd(int ep_num, int dir_in)
{
	int index = (ep_num * 2) + dir_in;
	uint8_t *imem = controller.items_mem + (index * ILIST_ENT_SZ);

	return (struct ept_queue_item *)imem;
}

/**
 * ci_flush_qh - flush cache over queue head
 * @ep_num:	Endpoint number
 *
 * This function flushes the cache over the QH pair (OUT and IN) for a
 * particular endpoint.
 */
static void ci_flush_qh(int ep_num)
{
	struct ept_queue_head *head = ci_get_qh(ep_num, 0);
	const unsigned long start = (unsigned long)head;
	const unsigned long end = start + 2 * sizeof(*head);

	flush_dcache_range(start, end);
}

/**
 * ci_invalidate_qh - invalidate cache over queue head
 * @ep_num:	Endpoint number
 *
 * This function invalidates the cache over the QH pair (OUT and IN) for
 * a particular endpoint.
 */
static void ci_invalidate_qh(int ep_num)
{
	struct ept_queue_head *head = ci_get_qh(ep_num, 0);
	unsigned long start = (unsigned long)head;
	unsigned long end = start + 2 * sizeof(*head);

	invalidate_dcache_range(start, end);
}

/**
 * ci_flush_qtd - flush cache over queue item
 * @ep_num:	Endpoint number
 *
 * This function flushes the cache over the qTD pair for a particular
 * endpoint.
 */
static void ci_flush_qtd(int ep_num)
{
	struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
	const unsigned long start = (unsigned long)item;
	const unsigned long end = start + 2 * ILIST_ENT_SZ;

	flush_dcache_range(start, end);
}

/**
 * ci_flush_td - flush cache over a transfer descriptor
 * @td:	td pointer
 *
 * This function flushes the cache for a particular transfer descriptor.
 */
static void ci_flush_td(struct ept_queue_item *td)
{
	const unsigned long start = (unsigned long)td;
	const unsigned long end = (unsigned long)td + ILIST_ENT_SZ;

	flush_dcache_range(start, end);
}

/**
 * ci_invalidate_qtd - invalidate cache over queue item
 * @ep_num:	Endpoint number
 *
 * This function invalidates the cache over the qTD pair for a particular
 * endpoint.
 */
static void ci_invalidate_qtd(int ep_num)
{
	struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
	const unsigned long start = (unsigned long)item;
	const unsigned long end = start + 2 * ILIST_ENT_SZ;

	invalidate_dcache_range(start, end);
}

/**
 * ci_invalidate_td - invalidate cache over a transfer descriptor
 * @td:	td pointer
 *
 * This function invalidates the cache for a particular transfer
 * descriptor.
 */
static void ci_invalidate_td(struct ept_queue_item *td)
{
	const unsigned long start = (unsigned long)td;
	const unsigned long end = start + ILIST_ENT_SZ;

	invalidate_dcache_range(start, end);
}

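/**
 * ci_ep_alloc_request() - allocate a request object for an endpoint
 * @ep:	Endpoint the request will be used with
 * @gfp_flags:	Allocation flags (unused here)
 *
 * For ep0, a single request is shared between the IN and OUT directions:
 * the first allocation is cached in controller.ep0_req and handed back
 * to every subsequent caller.
 */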
static struct usb_request *
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	int num = -1;
	struct ci_req *ci_req;

	if (ci_ep->desc)
		num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;

	if (num == 0 && controller.ep0_req)
		return &controller.ep0_req->req;

	ci_req = calloc(1, sizeof(*ci_req));
	if (!ci_req)
		return NULL;

	INIT_LIST_HEAD(&ci_req->queue);

	if (num == 0)
		controller.ep0_req = ci_req;

	return &ci_req->req;
}

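/**
 * ci_ep_free_request() - free a request and its bounce buffer
 * @ep:	Endpoint the request belongs to
 * @req:	Request to free
 *
 * For ep0, this also drops the cached controller.ep0_req reference.
 */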
static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	struct ci_req *ci_req = container_of(req, struct ci_req, req);
	int num = -1;

	if (ci_ep->desc)
		num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;

	if (num == 0) {
		if (!controller.ep0_req)
			return;
		controller.ep0_req = NULL;
	}

	if (ci_req->b_buf)
		free(ci_req->b_buf);
	free(ci_req);
}

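/**
 * ep_enable() - configure and enable an endpoint in the controller
 * @num:	Endpoint number
 * @in:	Direction (IN = 1, OUT = 0)
 * @maxpacket:	Maximum packet size to program into the QH
 *
 * Sets the enable, data-toggle-reset and bulk-type bits in EPCTRL and,
 * for non-zero endpoints, writes the max packet length into the QH.
 */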
static void ep_enable(int num, int in, int maxpacket)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	unsigned n;

	n = readl(&udc->epctrl[num]);
	if (in)
		n |= (CTRL_TXE | CTRL_TXR | CTRL_TXT_BULK);
	else
		n |= (CTRL_RXE | CTRL_RXR | CTRL_RXT_BULK);

	if (num != 0) {
		struct ept_queue_head *head = ci_get_qh(num, in);

		head->config = CONFIG_MAX_PKT(maxpacket) | CONFIG_ZLT;
		ci_flush_qh(num);
	}
	writel(n, &udc->epctrl[num]);
}

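/**
 * ci_ep_enable() - gadget API operation to enable an endpoint
 *
 * Clamps the requested max packet size to 64 bytes while the link is
 * running at full speed, then programs the hardware via ep_enable().
 */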
static int ci_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	int num, in;

	num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
	ci_ep->desc = desc;

	if (num) {
		int max = get_unaligned_le16(&desc->wMaxPacketSize);

		if ((max > 64) && (controller.gadget.speed == USB_SPEED_FULL))
			max = 64;
		if (ep->maxpacket != max) {
			DBG("%s: from %d to %d\n", __func__,
			    ep->maxpacket, max);
			ep->maxpacket = max;
		}
	}
	ep_enable(num, in, ep->maxpacket);
	DBG("%s: num=%d maxpacket=%d\n", __func__, num, ep->maxpacket);
	return 0;
}

static int ci_ep_disable(struct usb_ep *ep)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);

	ci_ep->desc = NULL;
	return 0;
}

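/**
 * ci_bounce() - prepare a DMA-able buffer for a request
 * @ci_req:	Request to prepare
 * @in:	Direction (IN = 1, OUT = 0)
 *
 * If the caller's buffer address and length are already cache-aligned,
 * it is used directly; otherwise the data is bounced through an aligned
 * scratch buffer (allocated on demand and grown as needed). In either
 * case the cache is flushed over the range the hardware will access.
 */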
static int ci_bounce(struct ci_req *ci_req, int in)
{
	struct usb_request *req = &ci_req->req;
	unsigned long addr = (unsigned long)req->buf;
	unsigned long hwaddr;
	uint32_t aligned_used_len;

	/* Input buffer address is not aligned. */
	if (addr & (ARCH_DMA_MINALIGN - 1))
		goto align;

	/* Input buffer length is not aligned. */
	if (req->length & (ARCH_DMA_MINALIGN - 1))
		goto align;

	/* The buffer is well aligned, only flush cache. */
	ci_req->hw_len = req->length;
	ci_req->hw_buf = req->buf;
	goto flush;

align:
	if (ci_req->b_buf && req->length > ci_req->b_len) {
		free(ci_req->b_buf);
		ci_req->b_buf = NULL;
	}
	if (!ci_req->b_buf) {
		ci_req->b_len = roundup(req->length, ARCH_DMA_MINALIGN);
		ci_req->b_buf = memalign(ARCH_DMA_MINALIGN, ci_req->b_len);
		if (!ci_req->b_buf)
			return -ENOMEM;
	}
	ci_req->hw_len = ci_req->b_len;
	ci_req->hw_buf = ci_req->b_buf;

	if (in)
		memcpy(ci_req->hw_buf, req->buf, req->length);

flush:
	hwaddr = (unsigned long)ci_req->hw_buf;
	aligned_used_len = roundup(req->length, ARCH_DMA_MINALIGN);
	flush_dcache_range(hwaddr, hwaddr + aligned_used_len);

	return 0;
}

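/**
 * ci_debounce() - finish a transfer prepared by ci_bounce()
 * @ci_req:	Completed request
 * @in:	Direction (IN = 1, OUT = 0)
 *
 * For OUT transfers, invalidates the cache over the received data and,
 * if a bounce buffer was used, copies the data back to the caller's
 * buffer. IN transfers need no post-processing.
 */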
static void ci_debounce(struct ci_req *ci_req, int in)
{
	struct usb_request *req = &ci_req->req;
	unsigned long addr = (unsigned long)req->buf;
	unsigned long hwaddr = (unsigned long)ci_req->hw_buf;
	uint32_t aligned_used_len;

	if (in)
		return;

	aligned_used_len = roundup(req->actual, ARCH_DMA_MINALIGN);
	invalidate_dcache_range(hwaddr, hwaddr + aligned_used_len);

	if (addr == hwaddr)
		return; /* not a bounce */

	memcpy(req->buf, ci_req->hw_buf, req->actual);
}

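/**
 * ci_ep_submit_next_request() - prime the head of an endpoint's queue
 * @ci_ep:	Endpoint whose first queued request should be started
 *
 * Builds a chain of dTDs for the request (splitting it into chunks of
 * at most EP_MAX_LENGTH_TRANSFER bytes), links the chain into the QH,
 * flushes everything to memory and finally primes the endpoint.
 */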
static void ci_ep_submit_next_request(struct ci_ep *ci_ep)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	struct ept_queue_item *item;
	struct ept_queue_head *head;
	int bit, num, len, in;
	struct ci_req *ci_req;
	u8 *buf;
	uint32_t len_left, len_this_dtd;
	struct ept_queue_item *dtd, *qtd;

	ci_ep->req_primed = true;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
	item = ci_get_qtd(num, in);
	head = ci_get_qh(num, in);

	ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue);
	len = ci_req->req.length;

	head->next = (unsigned long)item;
	head->info = 0;

	ci_req->dtd_count = 0;
	buf = ci_req->hw_buf;
	len_left = len;
	dtd = item;

	do {
		len_this_dtd = min(len_left, (unsigned)EP_MAX_LENGTH_TRANSFER);

		dtd->info = INFO_BYTES(len_this_dtd) | INFO_ACTIVE;
		dtd->page0 = (unsigned long)buf;
		dtd->page1 = ((unsigned long)buf & 0xfffff000) + 0x1000;
		dtd->page2 = ((unsigned long)buf & 0xfffff000) + 0x2000;
		dtd->page3 = ((unsigned long)buf & 0xfffff000) + 0x3000;
		dtd->page4 = ((unsigned long)buf & 0xfffff000) + 0x4000;

		len_left -= len_this_dtd;
		buf += len_this_dtd;

		if (len_left) {
			qtd = (struct ept_queue_item *)
			       memalign(ILIST_ALIGN, ILIST_ENT_SZ);
			dtd->next = (unsigned long)qtd;
			dtd = qtd;
			memset(dtd, 0, ILIST_ENT_SZ);
		}

		ci_req->dtd_count++;
	} while (len_left);

	item = dtd;
	/*
	 * When sending the data for an IN transaction, the attached host
	 * knows that all data for the IN is sent when one of the following
	 * occurs:
	 * a) A zero-length packet is transmitted.
	 * b) A packet with length that isn't an exact multiple of the ep's
	 *    maxpacket is transmitted.
	 * c) Enough data is sent to exactly fill the host's maximum expected
	 *    IN transaction size.
	 *
	 * One of these conditions MUST apply at the end of an IN transaction,
	 * or the transaction will not be considered complete by the host. If
	 * none of (a)..(c) already applies, then we must force (a) to apply
	 * by explicitly sending an extra zero-length packet.
	 */
	/*  IN    !a     !b                       !c */
	if (in && len && !(len % ci_ep->ep.maxpacket) && ci_req->req.zero) {
		/*
		 * Each endpoint has 2 items allocated, even though typically
		 * only 1 is used at a time since either an IN or an OUT but
		 * not both is queued. For an IN transaction, item currently
		 * points at the second of these items, so we know that we
		 * can use the other to transmit the extra zero-length packet.
		 */
		struct ept_queue_item *other_item = ci_get_qtd(num, 0);
		item->next = (unsigned long)other_item;
		item = other_item;
		item->info = INFO_ACTIVE;
	}

	item->next = TERMINATE;
	item->info |= INFO_IOC;

	ci_flush_qtd(num);

	item = (struct ept_queue_item *)(unsigned long)head->next;
	while (item->next != TERMINATE) {
		ci_flush_td((struct ept_queue_item *)(unsigned long)item->next);
		item = (struct ept_queue_item *)(unsigned long)item->next;
	}

	DBG("ept%d %s queue len %x, req %p, buffer %p\n",
	    num, in ? "in" : "out", len, ci_req, ci_req->hw_buf);
	ci_flush_qh(num);

	if (in)
		bit = EPT_TX(num);
	else
		bit = EPT_RX(num);

	writel(bit, &udc->epprime);
}

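/**
 * ci_ep_dequeue() - gadget API operation to abort a queued request
 *
 * Removes the request from the endpoint queue and completes it with
 * -ECONNRESET if it was still in progress.
 */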
static int ci_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct ci_ep *ci_ep = container_of(_ep, struct ci_ep, ep);
	struct ci_req *ci_req;

	list_for_each_entry(ci_req, &ci_ep->queue, queue) {
		if (&ci_req->req == _req)
			break;
	}

	if (&ci_req->req != _req)
		return -EINVAL;

	list_del_init(&ci_req->queue);

	if (ci_req->req.status == -EINPROGRESS) {
		ci_req->req.status = -ECONNRESET;
		if (ci_req->req.complete)
			ci_req->req.complete(_ep, _req);
	}

	return 0;
}

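/**
 * ci_ep_queue() - gadget API operation to queue a request
 *
 * Bounces the buffer as needed, appends the request to the endpoint
 * queue and, if nothing is currently primed on this endpoint, submits
 * it to the hardware immediately.
 */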
static int ci_ep_queue(struct usb_ep *ep,
		struct usb_request *req, gfp_t gfp_flags)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	struct ci_req *ci_req = container_of(req, struct ci_req, req);
	int in, ret;
	int __maybe_unused num;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;

	if (!num && ci_ep->req_primed) {
		/*
		 * The flipping of ep0 between IN and OUT relies on
		 * ci_ep_queue consuming the current IN/OUT setting
		 * immediately. If this is deferred to a later point when the
		 * req is pulled out of ci_req->queue, then the IN/OUT setting
		 * may have been changed since the req was queued, and state
		 * will get out of sync. This condition doesn't occur today,
		 * but could if bugs were introduced later, and this error
		 * check will save a lot of debugging time.
		 */
		printf("%s: ep0 transaction already in progress\n", __func__);
		return -EPROTO;
	}

	ret = ci_bounce(ci_req, in);
	if (ret)
		return ret;

	DBG("ept%d %s pre-queue req %p, buffer %p\n",
	    num, in ? "in" : "out", ci_req, ci_req->hw_buf);
	list_add_tail(&ci_req->queue, &ci_ep->queue);

	if (!ci_ep->req_primed)
		ci_ep_submit_next_request(ci_ep);

	return 0;
}

static void flip_ep0_direction(void)
{
	if (ep0_desc.bEndpointAddress == USB_DIR_IN) {
		DBG("%s: Flipping ep0 to OUT\n", __func__);
		ep0_desc.bEndpointAddress = 0;
	} else {
		DBG("%s: Flipping ep0 to IN\n", __func__);
		ep0_desc.bEndpointAddress = USB_DIR_IN;
	}
}

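/**
 * handle_ep_complete() - process a completed transfer on an endpoint
 * @ci_ep:	Endpoint that signalled completion
 *
 * Walks the dTD chain to total up the remaining byte count (freeing the
 * dynamically allocated dTDs along the way), primes the next queued
 * request, completes this one, and for ep0 flips direction and queues
 * the Status Stage once the Data Stage has finished.
 */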
static void handle_ep_complete(struct ci_ep *ci_ep)
{
	struct ept_queue_item *item, *next_td;
	int num, in, len, j;
	struct ci_req *ci_req;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
	item = ci_get_qtd(num, in);
	ci_invalidate_qtd(num);
	ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue);

	next_td = item;
	len = 0;
	for (j = 0; j < ci_req->dtd_count; j++) {
		ci_invalidate_td(next_td);
		item = next_td;
		len += (item->info >> 16) & 0x7fff;
		if (item->info & 0xff)
			printf("EP%d/%s FAIL info=%x pg0=%x\n",
			       num, in ? "in" : "out", item->info, item->page0);
		if (j != ci_req->dtd_count - 1)
			next_td = (struct ept_queue_item *)(unsigned long)
				item->next;
		if (j != 0)
			free(item);
	}

	list_del_init(&ci_req->queue);
	ci_ep->req_primed = false;

	if (!list_empty(&ci_ep->queue))
		ci_ep_submit_next_request(ci_ep);

	ci_req->req.actual = ci_req->req.length - len;
	ci_debounce(ci_req, in);

	DBG("ept%d %s req %p, complete %x\n",
	    num, in ? "in" : "out", ci_req, len);
	if (num != 0 || controller.ep0_data_phase)
		ci_req->req.complete(&ci_ep->ep, &ci_req->req);
	if (num == 0 && controller.ep0_data_phase) {
		/*
		 * Data Stage is complete, so flip ep0 dir for Status Stage,
		 * which always transfers a packet in the opposite direction.
		 */
		DBG("%s: flip ep0 dir for Status Stage\n", __func__);
		flip_ep0_direction();
		controller.ep0_data_phase = false;
		ci_req->req.length = 0;
		usb_ep_queue(&ci_ep->ep, &ci_req->req, 0);
	}
}

#define SETUP(type, request) (((type) << 8) | (request))

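/**
 * handle_setup() - process a SETUP packet received on ep0
 *
 * Copies the setup data out of the ep0 OUT QH, establishes the ep0
 * direction for the Data/Status Stages, handles CLEAR_FEATURE,
 * SET_ADDRESS and GET_STATUS locally, and passes everything else to the
 * gadget driver's setup() callback, stalling ep0 on failure.
 */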
static void handle_setup(void)
{
	struct ci_ep *ci_ep = &controller.ep[0];
	struct ci_req *ci_req;
	struct usb_request *req;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	struct ept_queue_head *head;
	struct usb_ctrlrequest r;
	int status = 0;
	int num, in, _num, _in, i;
	char *buf;

	ci_req = controller.ep0_req;
	req = &ci_req->req;
	head = ci_get_qh(0, 0);	/* EP0 OUT */

	ci_invalidate_qh(0);
	memcpy(&r, head->setup_data, sizeof(struct usb_ctrlrequest));
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
	writel(EPT_RX(0), &udc->epsetupstat);
#else
	writel(EPT_RX(0), &udc->epstat);
#endif
	DBG("handle setup %s, %x, %x index %x value %x length %x\n",
	    reqname(r.bRequest), r.bRequestType, r.bRequest, r.wIndex,
	    r.wValue, r.wLength);

	/* Set EP0 dir for Data Stage based on Setup Stage data */
	if (r.bRequestType & USB_DIR_IN) {
		DBG("%s: Set ep0 to IN for Data Stage\n", __func__);
		ep0_desc.bEndpointAddress = USB_DIR_IN;
	} else {
		DBG("%s: Set ep0 to OUT for Data Stage\n", __func__);
		ep0_desc.bEndpointAddress = 0;
	}
	if (r.wLength) {
		controller.ep0_data_phase = true;
	} else {
		/* 0 length -> no Data Stage. Flip dir for Status Stage */
		DBG("%s: 0 length: flip ep0 dir for Status Stage\n", __func__);
		flip_ep0_direction();
		controller.ep0_data_phase = false;
	}

	list_del_init(&ci_req->queue);
	ci_ep->req_primed = false;

	switch (SETUP(r.bRequestType, r.bRequest)) {
	case SETUP(USB_RECIP_ENDPOINT, USB_REQ_CLEAR_FEATURE):
		_num = r.wIndex & 15;
		_in = !!(r.wIndex & 0x80);

		if ((r.wValue == 0) && (r.wLength == 0)) {
			req->length = 0;
			for (i = 0; i < NUM_ENDPOINTS; i++) {
				struct ci_ep *ep = &controller.ep[i];

				if (!ep->desc)
					continue;
				num = ep->desc->bEndpointAddress
					& USB_ENDPOINT_NUMBER_MASK;
				in = (ep->desc->bEndpointAddress
				      & USB_DIR_IN) != 0;
				if ((num == _num) && (in == _in)) {
					ep_enable(num, in, ep->ep.maxpacket);
					usb_ep_queue(controller.gadget.ep0,
						     req, 0);
					break;
				}
			}
		}
		return;

	case SETUP(USB_RECIP_DEVICE, USB_REQ_SET_ADDRESS):
		/*
		 * write address delayed (will take effect
		 * after the next IN txn)
		 */
		writel((r.wValue << 25) | (1 << 24), &udc->devaddr);
		req->length = 0;
		usb_ep_queue(controller.gadget.ep0, req, 0);
		return;

	case SETUP(USB_DIR_IN | USB_RECIP_DEVICE, USB_REQ_GET_STATUS):
		req->length = 2;
		buf = (char *)req->buf;
		buf[0] = 1 << USB_DEVICE_SELF_POWERED;
		buf[1] = 0;
		usb_ep_queue(controller.gadget.ep0, req, 0);
		return;
	}
	/* pass request up to the gadget driver */
	if (controller.driver)
		status = controller.driver->setup(&controller.gadget, &r);
	else
		status = -ENODEV;

	if (!status)
		return;
	DBG("STALL reqname %s type %x value %x, index %x\n",
	    reqname(r.bRequest), r.bRequestType, r.wValue, r.wIndex);
	writel((1 << 16) | (1 << 0), &udc->epctrl[0]);
}

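/**
 * stop_activity() - flush and quiesce all endpoint activity
 *
 * Acknowledges any pending completion/setup/status bits, flushes all
 * endpoints, disables the non-zero endpoints and resets the QH state of
 * every configured endpoint. Called on bus reset and disconnect.
 */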
static void stop_activity(void)
{
	int i, num, in;
	struct ept_queue_head *head;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

	writel(readl(&udc->epcomp), &udc->epcomp);
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
	writel(readl(&udc->epsetupstat), &udc->epsetupstat);
#endif
	writel(readl(&udc->epstat), &udc->epstat);
	writel(0xffffffff, &udc->epflush);

	/* error out any pending reqs */
	for (i = 0; i < NUM_ENDPOINTS; i++) {
		if (i != 0)
			writel(0, &udc->epctrl[i]);
		if (controller.ep[i].desc) {
			num = controller.ep[i].desc->bEndpointAddress
				& USB_ENDPOINT_NUMBER_MASK;
			in = (controller.ep[i].desc->bEndpointAddress
			      & USB_DIR_IN) != 0;
			head = ci_get_qh(num, in);
			head->info = INFO_ACTIVE;
			ci_flush_qh(num);
		}
	}
}

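/**
 * udc_irq() - main UDC status handler
 *
 * Invoked from usb_gadget_handle_interrupts(). Handles bus reset,
 * suspend and port-change events (updating gadget speed and max packet
 * sizes), then dispatches SETUP packets and transfer-complete events to
 * the appropriate endpoint handlers.
 */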
void udc_irq(void)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	unsigned n = readl(&udc->usbsts);
	int bit, i, num, in;

	writel(n, &udc->usbsts);

	n &= (STS_SLI | STS_URI | STS_PCI | STS_UI | STS_UEI);
	if (n == 0)
		return;

	if (n & STS_URI) {
		DBG("-- reset --\n");
		stop_activity();
	}
	if (n & STS_SLI)
		DBG("-- suspend --\n");

	if (n & STS_PCI) {
		int max = 64;
		int speed = USB_SPEED_FULL;

#ifdef CONFIG_CI_UDC_HAS_HOSTPC
		bit = (readl(&udc->hostpc1_devlc) >> 25) & 3;
#else
		bit = (readl(&udc->portsc) >> 26) & 3;
#endif
		DBG("-- portchange %x %s\n", bit, (bit == 2) ? "High" : "Full");
		if (bit == 2) {
			speed = USB_SPEED_HIGH;
			max = 512;
		}
		controller.gadget.speed = speed;
		for (i = 1; i < NUM_ENDPOINTS; i++) {
			if (controller.ep[i].ep.maxpacket > max)
				controller.ep[i].ep.maxpacket = max;
		}
	}

	if (n & STS_UEI)
		printf("<UEI %x>\n", readl(&udc->epcomp));

	if ((n & STS_UI) || (n & STS_UEI)) {
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
		n = readl(&udc->epsetupstat);
#else
		n = readl(&udc->epstat);
#endif
		if (n & EPT_RX(0))
			handle_setup();

		n = readl(&udc->epcomp);
		if (n != 0)
			writel(n, &udc->epcomp);

		for (i = 0; i < NUM_ENDPOINTS && n; i++) {
			if (controller.ep[i].desc) {
				num = controller.ep[i].desc->bEndpointAddress
					& USB_ENDPOINT_NUMBER_MASK;
				in = (controller.ep[i].desc->bEndpointAddress
				      & USB_DIR_IN) != 0;
				bit = (in) ? EPT_TX(num) : EPT_RX(num);
				if (n & bit)
					handle_ep_complete(&controller.ep[i]);
			}
		}
	}
}

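/**
 * usb_gadget_handle_interrupts() - poll the controller for pending events
 * @index:	Controller index (only one controller is supported here)
 *
 * There is no interrupt context here; gadget code calls this
 * periodically, and any pending status bits are handled by udc_irq().
 */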
int usb_gadget_handle_interrupts(int index)
{
	u32 value;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

	value = readl(&udc->usbsts);
	if (value)
		udc_irq();

	return value;
}

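/**
 * udc_disconnect() - drop off the bus
 *
 * Stops all endpoint activity, stops the controller (releasing the
 * pullup), and notifies the gadget driver of the disconnect.
 */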
void udc_disconnect(void)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

	/* disable pullup */
	stop_activity();
	writel(USBCMD_FS2, &udc->usbcmd);
	udelay(800);
	if (controller.driver)
		controller.driver->disconnect(&controller.gadget);
}

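/**
 * ci_pullup() - gadget API operation to connect/disconnect to the bus
 *
 * On connect, this resets the controller, registers the QH list,
 * switches to device mode and sets the RUN bit to enable the pullup; on
 * disconnect it defers to udc_disconnect().
 */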
static int ci_pullup(struct usb_gadget *gadget, int is_on)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

	if (is_on) {
		/* RESET */
		writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RST, &udc->usbcmd);
		udelay(200);

		ci_init_after_reset(controller.ctrl);

		writel((unsigned long)controller.epts, &udc->epinitaddr);

		/* select DEVICE mode */
		writel(USBMODE_DEVICE, &udc->usbmode);

#if !defined(CONFIG_USB_GADGET_DUALSPEED)
		/* Port force Full-Speed Connect */
		setbits_le32(&udc->portsc, PFSC);
#endif

		writel(0xffffffff, &udc->epflush);

		/* Turn on the USB connection by enabling the pullup resistor */
		setbits_le32(&udc->usbcmd, USBCMD_ITC(MICRO_8FRAME) |
			     USBCMD_RUN);
	} else {
		udc_disconnect();
	}

	return 0;
}

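/**
 * ci_udc_probe() - allocate and initialize the controller state
 *
 * Allocates the 4096-byte-aligned QH list and the qTD pool, configures
 * a QH pair per endpoint, registers the endpoints with the gadget
 * framework and pre-allocates the shared ep0 request.
 */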
static int ci_udc_probe(void)
{
	struct ept_queue_head *head;
	int i;

	const int num = 2 * NUM_ENDPOINTS;

	const int eplist_min_align = 4096;
	const int eplist_align = roundup(eplist_min_align, ARCH_DMA_MINALIGN);
	const int eplist_raw_sz = num * sizeof(struct ept_queue_head);
	const int eplist_sz = roundup(eplist_raw_sz, ARCH_DMA_MINALIGN);

	/* The QH list must be aligned to 4096 bytes. */
	controller.epts = memalign(eplist_align, eplist_sz);
	if (!controller.epts)
		return -ENOMEM;
	memset(controller.epts, 0, eplist_sz);

	controller.items_mem = memalign(ILIST_ALIGN, ILIST_SZ);
	if (!controller.items_mem) {
		free(controller.epts);
		return -ENOMEM;
	}
	memset(controller.items_mem, 0, ILIST_SZ);

	for (i = 0; i < 2 * NUM_ENDPOINTS; i++) {
		/*
		 * Configure the QH for each endpoint. The QH list is laid
		 * out so that each pair of consecutive entries, N and N+1
		 * where N is even, represents one endpoint: the Nth entry
		 * holds the OUT configuration and the (N+1)th entry holds
		 * the IN configuration.
		 */
		head = controller.epts + i;
		if (i < 2)
			head->config = CONFIG_MAX_PKT(EP0_MAX_PACKET_SIZE)
				| CONFIG_ZLT | CONFIG_IOS;
		else
			head->config = CONFIG_MAX_PKT(EP_MAX_PACKET_SIZE)
				| CONFIG_ZLT;
		head->next = TERMINATE;
		head->info = 0;

		if (i & 1) {
			ci_flush_qh(i / 2);
			ci_flush_qtd(i / 2);
		}
	}

	INIT_LIST_HEAD(&controller.gadget.ep_list);

	/* Init EP 0 */
	memcpy(&controller.ep[0].ep, &ci_ep_init[0], sizeof(*ci_ep_init));
	controller.ep[0].desc = &ep0_desc;
	INIT_LIST_HEAD(&controller.ep[0].queue);
	controller.ep[0].req_primed = false;
	controller.gadget.ep0 = &controller.ep[0].ep;
	INIT_LIST_HEAD(&controller.gadget.ep0->ep_list);

	/* Init EP 1..3 */
	for (i = 1; i < 4; i++) {
		memcpy(&controller.ep[i].ep, &ci_ep_init[i],
		       sizeof(*ci_ep_init));
		INIT_LIST_HEAD(&controller.ep[i].queue);
		controller.ep[i].req_primed = false;
		list_add_tail(&controller.ep[i].ep.ep_list,
			      &controller.gadget.ep_list);
	}

	/* Init EP 4..n */
	for (i = 4; i < NUM_ENDPOINTS; i++) {
		memcpy(&controller.ep[i].ep, &ci_ep_init[4],
		       sizeof(*ci_ep_init));
		INIT_LIST_HEAD(&controller.ep[i].queue);
		controller.ep[i].req_primed = false;
		list_add_tail(&controller.ep[i].ep.ep_list,
			      &controller.gadget.ep_list);
	}

	ci_ep_alloc_request(&controller.ep[0].ep, 0);
	if (!controller.ep0_req) {
		free(controller.items_mem);
		free(controller.epts);
		return -ENOMEM;
	}

	return 0;
}

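/**
 * usb_gadget_register_driver() - bring up the UDC and bind a gadget
 *
 * Validates the driver, initializes the low-level EHCI/UDC hardware,
 * probes the controller state and finally calls the driver's bind()
 * callback.
 */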
int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	int ret;

	if (!driver)
		return -EINVAL;
	if (!driver->bind || !driver->setup || !driver->disconnect)
		return -EINVAL;
	if (driver->speed != USB_SPEED_FULL && driver->speed != USB_SPEED_HIGH)
		return -EINVAL;

#if CONFIG_IS_ENABLED(DM_USB)
	ret = usb_setup_ehci_gadget(&controller.ctrl);
#else
	ret = usb_lowlevel_init(0, USB_INIT_DEVICE, (void **)&controller.ctrl);
#endif
	if (ret)
		return ret;

	ret = ci_udc_probe();
	if (ret) {
		DBG("udc probe failed, returned %d\n", ret);
		return ret;
	}

	ret = driver->bind(&controller.gadget);
	if (ret) {
		DBG("driver->bind() returned %d\n", ret);
		return ret;
	}
	controller.driver = driver;

	return 0;
}

int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	udc_disconnect();

	driver->unbind(&controller.gadget);
	controller.driver = NULL;

	ci_ep_free_request(&controller.ep[0].ep, &controller.ep0_req->req);
	free(controller.items_mem);
	free(controller.epts);

	return 0;
}

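/**
 * dfu_usb_get_reset() - check whether a USB bus reset was signalled
 *
 * Reads the URI bit from the status register without clearing it, so
 * callers (e.g. the DFU detach path) can tell that the host has reset
 * the bus.
 */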
bool dfu_usb_get_reset(void)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

	return !!(readl(&udc->usbsts) & STS_URI);
}