/**
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "core.h"
#include "gadget.h"
#include "io.h"

/**
 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K, SE0 NAK, Force Enable)
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -EINVAL if a wrong Test Selector
 * is passed.
 */
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
	u32		reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;

	switch (mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		reg |= mode << 1;
		break;
	default:
		return -EINVAL;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}
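
/*
 * Editor's illustrative sketch (hypothetical, not part of the original file):
 * a caller would hold dwc->lock and pass one of the ch9 test selectors, e.g.
 * from a USB2 test-mode SET_FEATURE handler:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	spin_lock_irqsave(&dwc->lock, flags);
 *	ret = dwc3_gadget_set_test_mode(dwc, TEST_PACKET);
 *	spin_unlock_irqrestore(&dwc->lock, flags);
 *	if (ret)
 *		dev_err(dwc->dev, "invalid test selector\n");
 */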

/**
 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int		retries = 10000;
	u32		reg;

	/*
	 * Wait until device controller is ready. Only applies to 1.94a and
	 * later RTL.
	 */
	if (dwc->revision >= DWC3_REVISION_194A) {
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (dwc->revision >= DWC3_REVISION_194A)
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	dev_vdbg(dwc->dev, "link state change request timed out\n");

	return -ETIMEDOUT;
}
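
/*
 * Editor's illustrative note (not part of the original file): each polling
 * loop above is bounded at roughly 10000 iterations * 5 us = 50 ms of busy
 * waiting before the function gives up and returns -ETIMEDOUT.
 */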

/**
 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
 * @dwc: pointer to our context structure
 *
 * This function makes a best-effort FIFO allocation in order
 * to improve FIFO usage and throughput, while still allowing
 * us to enable as many endpoints as possible.
 *
 * Keep in mind that this operation will be highly dependent
 * on the configured size for RAM1 - which contains TxFifo -,
 * the number of endpoints enabled in the coreConsultant tool, and
 * the width of the Master Bus.
 *
 * In the ideal world, we would always be able to satisfy the
 * following equation:
 *
 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
 *
 * Unfortunately, due to many variables that's not always the case.
 */
int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
{
	int		last_fifo_depth = 0;
	int		ram1_depth;
	int		fifo_size;
	int		mdwidth;
	int		num;

	if (!dwc->needs_fifo_resize)
		return 0;

	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);

	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth >>= 3;

	/*
	 * FIXME For now we will only allocate 1 wMaxPacketSize space
	 * for each enabled endpoint, later patches will come to
	 * improve this algorithm so that we better use the internal
	 * FIFO space
	 */
	for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
		struct dwc3_ep	*dep = dwc->eps[num];
		int		fifo_number = dep->number >> 1;
		int		mult = 1;
		int		tmp;

		if (!(dep->number & 1))
			continue;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
				|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
			mult = 3;

		/*
		 * REVISIT: the following assumes we will always have enough
		 * space available on the FIFO RAM for all possible use cases.
		 * Make sure that's true somehow and change FIFO allocation
		 * accordingly.
		 *
		 * If we have Bulk or Isochronous endpoints, we want
		 * them to be able to be very, very fast. So we're giving
		 * those endpoints a fifo_size which is enough for 3 full
		 * packets
		 */
		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
		tmp += mdwidth;

		fifo_size = DIV_ROUND_UP(tmp, mdwidth);

		fifo_size |= (last_fifo_depth << 16);

		dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
				dep->name, last_fifo_depth, fifo_size & 0xffff);

		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
				fifo_size);

		last_fifo_depth += (fifo_size & 0xffff);
	}

	return 0;
}
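
/*
 * Editor's worked example (illustrative, not from the original source): for a
 * bulk IN endpoint with maxpacket = 1024 on a core whose MDWIDTH is 64 bits
 * (mdwidth = 8 bytes), the loop above computes
 *
 *	tmp       = 3 * (1024 + 8) + 8 = 3104
 *	fifo_size = DIV_ROUND_UP(3104, 8) = 388 (in MDWIDTH-sized units)
 *
 * The low 16 bits of GTXFIFOSIZ carry that depth and the upper 16 bits carry
 * the running start address (last_fifo_depth).
 */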

void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3			*dwc = dep->dwc;
	int				i;

	if (req->queued) {
		i = 0;
		do {
			dep->busy_slot++;
			/*
			 * Skip LINK TRB. We can't use req->trb and check for
			 * DWC3_TRBCTL_LINK_TRB because it points to the TRB we
			 * just completed (not the LINK TRB).
			 */
			if (((dep->busy_slot & DWC3_TRB_MASK) ==
				DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->endpoint.desc))
				dep->busy_slot++;
		} while (++i < req->request.num_mapped_sgs);
		req->queued = false;
	}
	list_del(&req->list);
	req->trb = NULL;

	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	if (dwc->ep0_bounced && dep->number == 0)
		dwc->ep0_bounced = false;
	else
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);

	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
			req, dep->name, req->request.actual,
			req->request.length, status);

	spin_unlock(&dwc->lock);
	req->request.complete(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}

static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
{
	switch (cmd) {
	case DWC3_DEPCMD_DEPSTARTCFG:
		return "Start New Configuration";
	case DWC3_DEPCMD_ENDTRANSFER:
		return "End Transfer";
	case DWC3_DEPCMD_UPDATETRANSFER:
		return "Update Transfer";
	case DWC3_DEPCMD_STARTTRANSFER:
		return "Start Transfer";
	case DWC3_DEPCMD_CLEARSTALL:
		return "Clear Stall";
	case DWC3_DEPCMD_SETSTALL:
		return "Set Stall";
	case DWC3_DEPCMD_GETEPSTATE:
		return "Get Endpoint State";
	case DWC3_DEPCMD_SETTRANSFRESOURCE:
		return "Set Endpoint Transfer Resource";
	case DWC3_DEPCMD_SETEPCONFIG:
		return "Set Endpoint Configuration";
	default:
		return "UNKNOWN command";
	}
}

int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param)
{
	u32		timeout = 500;
	u32		reg;

	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DGCMD_STATUS(reg));
			if (DWC3_DGCMD_STATUS(reg))
				return -EINVAL;
			return 0;
		}

		/*
		 * We can't sleep here, because it's also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;
		udelay(1);
	} while (1);
}

int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
	struct dwc3_ep		*dep = dwc->eps[ep];
	u32			timeout = 500;
	u32			reg;

	dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
			dep->name,
			dwc3_gadget_ep_cmd_string(cmd), params->param0,
			params->param1, params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DEPCMD_STATUS(reg));
			if (DWC3_DEPCMD_STATUS(reg))
				return -EINVAL;
			return 0;
		}

		/*
		 * We can't sleep here, because it is also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;

		udelay(1);
	} while (1);
}
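
/*
 * Editor's illustrative sketch (hypothetical, not part of the original file):
 * issuing an endpoint command follows the pattern "fill a
 * dwc3_gadget_ep_cmd_params, then call dwc3_send_gadget_ep_cmd()", e.g.
 *
 *	struct dwc3_gadget_ep_cmd_params params;
 *
 *	memset(&params, 0x00, sizeof(params));
 *	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
 *	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
 *			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
 *
 * which mirrors what dwc3_gadget_set_xfer_resource() does further below.
 */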

static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
		struct dwc3_trb *trb)
{
	u32		offset = (char *) trb - (char *) dep->trb_pool;

	return dep->trb_pool_dma + offset;
}

static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;

	if (dep->trb_pool)
		return 0;

	if (dep->number == 0 || dep->number == 1)
		return 0;

	dep->trb_pool = dma_alloc_coherent(dwc->dev,
			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			&dep->trb_pool_dma, GFP_KERNEL);
	if (!dep->trb_pool) {
		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	return 0;
}

static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;

	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			dep->trb_pool, dep->trb_pool_dma);

	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}

static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32			cmd;

	memset(&params, 0x00, sizeof(params));

	if (dep->number != 1) {
		cmd = DWC3_DEPCMD_DEPSTARTCFG;
		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
		if (dep->number > 1) {
			if (dwc->start_config_issued)
				return 0;
			dwc->start_config_issued = true;
			cmd |= DWC3_DEPCMD_PARAM(2);
		}

		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
	}

	return 0;
}

static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed == USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst - 1;

		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
	}

	if (ignore)
		params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;

	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
		| DWC3_DEPCFG_XFER_NOT_READY_EN;

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (usb_endpoint_xfer_isoc(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETEPCONFIG, &params);
}

static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
}

/**
 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
 * @dep: endpoint to be initialized
 * @desc: USB Endpoint Descriptor
 *
 * Caller should take care of locking
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;
	int			ret = -ENOMEM;

	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dwc, dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb	*trb_st_hw;
		struct dwc3_trb	*trb_link;

		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
		if (ret)
			return ret;

		dep->endpoint.desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		if (!usb_endpoint_xfer_isoc(desc))
			return 0;

		/* Link TRB for ISOC. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
		memset(trb_link, 0, sizeof(*trb_link));

		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	return 0;
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);

static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request		*req;

	if (!list_empty(&dep->req_queued)) {
		dwc3_stop_active_transfer(dwc, dep->number);

		/* - giveback all requests to gadget driver */
		while (!list_empty(&dep->req_queued)) {
			req = next_request(&dep->req_queued);

			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
		}
	}

	while (!list_empty(&dep->request_list)) {
		req = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}

/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are not yet scheduled.
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;

	dwc3_remove_requests(dwc, dep);

	/* make sure HW endpoint isn't stalled */
	if (dep->flags & DWC3_EP_STALL)
		__dwc3_gadget_ep_set_halt(dep, 0, false);

	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->endpoint.desc = NULL;
	dep->comp_desc = NULL;
	dep->type = 0;
	dep->flags = 0;

	return 0;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (dep->flags & DWC3_EP_ENABLED) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
				dep->name);
		return 0;
	}

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		strlcat(dep->name, "-control", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_ISOC:
		strlcat(dep->name, "-isoc", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_BULK:
		strlcat(dep->name, "-bulk", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_INT:
		strlcat(dep->name, "-int", sizeof(dep->name));
		break;
	default:
		dev_err(dwc->dev, "invalid endpoint transfer type\n");
	}

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep			*dep;
	struct dwc3			*dwc;
	unsigned long			flags;
	int				ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
				dep->name);
		return 0;
	}

	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
			dep->number >> 1,
			(dep->number & 1) ? "in" : "out");

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
	gfp_t gfp_flags)
{
	struct dwc3_request		*req;
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req) {
		dev_err(dwc->dev, "not enough memory\n");
		return NULL;
	}

	req->epnum	= dep->number;
	req->dep	= dep;

	return &req->request;
}

static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);

	kfree(req);
}

/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, dma_addr_t dma,
		unsigned length, unsigned last, unsigned chain, unsigned node)
{
	struct dwc3		*dwc = dep->dwc;
	struct dwc3_trb		*trb;

	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
			dep->name, req, (unsigned long long) dma,
			length, last ? " last" : "",
			chain ? " chain" : "");

	/* Skip the LINK-TRB on ISOC */
	if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
			usb_endpoint_xfer_isoc(dep->endpoint.desc))
		dep->free_slot++;

	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];

	if (!req->trb) {
		dwc3_gadget_move_request_queued(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
		req->start_slot = dep->free_slot & DWC3_TRB_MASK;
	}

	dep->free_slot++;

	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (!node)
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
		else
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;

		if (!req->request.no_interrupt && !chain)
			trb->ctrl |= DWC3_TRB_CTRL_IOC;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		BUG();
	}

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		trb->ctrl |= DWC3_TRB_CTRL_CSP;
	} else if (last) {
		trb->ctrl |= DWC3_TRB_CTRL_LST;
	}

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);

	trb->ctrl |= DWC3_TRB_CTRL_HWO;
}

/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 * @starting: true if the endpoint is idle and no requests are queued.
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
	struct dwc3_request	*req, *n;
	u32			trbs_left;
	u32			max;
	unsigned int		last_one = 0;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/* the first request must not be queued */
	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;

	/* Can't wrap around on a non-isoc EP since there's no link TRB */
	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
		if (trbs_left > max)
			trbs_left = max;
	}

	/*
	 * If busy and free slots are equal then the ring is either full or
	 * empty. If we are starting to process requests then we are empty.
	 * Otherwise we are full and don't do anything.
	 */
	if (!trbs_left) {
		if (!starting)
			return;
		trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use a ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * an IOC bit every TRB_NUM/4. We try to avoid having an
		 * interrupt after the first request so we start at slot 1 and
		 * have 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
		return;

	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		unsigned	length;
		dma_addr_t	dma;
		last_one = false;

		if (req->request.num_mapped_sgs > 0) {
			struct usb_request *request = &req->request;
			struct scatterlist *sg = request->sg;
			struct scatterlist *s;
			int		i;

			for_each_sg(sg, s, request->num_mapped_sgs, i) {
				unsigned chain = true;

				length = sg_dma_len(s);
				dma = sg_dma_address(s);

				if (i == (request->num_mapped_sgs - 1) ||
						sg_is_last(s)) {
					if (list_empty(&dep->request_list))
						last_one = true;
					chain = false;
				}

				trbs_left--;
				if (!trbs_left)
					last_one = true;

				if (last_one)
					chain = false;

				dwc3_prepare_one_trb(dep, req, dma, length,
						last_one, chain, i);

				if (last_one)
					break;
			}

			if (last_one)
				break;
		} else {
			dma = req->request.dma;
			length = req->request.length;
			trbs_left--;

			if (!trbs_left)
				last_one = 1;

			/* Is this the last request? */
			if (list_is_last(&req->list, &dep->request_list))
				last_one = 1;

			dwc3_prepare_one_trb(dep, req, dma, length,
					last_one, false, 0);

			if (last_one)
				break;
		}
	}
}
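
/*
 * Editor's illustrative note (not part of the original file): busy_slot and
 * free_slot are free-running counters and only their low bits index the TRB
 * ring. Assuming the usual DWC3_TRB_NUM of 32 (mask 0x1f), a free_slot of 33
 * selects trb_pool[33 & 0x1f] = trb_pool[1], and the space check above is
 * simply
 *
 *	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
 */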

static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
		int start_new)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request		*req;
	struct dwc3			*dwc = dep->dwc;
	int				ret;
	u32				cmd;

	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
		return -EBUSY;
	}
	dep->flags &= ~DWC3_EP_PENDING_REQUEST;

	/*
	 * If we are getting here after a short-out-packet we don't enqueue any
	 * new requests as we try to set the IOC bit only on the last request.
	 */
	if (start_new) {
		if (list_empty(&dep->req_queued))
			dwc3_prepare_trbs(dep, start_new);

		/* req points to the first request which will be sent */
		req = next_request(&dep->req_queued);
	} else {
		dwc3_prepare_trbs(dep, start_new);

		/*
		 * req points to the first request where HWO changed from 0 to 1
		 */
		req = next_request(&dep->req_queued);
	}
	if (!req) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));

	if (start_new) {
		params.param0 = upper_32_bits(req->trb_dma);
		params.param1 = lower_32_bits(req->trb_dma);
		cmd = DWC3_DEPCMD_STARTTRANSFER;
	} else {
		cmd = DWC3_DEPCMD_UPDATETRANSFER;
	}

	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");

		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);
		list_del(&req->list);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;

	if (start_new) {
		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
				dep->number);
		WARN_ON_ONCE(!dep->resource_index);
	}

	return 0;
}

static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, u32 cur_uf)
{
	u32 uf;

	if (list_empty(&dep->request_list)) {
		dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
			dep->name);
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return;
	}

	/* 4 micro frames in the future */
	uf = cur_uf + dep->interval * 4;

	__dwc3_gadget_kick_transfer(dep, uf, 1);
}

static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
	u32 cur_uf, mask;

	mask = ~(dep->interval - 1);
	cur_uf = event->parameters & mask;

	__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
}
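
/*
 * Editor's worked example (illustrative, not from the original source): for
 * an isochronous endpoint with bInterval = 4, dep->interval is
 * 1 << (4 - 1) = 8 microframes. The mask above is then ~(8 - 1) = ~7, which
 * rounds the XferNotReady microframe down to an interval boundary, and the
 * transfer is scheduled four intervals (8 * 4 = 32 microframes) later.
 */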

static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3		*dwc = dep->dwc;
	int			ret;

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;
	req->direction		= dep->direction;
	req->epnum		= dep->number;

	/*
	 * We only add to our list of requests now and
	 * start consuming the list once we get XferNotReady
	 * IRQ.
	 *
	 * That way, we avoid doing anything that we don't need
	 * to do now and defer it until the point we receive a
	 * particular token from the Host side.
	 *
	 * This will also avoid Host cancelling URBs due to too
	 * many NAKs.
	 */
	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
			dep->direction);
	if (ret)
		return ret;

	list_add_tail(&req->list, &dep->request_list);

	/*
	 * There are a few special cases:
	 *
	 * 1. XferNotReady with empty list of requests. We need to kick the
	 *    transfer here in that situation, otherwise we will be NAKing
	 *    forever. If we get XferNotReady before gadget driver has a
	 *    chance to queue a request, we will ACK the IRQ but won't be
	 *    able to receive the data until the next request is queued.
	 *    The following code is handling exactly that.
	 *
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		/*
		 * If xfernotready is already elapsed and it is a case
		 * of isoc transfer, then issue END TRANSFER, so that
		 * you can receive xfernotready again and can have
		 * notion of current microframe.
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			if (list_empty(&dep->req_queued)) {
				dwc3_stop_active_transfer(dwc, dep->number);
				dep->flags = DWC3_EP_ENABLED;
			}
			return 0;
		}

		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
		if (ret && ret != -EBUSY)
			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		return ret;
	}

	/*
	 * 2. XferInProgress on Isoc EP with an active transfer. We need to
	 *    kick the transfer here after queuing a request, otherwise the
	 *    core may not see the modified TRB(s).
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			(dep->flags & DWC3_EP_BUSY) &&
			!(dep->flags & DWC3_EP_MISSED_ISOC)) {
		WARN_ON_ONCE(!dep->resource_index);
		ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
				false);
		if (ret && ret != -EBUSY)
			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		return ret;
	}

	return 0;
}
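
/*
 * Editor's illustrative sketch (hypothetical, not part of the original file):
 * this path is normally reached from a gadget function driver through the
 * generic endpoint API, roughly:
 *
 *	struct usb_request *r = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *
 *	r->buf = buf;
 *	r->length = len;
 *	r->complete = my_complete;	// hypothetical completion handler
 *	status = usb_ep_queue(ep, r, GFP_ATOMIC);
 *
 * usb_ep_queue() ends up in dwc3_gadget_ep_queue() below, which takes
 * dwc->lock and calls __dwc3_gadget_ep_queue().
 */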

static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
	gfp_t gfp_flags)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;

	int				ret;

	if (!dep->endpoint.desc) {
		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
				request, ep->name);
		return -ESHUTDOWN;
	}

	dev_vdbg(dwc->dev, "queueing request %p to %s length %d\n",
			request, ep->name, request->length);

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_queue(dep, req);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_request		*r = NULL;

	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;
	int				ret = 0;

	spin_lock_irqsave(&dwc->lock, flags);

	list_for_each_entry(r, &dep->request_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		list_for_each_entry(r, &dep->req_queued, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number);
			goto out1;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

out1:
	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
{
	struct dwc3_gadget_ep_cmd_params	params;
	struct dwc3				*dwc = dep->dwc;
	int					ret;

	memset(&params, 0x00, sizeof(params));

	if (value) {
		if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
				(!list_empty(&dep->req_queued) ||
				 !list_empty(&dep->request_list)))) {
			dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
					dep->name);
			return -EAGAIN;
		}

		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to %s STALL on %s\n",
					value ? "set" : "clear",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_CLEARSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to %s STALL on %s\n",
					value ? "set" : "clear",
					dep->name);
		else
			dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
	}

	return ret;
}

static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;

	int				ret;

	spin_lock_irqsave(&dwc->lock, flags);

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
		ret = -EINVAL;
		goto out;
	}

	ret = __dwc3_gadget_ep_set_halt(dep, value, false);
out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;
	unsigned long			flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dep->flags |= DWC3_EP_WEDGE;
	spin_unlock_irqrestore(&dwc->lock, flags);

	if (dep->number == 0 || dep->number == 1)
		return dwc3_gadget_ep0_set_halt(ep, 1);
	else
		return __dwc3_gadget_ep_set_halt(dep, 1, false);
}
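
/*
 * Editor's illustrative sketch (hypothetical, not part of the original file):
 * function drivers reach the two ops above through the generic endpoint API,
 * e.g.
 *
 *	usb_ep_set_halt(ep);	// -> dwc3_gadget_ep_set_halt(ep, 1)
 *	usb_ep_clear_halt(ep);	// -> dwc3_gadget_ep_set_halt(ep, 0)
 *	usb_ep_set_wedge(ep);	// -> dwc3_gadget_ep_set_wedge(ep)
 *
 * A halt request may fail with -EAGAIN while transfers are still pending, as
 * handled in __dwc3_gadget_ep_set_halt() above.
 */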

/* -------------------------------------------------------------------------- */

static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
	.bLength	= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
};

static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
	.enable		= dwc3_gadget_ep0_enable,
	.disable	= dwc3_gadget_ep0_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep0_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep0_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

static const struct usb_ep_ops dwc3_gadget_ep_ops = {
	.enable		= dwc3_gadget_ep_enable,
	.disable	= dwc3_gadget_ep_disable,
	.alloc_request	= dwc3_gadget_ep_alloc_request,
	.free_request	= dwc3_gadget_ep_free_request,
	.queue		= dwc3_gadget_ep_queue,
	.dequeue	= dwc3_gadget_ep_dequeue,
	.set_halt	= dwc3_gadget_ep_set_halt,
	.set_wedge	= dwc3_gadget_ep_set_wedge,
};

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_get_frame(struct usb_gadget *g)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	u32			reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
	return DWC3_DSTS_SOFFN(reg);
}

static int dwc3_gadget_wakeup(struct usb_gadget *g)
{
	struct dwc3		*dwc = gadget_to_dwc(g);

	unsigned long		timeout;
	unsigned long		flags;

	u32			reg;

	int			ret = 0;

	u8			link_state;
	u8			speed;

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * According to the Databook, a remote wakeup request should
	 * be issued only when the device is in the early suspend state.
	 *
	 * We can check that via USB Link State bits in DSTS register.
	 */
	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	speed = reg & DWC3_DSTS_CONNECTSPD;
	if (speed == DWC3_DSTS_SUPERSPEED) {
		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
		ret = -EINVAL;
		goto out;
	}

	link_state = DWC3_DSTS_USBLNKST(reg);

	switch (link_state) {
	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
		break;
	default:
		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
				link_state);
		ret = -EINVAL;
		goto out;
	}

	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
	if (ret < 0) {
		dev_err(dwc->dev, "failed to put link in Recovery\n");
		goto out;
	}

	/* Recent versions do this automatically */
	if (dwc->revision < DWC3_REVISION_194A) {
		/* write zeroes to Link Change Request */
		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
	}

	/* poll until Link State changes to ON */
	timeout = jiffies + msecs_to_jiffies(100);

	while (!time_after(jiffies, timeout)) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		/* in HS, means ON */
		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
			break;
	}

	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
		dev_err(dwc->dev, "failed to send remote wakeup\n");
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
		int is_selfpowered)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
{
	u32			reg;
	u32			timeout = 500;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (is_on) {
		if (dwc->revision <= DWC3_REVISION_187A) {
			reg &= ~DWC3_DCTL_TRGTULST_MASK;
			reg |= DWC3_DCTL_TRGTULST_RX_DET;
		}

		if (dwc->revision >= DWC3_REVISION_194A)
			reg &= ~DWC3_DCTL_KEEP_CONNECT;
		reg |= DWC3_DCTL_RUN_STOP;
		dwc->pullups_connected = true;
	} else {
		reg &= ~DWC3_DCTL_RUN_STOP;
		dwc->pullups_connected = false;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (is_on) {
			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
				break;
		} else {
			if (reg & DWC3_DSTS_DEVCTRLHLT)
				break;
		}
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;
		udelay(1);
	} while (1);

	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
			dwc->gadget_driver
			? dwc->gadget_driver->function : "no-function",
			is_on ? "connect" : "disconnect");

	return 0;
}

static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;
	int			ret;

	is_on = !!is_on;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = dwc3_gadget_run_stop(dwc, is_on);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
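
/*
 * Editor's illustrative note (hypothetical, not part of the original file):
 * .pullup is invoked by the UDC core, e.g. usb_gadget_connect(&dwc->gadget)
 * ends up calling dwc3_gadget_pullup(g, 1), which sets DCTL.RUN_STOP via
 * dwc3_gadget_run_stop() and waits for DSTS.DEVCTRLHLT to clear.
 */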

static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
{
	u32			reg;

	/* Enable all but Start and End of Frame IRQs */
	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
			DWC3_DEVTEN_EVNTOVERFLOWEN |
			DWC3_DEVTEN_CMDCMPLTEN |
			DWC3_DEVTEN_ERRTICERREN |
			DWC3_DEVTEN_WKUPEVTEN |
			DWC3_DEVTEN_ULSTCNGEN |
			DWC3_DEVTEN_CONNECTDONEEN |
			DWC3_DEVTEN_USBRSTEN |
			DWC3_DEVTEN_DISCONNEVTEN);

	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}

static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
{
	/* mask all interrupts */
	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
}

static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
72246da4
FB
1495static int dwc3_gadget_start(struct usb_gadget *g,
1496 struct usb_gadget_driver *driver)
1497{
1498 struct dwc3 *dwc = gadget_to_dwc(g);
1499 struct dwc3_ep *dep;
1500 unsigned long flags;
1501 int ret = 0;
8698e2ac 1502 int irq;
72246da4
FB
1503 u32 reg;
1504
b0d7ffd4
FB
1505 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1506 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
e8adfc30 1507 IRQF_SHARED, "dwc3", dwc);
b0d7ffd4
FB
1508 if (ret) {
1509 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1510 irq, ret);
1511 goto err0;
1512 }
1513
72246da4
FB
1514 spin_lock_irqsave(&dwc->lock, flags);
1515
1516 if (dwc->gadget_driver) {
1517 dev_err(dwc->dev, "%s is already bound to %s\n",
1518 dwc->gadget.name,
1519 dwc->gadget_driver->driver.name);
1520 ret = -EBUSY;
b0d7ffd4 1521 goto err1;
72246da4
FB
1522 }
1523
1524 dwc->gadget_driver = driver;
72246da4 1525
72246da4
FB
1526 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1527 reg &= ~(DWC3_DCFG_SPEED_MASK);
07e7f47b
FB
1528
1529 /**
1530 * WORKAROUND: DWC3 revision < 2.20a have an issue
1531 * which would cause metastability state on Run/Stop
1532 * bit if we try to force the IP to USB2-only mode.
1533 *
1534 * Because of that, we cannot configure the IP to any
1535 * speed other than the SuperSpeed
1536 *
1537 * Refers to:
1538 *
1539 * STAR#9000525659: Clock Domain Crossing on DCTL in
1540 * USB 2.0 Mode
1541 */
f7e846f0 1542 if (dwc->revision < DWC3_REVISION_220A) {
07e7f47b 1543 reg |= DWC3_DCFG_SUPERSPEED;
f7e846f0
FB
1544 } else {
1545 switch (dwc->maximum_speed) {
1546 case USB_SPEED_LOW:
1547 reg |= DWC3_DSTS_LOWSPEED;
1548 break;
1549 case USB_SPEED_FULL:
1550 reg |= DWC3_DSTS_FULLSPEED1;
1551 break;
1552 case USB_SPEED_HIGH:
1553 reg |= DWC3_DSTS_HIGHSPEED;
1554 break;
1555 case USB_SPEED_SUPER: /* FALLTHROUGH */
1556 case USB_SPEED_UNKNOWN: /* FALTHROUGH */
1557 default:
1558 reg |= DWC3_DSTS_SUPERSPEED;
1559 }
1560 }
72246da4
FB
1561 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1562
b23c8439
PZ
1563 dwc->start_config_issued = false;
1564
72246da4
FB
1565 /* Start with SuperSpeed Default */
1566 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1567
1568 dep = dwc->eps[0];
4b345c9a 1569 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
72246da4
FB
1570 if (ret) {
1571 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
b0d7ffd4 1572 goto err2;
72246da4
FB
1573 }
1574
1575 dep = dwc->eps[1];
4b345c9a 1576 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
72246da4
FB
1577 if (ret) {
1578 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
b0d7ffd4 1579 goto err3;
72246da4
FB
1580 }
1581
1582 /* begin to receive SETUP packets */
c7fcdeb2 1583 dwc->ep0state = EP0_SETUP_PHASE;
72246da4
FB
1584 dwc3_ep0_out_start(dwc);
1585
8698e2ac
FB
1586 dwc3_gadget_enable_irq(dwc);
1587
72246da4
FB
1588 spin_unlock_irqrestore(&dwc->lock, flags);
1589
1590 return 0;
1591
b0d7ffd4 1592err3:
72246da4
FB
1593 __dwc3_gadget_ep_disable(dwc->eps[0]);
1594
b0d7ffd4 1595err2:
cdcedd69 1596 dwc->gadget_driver = NULL;
b0d7ffd4
FB
1597
1598err1:
72246da4
FB
1599 spin_unlock_irqrestore(&dwc->lock, flags);
1600
b0d7ffd4
FB
1601 free_irq(irq, dwc);
1602
1603err0:
72246da4
FB
1604 return ret;
1605}
1606

static int dwc3_gadget_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct dwc3		*dwc = gadget_to_dwc(g);
	unsigned long		flags;
	int			irq;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc3_gadget_disable_irq(dwc);
	__dwc3_gadget_ep_disable(dwc->eps[0]);
	__dwc3_gadget_ep_disable(dwc->eps[1]);

	dwc->gadget_driver	= NULL;

	spin_unlock_irqrestore(&dwc->lock, flags);

	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
	free_irq(irq, dwc);

	return 0;
}
72246da4
FB
1630static const struct usb_gadget_ops dwc3_gadget_ops = {
1631 .get_frame = dwc3_gadget_get_frame,
1632 .wakeup = dwc3_gadget_wakeup,
1633 .set_selfpowered = dwc3_gadget_set_selfpowered,
1634 .pullup = dwc3_gadget_pullup,
1635 .udc_start = dwc3_gadget_start,
1636 .udc_stop = dwc3_gadget_stop,
1637};
1638
1639/* -------------------------------------------------------------------------- */
1640
6a1e3ef4
FB
1641static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1642 u8 num, u32 direction)
72246da4
FB
1643{
1644 struct dwc3_ep *dep;
6a1e3ef4 1645 u8 i;
72246da4 1646
6a1e3ef4
FB
1647 for (i = 0; i < num; i++) {
1648 u8 epnum = (i << 1) | (!!direction);
72246da4 1649
72246da4
FB
1650 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1651 if (!dep) {
1652 dev_err(dwc->dev, "can't allocate endpoint %d\n",
1653 epnum);
1654 return -ENOMEM;
1655 }
1656
1657 dep->dwc = dwc;
1658 dep->number = epnum;
9aa62ae4 1659 dep->direction = !!direction;
72246da4
FB
1660 dwc->eps[epnum] = dep;
1661
1662 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1663 (epnum & 1) ? "in" : "out");
6a1e3ef4 1664
72246da4 1665 dep->endpoint.name = dep->name;
72246da4 1666
653df35e
FB
1667 dev_vdbg(dwc->dev, "initializing %s\n", dep->name);
1668
72246da4 1669 if (epnum == 0 || epnum == 1) {
e117e742 1670 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
6048e4c6 1671 dep->endpoint.maxburst = 1;
72246da4
FB
1672 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1673 if (!epnum)
1674 dwc->gadget.ep0 = &dep->endpoint;
1675 } else {
1676 int ret;
1677
e117e742 1678 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
12d36c16 1679 dep->endpoint.max_streams = 15;
72246da4
FB
1680 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1681 list_add_tail(&dep->endpoint.ep_list,
1682 &dwc->gadget.ep_list);
1683
1684 ret = dwc3_alloc_trb_pool(dep);
25b8ff68 1685 if (ret)
72246da4 1686 return ret;
72246da4 1687 }
25b8ff68 1688
72246da4
FB
1689 INIT_LIST_HEAD(&dep->request_list);
1690 INIT_LIST_HEAD(&dep->req_queued);
1691 }
1692
1693 return 0;
1694}
1695

static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
{
	int				ret;

	INIT_LIST_HEAD(&dwc->gadget.ep_list);

	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
	if (ret < 0) {
		dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n");
		return ret;
	}

	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
	if (ret < 0) {
		dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n");
		return ret;
	}

	return 0;
}
72246da4
FB
1717static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1718{
1719 struct dwc3_ep *dep;
1720 u8 epnum;
1721
1722 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1723 dep = dwc->eps[epnum];
6a1e3ef4
FB
1724 if (!dep)
1725 continue;
5bf8fae3
GC
1726 /*
1727 * Physical endpoints 0 and 1 are special; they form the
1728 * bi-directional USB endpoint 0.
1729 *
1730 * For those two physical endpoints, we don't allocate a TRB
1731 * pool nor do we add them the endpoints list. Due to that, we
1732 * shouldn't do these two operations otherwise we would end up
1733 * with all sorts of bugs when removing dwc3.ko.
1734 */
1735 if (epnum != 0 && epnum != 1) {
1736 dwc3_free_trb_pool(dep);
72246da4 1737 list_del(&dep->endpoint.ep_list);
5bf8fae3 1738 }
72246da4
FB
1739
1740 kfree(dep);
1741 }
1742}
1743
72246da4 1744/* -------------------------------------------------------------------------- */
e5caff68 1745

static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
		struct dwc3_request *req, struct dwc3_trb *trb,
		const struct dwc3_event_depevt *event, int status)
{
	unsigned int		count;
	unsigned int		s_pkt = 0;
	unsigned int		trb_status;

	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
		/*
		 * We continue despite the error. There is not much we
		 * can do. If we don't clean it up we loop forever. If
		 * we skip the TRB then it gets overwritten after a
		 * while since we use them in a ring buffer. A BUG()
		 * would help. Let's hope that if this occurs, someone
		 * fixes the root cause instead of looking away :)
		 */
		dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
				dep->name, trb);
	count = trb->size & DWC3_TRB_SIZE_MASK;

	if (dep->direction) {
		if (count) {
			trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
			if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
				dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				/*
				 * If missed isoc occurred and there is
				 * no request queued then issue END
				 * TRANSFER, so that core generates
				 * next xfernotready and we will issue
				 * a fresh START TRANSFER.
				 * If there are still queued requests
				 * then wait, do not issue either END
				 * or UPDATE TRANSFER, just attach the next
				 * request in request_list during
				 * giveback. If any future queued request
				 * is successfully transferred then we
				 * will issue UPDATE TRANSFER for all
				 * requests in the request_list.
				 */
				dep->flags |= DWC3_EP_MISSED_ISOC;
			} else {
				dev_err(dwc->dev, "incomplete IN transfer %s\n",
						dep->name);
				status = -ECONNRESET;
			}
		} else {
			dep->flags &= ~DWC3_EP_MISSED_ISOC;
		}
	} else {
		if (count && (event->status & DEPEVT_STATUS_SHORT))
			s_pkt = 1;
	}

	if (s_pkt)
		return 1;
	if ((event->status & DEPEVT_STATUS_LST) &&
			(trb->ctrl & (DWC3_TRB_CTRL_LST |
				DWC3_TRB_CTRL_HWO)))
		return 1;
	if ((event->status & DEPEVT_STATUS_IOC) &&
			(trb->ctrl & DWC3_TRB_CTRL_IOC))
		return 1;
	return 0;
}

static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct dwc3_event_depevt *event, int status)
{
	struct dwc3_request	*req;
	struct dwc3_trb		*trb;
	unsigned int		slot;
	unsigned int		i;
	int			count = 0;
	int			ret;

	do {
		req = next_request(&dep->req_queued);
		if (!req) {
			WARN_ON_ONCE(1);
			return 1;
		}
		i = 0;
		do {
			slot = req->start_slot + i;
			if ((slot == DWC3_TRB_NUM - 1) &&
				usb_endpoint_xfer_isoc(dep->endpoint.desc))
				slot++;
			slot %= DWC3_TRB_NUM;
			trb = &dep->trb_pool[slot];
			count += trb->size & DWC3_TRB_SIZE_MASK;

			ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
					event, status);
			if (ret)
				break;
		} while (++i < req->request.num_mapped_sgs);

		/*
		 * We assume here we will always receive the entire data block
		 * which we should receive. Meaning, if we program RX to
		 * receive 4K but we receive only 2K, we assume that's all we
		 * should receive and we simply bounce the request back to the
		 * gadget driver for further processing.
		 */
		req->request.actual += req->request.length - count;
		dwc3_gadget_giveback(dep, req, status);

		if (ret)
			break;
	} while (1);

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			list_empty(&dep->req_queued)) {
		if (list_empty(&dep->request_list)) {
			/*
			 * If there is no entry in request list then do
			 * not issue END TRANSFER now. Just set PENDING
			 * flag, so that END TRANSFER is issued when an
			 * entry is added into request list.
			 */
			dep->flags = DWC3_EP_PENDING_REQUEST;
		} else {
			dwc3_stop_active_transfer(dwc, dep->number);
			dep->flags = DWC3_EP_ENABLED;
		}
		return 1;
	}

	if ((event->status & DEPEVT_STATUS_IOC) &&
			(trb->ctrl & DWC3_TRB_CTRL_IOC))
		return 0;
	return 1;
}
1884static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1885 struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
1886 int start_new)
1887{
1888 unsigned status = 0;
1889 int clean_busy;
1890
1891 if (event->status & DEPEVT_STATUS_BUSERR)
1892 status = -ECONNRESET;
1893
1d046793 1894 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
c2df85ca 1895 if (clean_busy)
72246da4 1896 dep->flags &= ~DWC3_EP_BUSY;
fae2b904
FB
1897
1898 /*
1899 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1900 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1901 */
1902 if (dwc->revision < DWC3_REVISION_183A) {
1903 u32 reg;
1904 int i;
1905
1906 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
348e026f 1907 dep = dwc->eps[i];
fae2b904
FB
1908
1909 if (!(dep->flags & DWC3_EP_ENABLED))
1910 continue;
1911
1912 if (!list_empty(&dep->req_queued))
1913 return;
1914 }
1915
1916 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1917 reg |= dwc->u1u2;
1918 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1919
1920 dwc->u1u2 = 0;
1921 }
72246da4
FB
1922}
1923
72246da4
FB
1924static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1925 const struct dwc3_event_depevt *event)
1926{
1927 struct dwc3_ep *dep;
1928 u8 epnum = event->endpoint_number;
1929
1930 dep = dwc->eps[epnum];
1931
3336abb5
FB
1932 if (!(dep->flags & DWC3_EP_ENABLED))
1933 return;
1934
72246da4
FB
1935 dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
1936 dwc3_ep_event_string(event->endpoint_event));
1937
1938 if (epnum == 0 || epnum == 1) {
1939 dwc3_ep0_interrupt(dwc, event);
1940 return;
1941 }
1942
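/*
 * Non-isochronous endpoints complete with XferComplete, while
 * isochronous endpoints keep the transfer running and report
 * XferInProgress instead; the checks in the first two cases below
 * enforce that split.
 */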
1943 switch (event->endpoint_event) {
1944 case DWC3_DEPEVT_XFERCOMPLETE:
b4996a86 1945 dep->resource_index = 0;
c2df85ca 1946
16e78db7 1947 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
1948 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1949 dep->name);
1950 return;
1951 }
1952
1953 dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
1954 break;
1955 case DWC3_DEPEVT_XFERINPROGRESS:
16e78db7 1956 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
1957 dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
1958 dep->name);
1959 return;
1960 }
1961
1962 dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
1963 break;
1964 case DWC3_DEPEVT_XFERNOTREADY:
16e78db7 1965 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
1966 dwc3_gadget_start_isoc(dwc, dep, event);
1967 } else {
1968 int ret;
1969
1970 dev_vdbg(dwc->dev, "%s: reason %s\n",
40aa41fb
FB
1971 dep->name, event->status &
1972 DEPEVT_STATUS_TRANSFER_ACTIVE
72246da4
FB
1973 ? "Transfer Active"
1974 : "Transfer Not Active");
1975
1976 ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1977 if (!ret || ret == -EBUSY)
1978 return;
1979
1980 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1981 dep->name);
1982 }
1983
879631aa
FB
1984 break;
1985 case DWC3_DEPEVT_STREAMEVT:
16e78db7 1986 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
879631aa
FB
1987 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1988 dep->name);
1989 return;
1990 }
1991
1992 switch (event->status) {
1993 case DEPEVT_STREAMEVT_FOUND:
1994 dev_vdbg(dwc->dev, "Stream %d found and started\n",
1995 event->parameters);
1996
1997 break;
1998 case DEPEVT_STREAMEVT_NOTFOUND:
1999 /* FALLTHROUGH */
2000 default:
2001 dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
2002 }
72246da4
FB
2003 break;
2004 case DWC3_DEPEVT_RXTXFIFOEVT:
2005 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
2006 break;
72246da4 2007 case DWC3_DEPEVT_EPCMDCMPLT:
ea53b882 2008 dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
72246da4
FB
2009 break;
2010 }
2011}
2012
2013static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2014{
2015 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2016 spin_unlock(&dwc->lock);
2017 dwc->gadget_driver->disconnect(&dwc->gadget);
2018 spin_lock(&dwc->lock);
2019 }
2020}
2021
2022static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
2023{
2024 struct dwc3_ep *dep;
2025 struct dwc3_gadget_ep_cmd_params params;
2026 u32 cmd;
2027 int ret;
2028
2029 dep = dwc->eps[epnum];
2030
b4996a86 2031 if (!dep->resource_index)
3daf74d7
PA
2032 return;
2033
57911504
PA
2034 /*
2035 * NOTICE: We are violating what the Databook says about the
2036 * EndTransfer command. Ideally we would _always_ wait for the
2037 * EndTransfer Command Completion IRQ, but that's causing too
2038 * much trouble synchronizing between us and the gadget driver.
2039 *
2040 * We have discussed this with the IP Provider and it was
2041 * suggested to giveback all requests here, but give HW some
2042 * extra time to synchronize with the interconnect. We're using
2043 * an arbitrary 100us delay for that.
2044 *
2045 * Note also that a similar handling was tested by Synopsys
2046 * (thanks a lot Paul) and nothing bad has come out of it.
2047 * In short, what we're doing is:
2048 *
2049 * - Issue EndTransfer WITH CMDIOC bit set
2050 * - Wait 100us
2051 */
2052
3daf74d7
PA
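/*
 * Both steps are implemented right here: the command below carries
 * DWC3_DEPCMD_CMDIOC (plus HIPRI/ForceRM), and the udelay(100) at the
 * end of this function provides the settling time mentioned above.
 */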
2053 cmd = DWC3_DEPCMD_ENDTRANSFER;
2054 cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
b4996a86 2055 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
3daf74d7
PA
2056 memset(&params, 0, sizeof(params));
2057 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
2058 WARN_ON_ONCE(ret);
b4996a86 2059 dep->resource_index = 0;
041d81f4 2060 dep->flags &= ~DWC3_EP_BUSY;
57911504 2061 udelay(100);
72246da4
FB
2062}
2063
2064static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2065{
2066 u32 epnum;
2067
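/* Endpoints 0 and 1 form the control endpoint; leave them alone here. */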
2068 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2069 struct dwc3_ep *dep;
2070
2071 dep = dwc->eps[epnum];
6a1e3ef4
FB
2072 if (!dep)
2073 continue;
2074
72246da4
FB
2075 if (!(dep->flags & DWC3_EP_ENABLED))
2076 continue;
2077
624407f9 2078 dwc3_remove_requests(dwc, dep);
72246da4
FB
2079 }
2080}
2081
2082static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2083{
2084 u32 epnum;
2085
2086 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2087 struct dwc3_ep *dep;
2088 struct dwc3_gadget_ep_cmd_params params;
2089 int ret;
2090
2091 dep = dwc->eps[epnum];
6a1e3ef4
FB
2092 if (!dep)
2093 continue;
72246da4
FB
2094
2095 if (!(dep->flags & DWC3_EP_STALL))
2096 continue;
2097
2098 dep->flags &= ~DWC3_EP_STALL;
2099
2100 memset(&params, 0, sizeof(params));
2101 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2102 DWC3_DEPCMD_CLEARSTALL, &params);
2103 WARN_ON_ONCE(ret);
2104 }
2105}
2106
2107static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2108{
c4430a26
FB
2109 int reg;
2110
72246da4 2111 dev_vdbg(dwc->dev, "%s\n", __func__);
72246da4
FB
2112
2113 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2114 reg &= ~DWC3_DCTL_INITU1ENA;
2115 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2116
2117 reg &= ~DWC3_DCTL_INITU2ENA;
2118 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
72246da4 2119
72246da4 2120 dwc3_disconnect_gadget(dwc);
b23c8439 2121 dwc->start_config_issued = false;
72246da4
FB
2122
2123 dwc->gadget.speed = USB_SPEED_UNKNOWN;
df62df56 2124 dwc->setup_packet_pending = false;
72246da4
FB
2125}
2126
72246da4
FB
2127static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2128{
2129 u32 reg;
2130
2131 dev_vdbg(dwc->dev, "%s\n", __func__);
2132
df62df56
FB
2133 /*
2134 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2135 * would cause a missing Disconnect Event if there's a
2136 * pending Setup Packet in the FIFO.
2137 *
2138 * There's no suggested workaround on the official Bug
2139 * report, which states that "unless the driver/application
2140 * is doing any special handling of a disconnect event,
2141 * there is no functional issue".
2142 *
2143 * Unfortunately, it turns out that we _do_ some special
2144 * handling of a disconnect event, namely complete all
2145 * pending transfers, notify gadget driver of the
2146 * disconnection, and so on.
2147 *
2148 * Our suggested workaround is to follow the Disconnect
2149 * Event steps here, instead, based on a setup_packet_pending
2150 * flag. This flag gets set whenever we have a XferNotReady
2151 * event on EP0 and gets cleared on XferComplete for the
2152 * same endpoint.
2153 *
2154 * Refers to:
2155 *
2156 * STAR#9000466709: RTL: Device : Disconnect event not
2157 * generated if setup packet pending in FIFO
2158 */
2159 if (dwc->revision < DWC3_REVISION_188A) {
2160 if (dwc->setup_packet_pending)
2161 dwc3_gadget_disconnect_interrupt(dwc);
2162 }
2163
961906ed 2164 /* after reset -> Default State */
14cd592f 2165 usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);
961906ed 2166
72246da4
FB
2167 if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
2168 dwc3_disconnect_gadget(dwc);
2169
2170 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2171 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2172 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
3b637367 2173 dwc->test_mode = false;
72246da4
FB
2174
2175 dwc3_stop_active_transfers(dwc);
2176 dwc3_clear_stall_all_ep(dwc);
b23c8439 2177 dwc->start_config_issued = false;
72246da4
FB
2178
2179 /* Reset device address to zero */
2180 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2181 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2182 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
72246da4
FB
2183}
2184
2185static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2186{
2187 u32 reg;
2188 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2189
2190 /*
2191 * We change the clock only at SuperSpeed, though it is not entirely
2192 * clear why; it may become part of the power saving plan.
2193 */
2194
2195 if (speed != DWC3_DSTS_SUPERSPEED)
2196 return;
2197
2198 /*
2199 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2200 * each time on Connect Done.
2201 */
2202 if (!usb30_clock)
2203 return;
2204
2205 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2206 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2207 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2208}
2209
72246da4
FB
2210static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2211{
72246da4
FB
2212 struct dwc3_ep *dep;
2213 int ret;
2214 u32 reg;
2215 u8 speed;
2216
2217 dev_vdbg(dwc->dev, "%s\n", __func__);
2218
72246da4
FB
2219 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2220 speed = reg & DWC3_DSTS_CONNECTSPD;
2221 dwc->speed = speed;
2222
2223 dwc3_update_ram_clk_sel(dwc, speed);
2224
2225 switch (speed) {
2226 case DWC3_DCFG_SUPERSPEED:
05870c5b
FB
2227 /*
2228 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2229 * would cause a missing USB3 Reset event.
2230 *
2231 * In such situations, we should force a USB3 Reset
2232 * event by calling our dwc3_gadget_reset_interrupt()
2233 * routine.
2234 *
2235 * Refers to:
2236 *
2237 * STAR#9000483510: RTL: SS : USB3 reset event may
2238 * not be generated always when the link enters poll
2239 */
2240 if (dwc->revision < DWC3_REVISION_190A)
2241 dwc3_gadget_reset_interrupt(dwc);
2242
72246da4
FB
2243 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2244 dwc->gadget.ep0->maxpacket = 512;
2245 dwc->gadget.speed = USB_SPEED_SUPER;
2246 break;
2247 case DWC3_DCFG_HIGHSPEED:
2248 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2249 dwc->gadget.ep0->maxpacket = 64;
2250 dwc->gadget.speed = USB_SPEED_HIGH;
2251 break;
2252 case DWC3_DCFG_FULLSPEED2:
2253 case DWC3_DCFG_FULLSPEED1:
2254 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2255 dwc->gadget.ep0->maxpacket = 64;
2256 dwc->gadget.speed = USB_SPEED_FULL;
2257 break;
2258 case DWC3_DCFG_LOWSPEED:
2259 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2260 dwc->gadget.ep0->maxpacket = 8;
2261 dwc->gadget.speed = USB_SPEED_LOW;
2262 break;
2263 }
2264
2b758350
PA
2265 /* Enable USB2 LPM Capability */
2266
2267 if ((dwc->revision > DWC3_REVISION_194A)
2268 && (speed != DWC3_DCFG_SUPERSPEED)) {
2269 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2270 reg |= DWC3_DCFG_LPM_CAP;
2271 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2272
2273 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2274 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2275
1a947746
FB
2276 /*
2277 * TODO: This should be configurable. For now using
2278 * maximum allowed HIRD threshold value of 0b1100
2279 */
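/* 0b1100 == 12, hence the DWC3_DCTL_HIRD_THRES(12) value below */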
2280 reg |= DWC3_DCTL_HIRD_THRES(12);
2b758350
PA
2281
2282 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2283 }
2284
72246da4 2285 dep = dwc->eps[0];
4b345c9a 2286 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
72246da4
FB
2287 if (ret) {
2288 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2289 return;
2290 }
2291
2292 dep = dwc->eps[1];
4b345c9a 2293 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
72246da4
FB
2294 if (ret) {
2295 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2296 return;
2297 }
2298
2299 /*
2300 * Configure PHY via GUSB3PIPECTLn if required.
2301 *
2302 * Update GTXFIFOSIZn
2303 *
2304 * In both cases reset values should be sufficient.
2305 */
2306}
2307
2308static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2309{
2310 dev_vdbg(dwc->dev, "%s\n", __func__);
2311
2312 /*
2313 * TODO take core out of low power mode when that's
2314 * implemented.
2315 */
2316
2317 dwc->gadget_driver->resume(&dwc->gadget);
2318}
2319
2320static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2321 unsigned int evtinfo)
2322{
fae2b904 2323 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
0b0cc1cd
FB
2324 unsigned int pwropt;
2325
2326 /*
2327 * WORKAROUND: DWC3 < 2.50a, when configured without Hibernation
2328 * mode enabled, have an issue which shows up when the device
2329 * detects a host-initiated U3 exit.
2330 *
2331 * In that case, device will generate a Link State Change Interrupt
2332 * from U3 to RESUME which is only necessary if Hibernation is
2333 * configured in.
2334 *
2335 * There are no functional changes due to such a spurious event and we
2336 * just need to ignore it.
2337 *
2338 * Refers to:
2339 *
2340 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2341 * operational mode
2342 */
2343 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2344 if ((dwc->revision < DWC3_REVISION_250A) &&
2345 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2346 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2347 (next == DWC3_LINK_STATE_RESUME)) {
2348 dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
2349 return;
2350 }
2351 }
fae2b904
FB
2352
2353 /*
2354 * WORKAROUND: On DWC3 revisions <1.83a, depending on the link
2355 * partner, the USB session might do multiple entries into and exits
2356 * from low power states before a transfer takes place.
2357 *
2358 * Due to this problem, we might experience lower throughput. The
2359 * suggested workaround is to disable DCTL[12:9] bits if we're
2360 * transitioning from U1/U2 to U0 and enable those bits again
2361 * after a transfer completes and there are no pending transfers
2362 * on any of the enabled endpoints.
2363 *
2364 * This is the first half of that workaround.
2365 *
2366 * Refers to:
2367 *
2368 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2369 * core send LGO_Ux entering U0
2370 */
2371 if (dwc->revision < DWC3_REVISION_183A) {
2372 if (next == DWC3_LINK_STATE_U0) {
2373 u32 u1u2;
2374 u32 reg;
2375
2376 switch (dwc->link_state) {
2377 case DWC3_LINK_STATE_U1:
2378 case DWC3_LINK_STATE_U2:
2379 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2380 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2381 | DWC3_DCTL_ACCEPTU2ENA
2382 | DWC3_DCTL_INITU1ENA
2383 | DWC3_DCTL_ACCEPTU1ENA);
2384
2385 if (!dwc->u1u2)
2386 dwc->u1u2 = reg & u1u2;
2387
2388 reg &= ~u1u2;
2389
2390 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2391 break;
2392 default:
2393 /* do nothing */
2394 break;
2395 }
2396 }
2397 }
2398
2399 dwc->link_state = next;
019ac832
FB
2400
2401 dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
72246da4
FB
2402}
2403
2404static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2405 const struct dwc3_event_devt *event)
2406{
2407 switch (event->type) {
2408 case DWC3_DEVICE_EVENT_DISCONNECT:
2409 dwc3_gadget_disconnect_interrupt(dwc);
2410 break;
2411 case DWC3_DEVICE_EVENT_RESET:
2412 dwc3_gadget_reset_interrupt(dwc);
2413 break;
2414 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2415 dwc3_gadget_conndone_interrupt(dwc);
2416 break;
2417 case DWC3_DEVICE_EVENT_WAKEUP:
2418 dwc3_gadget_wakeup_interrupt(dwc);
2419 break;
2420 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2421 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2422 break;
2423 case DWC3_DEVICE_EVENT_EOPF:
2424 dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2425 break;
2426 case DWC3_DEVICE_EVENT_SOF:
2427 dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2428 break;
2429 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2430 dev_vdbg(dwc->dev, "Erratic Error\n");
2431 break;
2432 case DWC3_DEVICE_EVENT_CMD_CMPL:
2433 dev_vdbg(dwc->dev, "Command Complete\n");
2434 break;
2435 case DWC3_DEVICE_EVENT_OVERFLOW:
2436 dev_vdbg(dwc->dev, "Overflow\n");
2437 break;
2438 default:
2439 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2440 }
2441}
2442
2443static void dwc3_process_event_entry(struct dwc3 *dwc,
2444 const union dwc3_event *event)
2445{
2446 /* Endpoint IRQ, handle it and return early */
2447 if (event->type.is_devspec == 0) {
2448 /* depevt */
2449 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2450 }
2451
2452 switch (event->type.type) {
2453 case DWC3_EVENT_TYPE_DEV:
2454 dwc3_gadget_interrupt(dwc, &event->devt);
2455 break;
2456 /* REVISIT what to do with Carkit and I2C events ? */
2457 default:
2458 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2459 }
2460}
2461
f42f2447 2462static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
b15a762f 2463{
f42f2447 2464 struct dwc3_event_buffer *evt;
b15a762f 2465 irqreturn_t ret = IRQ_NONE;
f42f2447 2466 int left;
e8adfc30 2467 u32 reg;
b15a762f 2468
f42f2447
FB
2469 evt = dwc->ev_buffs[buf];
2470 left = evt->count;
b15a762f 2471
f42f2447
FB
2472 if (!(evt->flags & DWC3_EVENT_PENDING))
2473 return IRQ_NONE;
b15a762f 2474
f42f2447
FB
2475 while (left > 0) {
2476 union dwc3_event event;
b15a762f 2477
f42f2447 2478 event.raw = *(u32 *) (evt->buf + evt->lpos);
b15a762f 2479
f42f2447 2480 dwc3_process_event_entry(dwc, &event);
b15a762f 2481
f42f2447
FB
2482 /*
2483 * FIXME we wrap around correctly to the next entry as
2484 * almost all entries are 4 bytes in size. There is one
2485 * 12-byte entry: a regular entry followed by 8 bytes of
2486 * data. It is not yet clear how things are organized if
2487 * such an entry lands next to the buffer boundary, so
2488 * that will need to be worked out once we try to handle
2489 * it.
2490 */
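/*
 * Each consumed event is acknowledged by writing its size (4 bytes)
 * back to GEVNTCOUNT below, while lpos steps through the ring:
 * 0, 4, 8, ... wrapping back to 0 at DWC3_EVENT_BUFFERS_SIZE.
 */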
2491 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2492 left -= 4;
b15a762f 2493
f42f2447
FB
2494 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2495 }
b15a762f 2496
f42f2447
FB
2497 evt->count = 0;
2498 evt->flags &= ~DWC3_EVENT_PENDING;
2499 ret = IRQ_HANDLED;
b15a762f 2500
f42f2447
FB
2501 /* Unmask interrupt */
2502 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2503 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2504 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
b15a762f 2505
f42f2447
FB
2506 return ret;
2507}
e8adfc30 2508
f42f2447
FB
2509static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2510{
2511 struct dwc3 *dwc = _dwc;
2512 unsigned long flags;
2513 irqreturn_t ret = IRQ_NONE;
2514 int i;
2515
2516 spin_lock_irqsave(&dwc->lock, flags);
2517
2518 for (i = 0; i < dwc->num_event_buffers; i++)
2519 ret |= dwc3_process_event_buf(dwc, i);
b15a762f
FB
2520
2521 spin_unlock_irqrestore(&dwc->lock, flags);
2522
2523 return ret;
2524}
2525
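/*
 * Hard-IRQ half of the handler: snapshot the pending event count for one
 * buffer, mark it DWC3_EVENT_PENDING, mask its interrupt and let the
 * threaded handler (dwc3_thread_interrupt() -> dwc3_process_event_buf())
 * drain the buffer and unmask the interrupt again.
 */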
7f97aa98 2526static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
72246da4
FB
2527{
2528 struct dwc3_event_buffer *evt;
72246da4 2529 u32 count;
e8adfc30 2530 u32 reg;
72246da4 2531
b15a762f
FB
2532 evt = dwc->ev_buffs[buf];
2533
72246da4
FB
2534 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2535 count &= DWC3_GEVNTCOUNT_MASK;
2536 if (!count)
2537 return IRQ_NONE;
2538
b15a762f
FB
2539 evt->count = count;
2540 evt->flags |= DWC3_EVENT_PENDING;
72246da4 2541
e8adfc30
FB
2542 /* Mask interrupt */
2543 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2544 reg |= DWC3_GEVNTSIZ_INTMASK;
2545 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2546
b15a762f 2547 return IRQ_WAKE_THREAD;
72246da4
FB
2548}
2549
2550static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2551{
2552 struct dwc3 *dwc = _dwc;
2553 int i;
2554 irqreturn_t ret = IRQ_NONE;
2555
2556 spin_lock(&dwc->lock);
2557
9f622b2a 2558 for (i = 0; i < dwc->num_event_buffers; i++) {
72246da4
FB
2559 irqreturn_t status;
2560
7f97aa98 2561 status = dwc3_check_event_buf(dwc, i);
b15a762f 2562 if (status == IRQ_WAKE_THREAD)
72246da4
FB
2563 ret = status;
2564 }
2565
2566 spin_unlock(&dwc->lock);
2567
2568 return ret;
2569}
2570
2571/**
2572 * dwc3_gadget_init - Initializes gadget related registers
1d046793 2573 * @dwc: pointer to our controller context structure
72246da4
FB
2574 *
2575 * Returns 0 on success otherwise negative errno.
2576 */
41ac7b3a 2577int dwc3_gadget_init(struct dwc3 *dwc)
72246da4 2578{
72246da4 2579 int ret;
72246da4
FB
2580
2581 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2582 &dwc->ctrl_req_addr, GFP_KERNEL);
2583 if (!dwc->ctrl_req) {
2584 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2585 ret = -ENOMEM;
2586 goto err0;
2587 }
2588
2589 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2590 &dwc->ep0_trb_addr, GFP_KERNEL);
2591 if (!dwc->ep0_trb) {
2592 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2593 ret = -ENOMEM;
2594 goto err1;
2595 }
2596
3ef35faf 2597 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
72246da4
FB
2598 if (!dwc->setup_buf) {
2599 dev_err(dwc->dev, "failed to allocate setup buffer\n");
2600 ret = -ENOMEM;
2601 goto err2;
2602 }
2603
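/*
 * Bounce buffer used by the ep0 handling code (see ep0.c) for OUT
 * transfers whose length is not aligned to the control endpoint's
 * maximum packet size.
 */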
5812b1c2 2604 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
3ef35faf
FB
2605 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2606 GFP_KERNEL);
5812b1c2
FB
2607 if (!dwc->ep0_bounce) {
2608 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2609 ret = -ENOMEM;
2610 goto err3;
2611 }
2612
72246da4 2613 dwc->gadget.ops = &dwc3_gadget_ops;
d327ab5b 2614 dwc->gadget.max_speed = USB_SPEED_SUPER;
72246da4 2615 dwc->gadget.speed = USB_SPEED_UNKNOWN;
eeb720fb 2616 dwc->gadget.sg_supported = true;
72246da4
FB
2617 dwc->gadget.name = "dwc3-gadget";
2618
a4b9d94b
DC
2619 /*
2620 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2621 * on ep out.
2622 */
2623 dwc->gadget.quirk_ep_out_aligned_size = true;
2624
72246da4
FB
2625 /*
2626 * REVISIT: Here we should clear all pending IRQs to be
2627 * sure we're starting from a well known location.
2628 */
2629
2630 ret = dwc3_gadget_init_endpoints(dwc);
2631 if (ret)
5812b1c2 2632 goto err4;
72246da4 2633
72246da4
FB
2634 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2635 if (ret) {
2636 dev_err(dwc->dev, "failed to register udc\n");
e1f80467 2637 goto err4;
72246da4
FB
2638 }
2639
2640 return 0;
2641
5812b1c2 2642err4:
e1f80467 2643 dwc3_gadget_free_endpoints(dwc);
3ef35faf
FB
2644 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2645 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 2646
72246da4 2647err3:
0fc9a1be 2648 kfree(dwc->setup_buf);
72246da4
FB
2649
2650err2:
2651 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2652 dwc->ep0_trb, dwc->ep0_trb_addr);
2653
2654err1:
2655 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2656 dwc->ctrl_req, dwc->ctrl_req_addr);
2657
2658err0:
2659 return ret;
2660}
2661
7415f17c
FB
2662/* -------------------------------------------------------------------------- */
2663
72246da4
FB
2664void dwc3_gadget_exit(struct dwc3 *dwc)
2665{
72246da4 2666 usb_del_gadget_udc(&dwc->gadget);
72246da4 2667
72246da4
FB
2668 dwc3_gadget_free_endpoints(dwc);
2669
3ef35faf
FB
2670 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2671 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 2672
0fc9a1be 2673 kfree(dwc->setup_buf);
72246da4
FB
2674
2675 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2676 dwc->ep0_trb, dwc->ep0_trb_addr);
2677
2678 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2679 dwc->ctrl_req, dwc->ctrl_req_addr);
72246da4 2680}
7415f17c
FB
2681
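/*
 * Power-management helpers: prepare()/complete() bracket system sleep by
 * disabling and re-enabling device interrupts (and restarting the
 * controller), while suspend()/resume() tear down and rebuild ep0 and
 * save/restore DCFG.
 */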
2682int dwc3_gadget_prepare(struct dwc3 *dwc)
2683{
2684 if (dwc->pullups_connected)
2685 dwc3_gadget_disable_irq(dwc);
2686
2687 return 0;
2688}
2689
2690void dwc3_gadget_complete(struct dwc3 *dwc)
2691{
2692 if (dwc->pullups_connected) {
2693 dwc3_gadget_enable_irq(dwc);
2694 dwc3_gadget_run_stop(dwc, true);
2695 }
2696}
2697
2698int dwc3_gadget_suspend(struct dwc3 *dwc)
2699{
2700 __dwc3_gadget_ep_disable(dwc->eps[0]);
2701 __dwc3_gadget_ep_disable(dwc->eps[1]);
2702
2703 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
2704
2705 return 0;
2706}
2707
2708int dwc3_gadget_resume(struct dwc3 *dwc)
2709{
2710 struct dwc3_ep *dep;
2711 int ret;
2712
2713 /* Start with SuperSpeed Default */
2714 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2715
2716 dep = dwc->eps[0];
2717 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
2718 if (ret)
2719 goto err0;
2720
2721 dep = dwc->eps[1];
2722 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
2723 if (ret)
2724 goto err1;
2725
2726 /* begin to receive SETUP packets */
2727 dwc->ep0state = EP0_SETUP_PHASE;
2728 dwc3_ep0_out_start(dwc);
2729
2730 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
2731
2732 return 0;
2733
2734err1:
2735 __dwc3_gadget_ep_disable(dwc->eps[0]);
2736
2737err0:
2738 return ret;
2739}