// SPDX-License-Identifier: GPL-2.0+
/*
 * NVIDIA Tegra XUSB device mode controller
 *
 * Copyright (c) 2013-2019, NVIDIA CORPORATION. All rights reserved.
 * Copyright (c) 2015, Google Inc.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/tegra/xusb.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/usb/role.h>
#include <linux/usb/phy.h>
#include <linux/workqueue.h>

/* XUSB_DEV registers */
#define SPARAM 0x000
#define SPARAM_ERSTMAX_MASK GENMASK(20, 16)
#define SPARAM_ERSTMAX(x) (((x) << 16) & SPARAM_ERSTMAX_MASK)
#define DB 0x004
#define DB_TARGET_MASK GENMASK(15, 8)
#define DB_TARGET(x) (((x) << 8) & DB_TARGET_MASK)
#define DB_STREAMID_MASK GENMASK(31, 16)
#define DB_STREAMID(x) (((x) << 16) & DB_STREAMID_MASK)
#define ERSTSZ 0x008
#define ERSTSZ_ERSTXSZ_SHIFT(x) ((x) * 16)
#define ERSTSZ_ERSTXSZ_MASK GENMASK(15, 0)
#define ERSTXBALO(x) (0x010 + 8 * (x))
#define ERSTXBAHI(x) (0x014 + 8 * (x))
#define ERDPLO 0x020
#define ERDPLO_EHB BIT(3)
#define ERDPHI 0x024
#define EREPLO 0x028
#define EREPLO_ECS BIT(0)
#define EREPLO_SEGI BIT(1)
#define EREPHI 0x02c
#define CTRL 0x030
#define CTRL_RUN BIT(0)
#define CTRL_LSE BIT(1)
#define CTRL_IE BIT(4)
#define CTRL_SMI_EVT BIT(5)
#define CTRL_SMI_DSE BIT(6)
#define CTRL_EWE BIT(7)
#define CTRL_DEVADDR_MASK GENMASK(30, 24)
#define CTRL_DEVADDR(x) (((x) << 24) & CTRL_DEVADDR_MASK)
#define CTRL_ENABLE BIT(31)
#define ST 0x034
#define ST_RC BIT(0)
#define ST_IP BIT(4)
#define RT_IMOD 0x038
#define RT_IMOD_IMODI_MASK GENMASK(15, 0)
#define RT_IMOD_IMODI(x) ((x) & RT_IMOD_IMODI_MASK)
#define RT_IMOD_IMODC_MASK GENMASK(31, 16)
#define RT_IMOD_IMODC(x) (((x) << 16) & RT_IMOD_IMODC_MASK)
#define PORTSC 0x03c
#define PORTSC_CCS BIT(0)
#define PORTSC_PED BIT(1)
#define PORTSC_PR BIT(4)
#define PORTSC_PLS_SHIFT 5
#define PORTSC_PLS_MASK GENMASK(8, 5)
#define PORTSC_PLS_U0 0x0
#define PORTSC_PLS_U2 0x2
#define PORTSC_PLS_U3 0x3
#define PORTSC_PLS_DISABLED 0x4
#define PORTSC_PLS_RXDETECT 0x5
#define PORTSC_PLS_INACTIVE 0x6
#define PORTSC_PLS_RESUME 0xf
#define PORTSC_PLS(x) (((x) << PORTSC_PLS_SHIFT) & PORTSC_PLS_MASK)
#define PORTSC_PS_SHIFT 10
#define PORTSC_PS_MASK GENMASK(13, 10)
#define PORTSC_PS_UNDEFINED 0x0
#define PORTSC_PS_FS 0x1
#define PORTSC_PS_LS 0x2
#define PORTSC_PS_HS 0x3
#define PORTSC_PS_SS 0x4
#define PORTSC_LWS BIT(16)
#define PORTSC_CSC BIT(17)
#define PORTSC_WRC BIT(19)
#define PORTSC_PRC BIT(21)
#define PORTSC_PLC BIT(22)
#define PORTSC_CEC BIT(23)
#define PORTSC_WPR BIT(30)
#define PORTSC_CHANGE_MASK (PORTSC_CSC | PORTSC_WRC | PORTSC_PRC | \
			    PORTSC_PLC | PORTSC_CEC)
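/*
 * The PORTSC_*C flags are change bits, grouped in PORTSC_CHANGE_MASK so
 * that read-modify-write sequences can mask them out and avoid
 * acknowledging pending change events by accident (see, for example,
 * tegra_xudc_device_mode_off() and tegra_xudc_resume_device_state()).
 */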
#define ECPLO 0x040
#define ECPHI 0x044
#define MFINDEX 0x048
#define MFINDEX_FRAME_SHIFT 3
#define MFINDEX_FRAME_MASK GENMASK(13, 3)
#define PORTPM 0x04c
#define PORTPM_L1S_MASK GENMASK(1, 0)
#define PORTPM_L1S_DROP 0x0
#define PORTPM_L1S_ACCEPT 0x1
#define PORTPM_L1S_NYET 0x2
#define PORTPM_L1S_STALL 0x3
#define PORTPM_L1S(x) ((x) & PORTPM_L1S_MASK)
#define PORTPM_RWE BIT(3)
#define PORTPM_U2TIMEOUT_MASK GENMASK(15, 8)
#define PORTPM_U1TIMEOUT_MASK GENMASK(23, 16)
#define PORTPM_FLA BIT(24)
#define PORTPM_VBA BIT(25)
#define PORTPM_WOC BIT(26)
#define PORTPM_WOD BIT(27)
#define PORTPM_U1E BIT(28)
#define PORTPM_U2E BIT(29)
#define PORTPM_FRWE BIT(30)
#define PORTPM_PNG_CYA BIT(31)
#define EP_HALT 0x050
#define EP_PAUSE 0x054
#define EP_RELOAD 0x058
#define EP_STCHG 0x05c
#define DEVNOTIF_LO 0x064
#define DEVNOTIF_LO_TRIG BIT(0)
#define DEVNOTIF_LO_TYPE_MASK GENMASK(7, 4)
#define DEVNOTIF_LO_TYPE(x) (((x) << 4) & DEVNOTIF_LO_TYPE_MASK)
#define DEVNOTIF_LO_TYPE_FUNCTION_WAKE 0x1
#define DEVNOTIF_HI 0x068
#define PORTHALT 0x06c
#define PORTHALT_HALT_LTSSM BIT(0)
#define PORTHALT_HALT_REJECT BIT(1)
#define PORTHALT_STCHG_REQ BIT(20)
#define PORTHALT_STCHG_INTR_EN BIT(24)
#define PORT_TM 0x070
#define EP_THREAD_ACTIVE 0x074
#define EP_STOPPED 0x078
#define HSFSPI_COUNT0 0x100
#define HSFSPI_COUNT13 0x134
#define HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK GENMASK(29, 0)
#define HSFSPI_COUNT13_U2_RESUME_K_DURATION(x) ((x) & \
				HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK)
#define BLCG 0x840
#define SSPX_CORE_CNT0 0x610
#define SSPX_CORE_CNT0_PING_TBURST_MASK GENMASK(7, 0)
#define SSPX_CORE_CNT0_PING_TBURST(x) ((x) & SSPX_CORE_CNT0_PING_TBURST_MASK)
#define SSPX_CORE_CNT30 0x688
#define SSPX_CORE_CNT30_LMPITP_TIMER_MASK GENMASK(19, 0)
#define SSPX_CORE_CNT30_LMPITP_TIMER(x) ((x) & \
				SSPX_CORE_CNT30_LMPITP_TIMER_MASK)
#define SSPX_CORE_CNT32 0x690
#define SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK GENMASK(7, 0)
#define SSPX_CORE_CNT32_POLL_TBURST_MAX(x) ((x) & \
				SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK)
#define SSPX_CORE_PADCTL4 0x750
#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK GENMASK(19, 0)
#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(x) ((x) & \
				SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK)
#define BLCG_DFPCI BIT(0)
#define BLCG_UFPCI BIT(1)
#define BLCG_FE BIT(2)
#define BLCG_COREPLL_PWRDN BIT(8)
#define BLCG_IOPLL_0_PWRDN BIT(9)
#define BLCG_IOPLL_1_PWRDN BIT(10)
#define BLCG_IOPLL_2_PWRDN BIT(11)
#define BLCG_ALL 0x1ff
#define CFG_DEV_SSPI_XFER 0x858
#define CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK GENMASK(31, 0)
#define CFG_DEV_SSPI_XFER_ACKTIMEOUT(x) ((x) & \
				CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK)
#define CFG_DEV_FE 0x85c
#define CFG_DEV_FE_PORTREGSEL_MASK GENMASK(1, 0)
#define CFG_DEV_FE_PORTREGSEL_SS_PI 1
#define CFG_DEV_FE_PORTREGSEL_HSFS_PI 2
#define CFG_DEV_FE_PORTREGSEL(x) ((x) & CFG_DEV_FE_PORTREGSEL_MASK)
#define CFG_DEV_FE_INFINITE_SS_RETRY BIT(29)

/* FPCI registers */
#define XUSB_DEV_CFG_1 0x004
#define XUSB_DEV_CFG_1_IO_SPACE_EN BIT(0)
#define XUSB_DEV_CFG_1_MEMORY_SPACE_EN BIT(1)
#define XUSB_DEV_CFG_1_BUS_MASTER_EN BIT(2)
#define XUSB_DEV_CFG_4 0x010
#define XUSB_DEV_CFG_4_BASE_ADDR_MASK GENMASK(31, 15)
#define XUSB_DEV_CFG_5 0x014

/* IPFS registers */
#define XUSB_DEV_CONFIGURATION_0 0x180
#define XUSB_DEV_CONFIGURATION_0_EN_FPCI BIT(0)
#define XUSB_DEV_INTR_MASK_0 0x188
#define XUSB_DEV_INTR_MASK_0_IP_INT_MASK BIT(16)

struct tegra_xudc_ep_context {
	__le32 info0;
	__le32 info1;
	__le32 deq_lo;
	__le32 deq_hi;
	__le32 tx_info;
	__le32 rsvd[11];
};
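
/*
 * Five defined dwords plus 11 reserved dwords make the endpoint context
 * 64 bytes (16 dwords), an xHCI-style layout that the controller reads
 * via DMA.
 */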

#define EP_STATE_DISABLED 0
#define EP_STATE_RUNNING 1
#define EP_STATE_HALTED 2
#define EP_STATE_STOPPED 3
#define EP_STATE_ERROR 4

#define EP_TYPE_INVALID 0
#define EP_TYPE_ISOCH_OUT 1
#define EP_TYPE_BULK_OUT 2
#define EP_TYPE_INTERRUPT_OUT 3
#define EP_TYPE_CONTROL 4
#define EP_TYPE_ISOCH_IN 5
#define EP_TYPE_BULK_IN 6
#define EP_TYPE_INTERRUPT_IN 7

#define BUILD_EP_CONTEXT_RW(name, member, shift, mask) \
static inline u32 ep_ctx_read_##name(struct tegra_xudc_ep_context *ctx) \
{ \
	return (le32_to_cpu(ctx->member) >> (shift)) & (mask); \
} \
static inline void \
ep_ctx_write_##name(struct tegra_xudc_ep_context *ctx, u32 val) \
{ \
	u32 tmp; \
\
	tmp = le32_to_cpu(ctx->member) & ~((mask) << (shift)); \
	tmp |= (val & (mask)) << (shift); \
	ctx->member = cpu_to_le32(tmp); \
}
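
/*
 * For example, BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7) below expands to
 * ep_ctx_read_state()/ep_ctx_write_state(), an endian-safe
 * read-modify-write accessor pair for the 3-bit state field of info0.
 */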

BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7)
BUILD_EP_CONTEXT_RW(mult, info0, 8, 0x3)
BUILD_EP_CONTEXT_RW(max_pstreams, info0, 10, 0x1f)
BUILD_EP_CONTEXT_RW(lsa, info0, 15, 0x1)
BUILD_EP_CONTEXT_RW(interval, info0, 16, 0xff)
BUILD_EP_CONTEXT_RW(cerr, info1, 1, 0x3)
BUILD_EP_CONTEXT_RW(type, info1, 3, 0x7)
BUILD_EP_CONTEXT_RW(hid, info1, 7, 0x1)
BUILD_EP_CONTEXT_RW(max_burst_size, info1, 8, 0xff)
BUILD_EP_CONTEXT_RW(max_packet_size, info1, 16, 0xffff)
BUILD_EP_CONTEXT_RW(dcs, deq_lo, 0, 0x1)
BUILD_EP_CONTEXT_RW(deq_lo, deq_lo, 4, 0xfffffff)
BUILD_EP_CONTEXT_RW(deq_hi, deq_hi, 0, 0xffffffff)
BUILD_EP_CONTEXT_RW(avg_trb_len, tx_info, 0, 0xffff)
BUILD_EP_CONTEXT_RW(max_esit_payload, tx_info, 16, 0xffff)
BUILD_EP_CONTEXT_RW(edtla, rsvd[0], 0, 0xffffff)
BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 24, 0xff)
BUILD_EP_CONTEXT_RW(partial_td, rsvd[0], 25, 0x1)
BUILD_EP_CONTEXT_RW(cerrcnt, rsvd[1], 18, 0x3)
BUILD_EP_CONTEXT_RW(data_offset, rsvd[2], 0, 0x1ffff)
BUILD_EP_CONTEXT_RW(numtrbs, rsvd[2], 22, 0x1f)
BUILD_EP_CONTEXT_RW(devaddr, rsvd[6], 0, 0x7f)

static inline u64 ep_ctx_read_deq_ptr(struct tegra_xudc_ep_context *ctx)
{
	return ((u64)ep_ctx_read_deq_hi(ctx) << 32) |
	       (ep_ctx_read_deq_lo(ctx) << 4);
}

static inline void
ep_ctx_write_deq_ptr(struct tegra_xudc_ep_context *ctx, u64 addr)
{
	ep_ctx_write_deq_lo(ctx, lower_32_bits(addr) >> 4);
	ep_ctx_write_deq_hi(ctx, upper_32_bits(addr));
}
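
/*
 * The dequeue pointer is stored shifted right by four bits (DCS occupies
 * bit 0 of deq_lo), which implies the hardware requires 16-byte-aligned
 * TRB addresses.
 */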

struct tegra_xudc_trb {
	__le32 data_lo;
	__le32 data_hi;
	__le32 status;
	__le32 control;
};

#define TRB_TYPE_RSVD 0
#define TRB_TYPE_NORMAL 1
#define TRB_TYPE_SETUP_STAGE 2
#define TRB_TYPE_DATA_STAGE 3
#define TRB_TYPE_STATUS_STAGE 4
#define TRB_TYPE_ISOCH 5
#define TRB_TYPE_LINK 6
#define TRB_TYPE_TRANSFER_EVENT 32
#define TRB_TYPE_PORT_STATUS_CHANGE_EVENT 34
#define TRB_TYPE_STREAM 48
#define TRB_TYPE_SETUP_PACKET_EVENT 63

#define TRB_CMPL_CODE_INVALID 0
#define TRB_CMPL_CODE_SUCCESS 1
#define TRB_CMPL_CODE_DATA_BUFFER_ERR 2
#define TRB_CMPL_CODE_BABBLE_DETECTED_ERR 3
#define TRB_CMPL_CODE_USB_TRANS_ERR 4
#define TRB_CMPL_CODE_TRB_ERR 5
#define TRB_CMPL_CODE_STALL 6
#define TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR 10
#define TRB_CMPL_CODE_SHORT_PACKET 13
#define TRB_CMPL_CODE_RING_UNDERRUN 14
#define TRB_CMPL_CODE_RING_OVERRUN 15
#define TRB_CMPL_CODE_EVENT_RING_FULL_ERR 21
#define TRB_CMPL_CODE_STOPPED 26
#define TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN 31
#define TRB_CMPL_CODE_STREAM_NUMP_ERROR 219
#define TRB_CMPL_CODE_PRIME_PIPE_RECEIVED 220
#define TRB_CMPL_CODE_HOST_REJECTED 221
#define TRB_CMPL_CODE_CTRL_DIR_ERR 222
#define TRB_CMPL_CODE_CTRL_SEQNUM_ERR 223

#define BUILD_TRB_RW(name, member, shift, mask) \
static inline u32 trb_read_##name(struct tegra_xudc_trb *trb) \
{ \
	return (le32_to_cpu(trb->member) >> (shift)) & (mask); \
} \
static inline void \
trb_write_##name(struct tegra_xudc_trb *trb, u32 val) \
{ \
	u32 tmp; \
\
	tmp = le32_to_cpu(trb->member) & ~((mask) << (shift)); \
	tmp |= (val & (mask)) << (shift); \
	trb->member = cpu_to_le32(tmp); \
}
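
/*
 * Several of the control-word accessors below (stream_id, endpoint_id,
 * tlbpc, data_stage_dir) deliberately overlap at bit 16: each is only
 * meaningful for particular TRB types.
 */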

BUILD_TRB_RW(data_lo, data_lo, 0, 0xffffffff)
BUILD_TRB_RW(data_hi, data_hi, 0, 0xffffffff)
BUILD_TRB_RW(seq_num, status, 0, 0xffff)
BUILD_TRB_RW(transfer_len, status, 0, 0xffffff)
BUILD_TRB_RW(td_size, status, 17, 0x1f)
BUILD_TRB_RW(cmpl_code, status, 24, 0xff)
BUILD_TRB_RW(cycle, control, 0, 0x1)
BUILD_TRB_RW(toggle_cycle, control, 1, 0x1)
BUILD_TRB_RW(isp, control, 2, 0x1)
BUILD_TRB_RW(chain, control, 4, 0x1)
BUILD_TRB_RW(ioc, control, 5, 0x1)
BUILD_TRB_RW(type, control, 10, 0x3f)
BUILD_TRB_RW(stream_id, control, 16, 0xffff)
BUILD_TRB_RW(endpoint_id, control, 16, 0x1f)
BUILD_TRB_RW(tlbpc, control, 16, 0xf)
BUILD_TRB_RW(data_stage_dir, control, 16, 0x1)
BUILD_TRB_RW(frame_id, control, 20, 0x7ff)
BUILD_TRB_RW(sia, control, 31, 0x1)

static inline u64 trb_read_data_ptr(struct tegra_xudc_trb *trb)
{
	return ((u64)trb_read_data_hi(trb) << 32) |
	       trb_read_data_lo(trb);
}

static inline void trb_write_data_ptr(struct tegra_xudc_trb *trb, u64 addr)
{
	trb_write_data_lo(trb, lower_32_bits(addr));
	trb_write_data_hi(trb, upper_32_bits(addr));
}

struct tegra_xudc_request {
	struct usb_request usb_req;

	size_t buf_queued;
	unsigned int trbs_queued;
	unsigned int trbs_needed;
	bool need_zlp;

	struct tegra_xudc_trb *first_trb;
	struct tegra_xudc_trb *last_trb;

	struct list_head list;
};

struct tegra_xudc_ep {
	struct tegra_xudc *xudc;
	struct usb_ep usb_ep;
	unsigned int index;
	char name[8];

	struct tegra_xudc_ep_context *context;

#define XUDC_TRANSFER_RING_SIZE 64
	struct tegra_xudc_trb *transfer_ring;
	dma_addr_t transfer_ring_phys;

	unsigned int enq_ptr;
	unsigned int deq_ptr;
	bool pcs;
	bool ring_full;
	bool stream_rejected;

	struct list_head queue;
	const struct usb_endpoint_descriptor *desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc;
};

struct tegra_xudc_sel_timing {
	__u8 u1sel;
	__u8 u1pel;
	__le16 u2sel;
	__le16 u2pel;
};

enum tegra_xudc_setup_state {
	WAIT_FOR_SETUP,
	DATA_STAGE_XFER,
	DATA_STAGE_RECV,
	STATUS_STAGE_XFER,
	STATUS_STAGE_RECV,
};

struct tegra_xudc_setup_packet {
	struct usb_ctrlrequest ctrl_req;
	unsigned int seq_num;
};

struct tegra_xudc_save_regs {
	u32 ctrl;
	u32 portpm;
};

struct tegra_xudc {
	struct device *dev;
	const struct tegra_xudc_soc *soc;
	struct tegra_xusb_padctl *padctl;

	spinlock_t lock;

	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;

#define XUDC_NR_EVENT_RINGS 2
#define XUDC_EVENT_RING_SIZE 4096
	struct tegra_xudc_trb *event_ring[XUDC_NR_EVENT_RINGS];
	dma_addr_t event_ring_phys[XUDC_NR_EVENT_RINGS];
	unsigned int event_ring_index;
	unsigned int event_ring_deq_ptr;
	bool ccs;

#define XUDC_NR_EPS 32
	struct tegra_xudc_ep ep[XUDC_NR_EPS];
	struct tegra_xudc_ep_context *ep_context;
	dma_addr_t ep_context_phys;

	struct device *genpd_dev_device;
	struct device *genpd_dev_ss;
	struct device_link *genpd_dl_device;
	struct device_link *genpd_dl_ss;

	struct dma_pool *transfer_ring_pool;

	bool queued_setup_packet;
	struct tegra_xudc_setup_packet setup_packet;
	enum tegra_xudc_setup_state setup_state;
	u16 setup_seq_num;

	u16 dev_addr;
	u16 isoch_delay;
	struct tegra_xudc_sel_timing sel_timing;
	u8 test_mode_pattern;
	u16 status_buf;
	struct tegra_xudc_request *ep0_req;

	bool pullup;

	unsigned int nr_enabled_eps;
	unsigned int nr_isoch_eps;

	unsigned int device_state;
	unsigned int resume_state;

	int irq;

	void __iomem *base;
	resource_size_t phys_base;
	void __iomem *ipfs;
	void __iomem *fpci;

	struct regulator_bulk_data *supplies;

	struct clk_bulk_data *clks;

	bool device_mode;
	struct work_struct usb_role_sw_work;

	struct phy **usb3_phy;
	struct phy *curr_usb3_phy;
	struct phy **utmi_phy;
	struct phy *curr_utmi_phy;

	struct tegra_xudc_save_regs saved_regs;
	bool suspended;
	bool powergated;

	struct usb_phy **usbphy;
	struct notifier_block vbus_nb;

	struct completion disconnect_complete;

	bool selfpowered;

#define TOGGLE_VBUS_WAIT_MS 100
	struct delayed_work plc_reset_work;
	bool wait_csc;

	struct delayed_work port_reset_war_work;
	bool wait_for_sec_prc;
};

#define XUDC_TRB_MAX_BUFFER_SIZE 65536
#define XUDC_MAX_ISOCH_EPS 4
#define XUDC_INTERRUPT_MODERATION_US 0
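
/*
 * A single TRB can describe at most a 64 KiB buffer, so longer requests
 * are split across multiple TRBs (see the DIV_ROUND_UP() computation of
 * trbs_needed in __tegra_xudc_ep_queue()).
 */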

static struct usb_endpoint_descriptor tegra_xudc_ep0_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = 0,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = cpu_to_le16(64),
};

struct tegra_xudc_soc {
	const char * const *supply_names;
	unsigned int num_supplies;
	const char * const *clock_names;
	unsigned int num_clks;
	unsigned int num_phys;
	bool u1_enable;
	bool u2_enable;
	bool lpm_enable;
	bool invalid_seq_num;
	bool pls_quirk;
	bool port_reset_quirk;
	bool has_ipfs;
};

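/*
 * The controller exposes three register spaces: the XUSB_DEV core
 * registers (base), the FPCI configuration space (fpci) and, on SoCs
 * where the soc->has_ipfs flag is set, the IPFS wrapper registers (ipfs).
 */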
static inline u32 fpci_readl(struct tegra_xudc *xudc, unsigned int offset)
{
	return readl(xudc->fpci + offset);
}

static inline void fpci_writel(struct tegra_xudc *xudc, u32 val,
			       unsigned int offset)
{
	writel(val, xudc->fpci + offset);
}

static inline u32 ipfs_readl(struct tegra_xudc *xudc, unsigned int offset)
{
	return readl(xudc->ipfs + offset);
}

static inline void ipfs_writel(struct tegra_xudc *xudc, u32 val,
			       unsigned int offset)
{
	writel(val, xudc->ipfs + offset);
}

static inline u32 xudc_readl(struct tegra_xudc *xudc, unsigned int offset)
{
	return readl(xudc->base + offset);
}

static inline void xudc_writel(struct tegra_xudc *xudc, u32 val,
			       unsigned int offset)
{
	writel(val, xudc->base + offset);
}

static inline int xudc_readl_poll(struct tegra_xudc *xudc,
				  unsigned int offset, u32 mask, u32 val)
{
	u32 regval;

	return readl_poll_timeout_atomic(xudc->base + offset, regval,
					 (regval & mask) == val, 1, 100);
}
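
/*
 * xudc_readl_poll() spins in atomic context, checking once per
 * microsecond for up to 100 microseconds, and returns -ETIMEDOUT if the
 * masked value never matches.
 */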

static inline struct tegra_xudc *to_xudc(struct usb_gadget *gadget)
{
	return container_of(gadget, struct tegra_xudc, gadget);
}

static inline struct tegra_xudc_ep *to_xudc_ep(struct usb_ep *ep)
{
	return container_of(ep, struct tegra_xudc_ep, usb_ep);
}

static inline struct tegra_xudc_request *to_xudc_req(struct usb_request *req)
{
	return container_of(req, struct tegra_xudc_request, usb_req);
}

static inline void dump_trb(struct tegra_xudc *xudc, const char *type,
			    struct tegra_xudc_trb *trb)
{
	dev_dbg(xudc->dev,
		"%s: %p, lo = %#x, hi = %#x, status = %#x, control = %#x\n",
		type, trb, trb->data_lo, trb->data_hi, trb->status,
		trb->control);
}

static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
{
	int err;

	pm_runtime_get_sync(xudc->dev);

	err = phy_power_on(xudc->curr_utmi_phy);
	if (err < 0)
		dev_err(xudc->dev, "utmi power on failed %d\n", err);

	err = phy_power_on(xudc->curr_usb3_phy);
	if (err < 0)
		dev_err(xudc->dev, "usb3 phy power on failed %d\n", err);

	dev_dbg(xudc->dev, "device mode on\n");

	phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
			 USB_ROLE_DEVICE);
}

static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
{
	bool connected = false;
	u32 pls, val;
	int err;

	dev_dbg(xudc->dev, "device mode off\n");

	connected = !!(xudc_readl(xudc, PORTSC) & PORTSC_CCS);

	reinit_completion(&xudc->disconnect_complete);

	phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG, USB_ROLE_NONE);

	pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
	      PORTSC_PLS_SHIFT;

	/* Direct link to U0 if disconnected in RESUME or U2. */
	if (xudc->soc->pls_quirk && xudc->gadget.speed == USB_SPEED_SUPER &&
	    (pls == PORTSC_PLS_RESUME || pls == PORTSC_PLS_U2)) {
		val = xudc_readl(xudc, PORTPM);
		val |= PORTPM_FRWE;
		xudc_writel(xudc, val, PORTPM);

		val = xudc_readl(xudc, PORTSC);
		val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
		val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
		xudc_writel(xudc, val, PORTSC);
	}

	/* Wait for disconnect event. */
	if (connected)
		wait_for_completion(&xudc->disconnect_complete);

	/* Make sure interrupt handler has completed before powergating. */
	synchronize_irq(xudc->irq);

	err = phy_power_off(xudc->curr_utmi_phy);
	if (err < 0)
		dev_err(xudc->dev, "utmi_phy power off failed %d\n", err);

	err = phy_power_off(xudc->curr_usb3_phy);
	if (err < 0)
		dev_err(xudc->dev, "usb3_phy power off failed %d\n", err);

	pm_runtime_put(xudc->dev);
}

static void tegra_xudc_usb_role_sw_work(struct work_struct *work)
{
	struct tegra_xudc *xudc = container_of(work, struct tegra_xudc,
					       usb_role_sw_work);

	if (xudc->device_mode)
		tegra_xudc_device_mode_on(xudc);
	else
		tegra_xudc_device_mode_off(xudc);
}

static int tegra_xudc_get_phy_index(struct tegra_xudc *xudc,
				    struct usb_phy *usbphy)
{
	unsigned int i;

	for (i = 0; i < xudc->soc->num_phys; i++) {
		if (xudc->usbphy[i] && usbphy == xudc->usbphy[i])
			return i;
	}

	dev_info(xudc->dev, "phy index could not be found for shared USB PHY\n");
	return -1;
}

static int tegra_xudc_vbus_notify(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct tegra_xudc *xudc = container_of(nb, struct tegra_xudc,
					       vbus_nb);
	struct usb_phy *usbphy = (struct usb_phy *)data;
	int phy_index;

	dev_dbg(xudc->dev, "%s(): event is %d\n", __func__, usbphy->last_event);

	if ((xudc->device_mode && usbphy->last_event == USB_EVENT_VBUS) ||
	    (!xudc->device_mode && usbphy->last_event != USB_EVENT_VBUS)) {
		dev_dbg(xudc->dev, "Same role(%d) received. Ignore",
			xudc->device_mode);
		return NOTIFY_OK;
	}

	xudc->device_mode = (usbphy->last_event == USB_EVENT_VBUS);

	phy_index = tegra_xudc_get_phy_index(xudc, usbphy);
	dev_dbg(xudc->dev, "%s(): current phy index is %d\n", __func__,
		phy_index);

	if (!xudc->suspended && phy_index != -1) {
		xudc->curr_utmi_phy = xudc->utmi_phy[phy_index];
		xudc->curr_usb3_phy = xudc->usb3_phy[phy_index];
		schedule_work(&xudc->usb_role_sw_work);
	}

	return NOTIFY_OK;
}

static void tegra_xudc_plc_reset_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tegra_xudc *xudc = container_of(dwork, struct tegra_xudc,
					       plc_reset_work);
	unsigned long flags;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->wait_csc) {
		u32 pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
			  PORTSC_PLS_SHIFT;

		if (pls == PORTSC_PLS_INACTIVE) {
			dev_info(xudc->dev, "PLS = Inactive. Toggle VBUS\n");
			phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
					 USB_ROLE_NONE);
			phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG,
					 USB_ROLE_DEVICE);

			xudc->wait_csc = false;
		}
	}

	spin_unlock_irqrestore(&xudc->lock, flags);
}

static void tegra_xudc_port_reset_war_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tegra_xudc *xudc =
		container_of(dwork, struct tegra_xudc, port_reset_war_work);
	unsigned long flags;
	u32 pls;
	int ret;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->device_mode && xudc->wait_for_sec_prc) {
		pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
		      PORTSC_PLS_SHIFT;
		dev_dbg(xudc->dev, "pls = %x\n", pls);

		if (pls == PORTSC_PLS_DISABLED) {
			dev_dbg(xudc->dev, "toggle vbus\n");
			/* PRC didn't complete within 100 ms; toggle VBUS. */
			ret = tegra_phy_xusb_utmi_port_reset(
				xudc->curr_utmi_phy);
			if (ret == 1)
				xudc->wait_for_sec_prc = 0;
		}
	}

	spin_unlock_irqrestore(&xudc->lock, flags);
}

static dma_addr_t trb_virt_to_phys(struct tegra_xudc_ep *ep,
				   struct tegra_xudc_trb *trb)
{
	unsigned int index;

	index = trb - ep->transfer_ring;

	if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
		return 0;

	return (ep->transfer_ring_phys + index * sizeof(*trb));
}

static struct tegra_xudc_trb *trb_phys_to_virt(struct tegra_xudc_ep *ep,
					       dma_addr_t addr)
{
	struct tegra_xudc_trb *trb;
	unsigned int index;

	index = (addr - ep->transfer_ring_phys) / sizeof(*trb);

	if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
		return NULL;

	trb = &ep->transfer_ring[index];

	return trb;
}

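/*
 * Endpoint state-change helpers: ep_reload() triggers a context reload
 * and waits for the controller to clear the bit, while the pause/halt
 * helpers below set or clear an endpoint's bit and then complete the
 * handshake by polling EP_STCHG and writing the bit(s) back to
 * acknowledge the state change.
 */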
static void ep_reload(struct tegra_xudc *xudc, unsigned int ep)
{
	xudc_writel(xudc, BIT(ep), EP_RELOAD);
	xudc_readl_poll(xudc, EP_RELOAD, BIT(ep), 0);
}

static void ep_pause(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_PAUSE);
	if (val & BIT(ep))
		return;
	val |= BIT(ep);

	xudc_writel(xudc, val, EP_PAUSE);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unpause(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_PAUSE);
	if (!(val & BIT(ep)))
		return;
	val &= ~BIT(ep);

	xudc_writel(xudc, val, EP_PAUSE);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unpause_all(struct tegra_xudc *xudc)
{
	u32 val;

	val = xudc_readl(xudc, EP_PAUSE);

	xudc_writel(xudc, 0, EP_PAUSE);

	xudc_readl_poll(xudc, EP_STCHG, val, val);

	xudc_writel(xudc, val, EP_STCHG);
}

static void ep_halt(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_HALT);
	if (val & BIT(ep))
		return;
	val |= BIT(ep);
	xudc_writel(xudc, val, EP_HALT);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unhalt(struct tegra_xudc *xudc, unsigned int ep)
{
	u32 val;

	val = xudc_readl(xudc, EP_HALT);
	if (!(val & BIT(ep)))
		return;
	val &= ~BIT(ep);
	xudc_writel(xudc, val, EP_HALT);

	xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));

	xudc_writel(xudc, BIT(ep), EP_STCHG);
}

static void ep_unhalt_all(struct tegra_xudc *xudc)
{
	u32 val;

	val = xudc_readl(xudc, EP_HALT);
	if (!val)
		return;
	xudc_writel(xudc, 0, EP_HALT);

	xudc_readl_poll(xudc, EP_STCHG, val, val);

	xudc_writel(xudc, val, EP_STCHG);
}

static void ep_wait_for_stopped(struct tegra_xudc *xudc, unsigned int ep)
{
	xudc_readl_poll(xudc, EP_STOPPED, BIT(ep), BIT(ep));
	xudc_writel(xudc, BIT(ep), EP_STOPPED);
}

static void ep_wait_for_inactive(struct tegra_xudc *xudc, unsigned int ep)
{
	xudc_readl_poll(xudc, EP_THREAD_ACTIVE, BIT(ep), 0);
}

static void tegra_xudc_req_done(struct tegra_xudc_ep *ep,
				struct tegra_xudc_request *req, int status)
{
	struct tegra_xudc *xudc = ep->xudc;

	dev_dbg(xudc->dev, "completing request %p on EP %u with status %d\n",
		req, ep->index, status);

	if (likely(req->usb_req.status == -EINPROGRESS))
		req->usb_req.status = status;

	list_del_init(&req->list);

	if (usb_endpoint_xfer_control(ep->desc)) {
		usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
					 (xudc->setup_state ==
					  DATA_STAGE_XFER));
	} else {
		usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
					 usb_endpoint_dir_in(ep->desc));
	}

	spin_unlock(&xudc->lock);
	usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
	spin_lock(&xudc->lock);
}

static void tegra_xudc_ep_nuke(struct tegra_xudc_ep *ep, int status)
{
	struct tegra_xudc_request *req;

	while (!list_empty(&ep->queue)) {
		req = list_first_entry(&ep->queue, struct tegra_xudc_request,
				       list);
		tegra_xudc_req_done(ep, req, status);
	}
}

static unsigned int ep_available_trbs(struct tegra_xudc_ep *ep)
{
	if (ep->ring_full)
		return 0;

	if (ep->deq_ptr > ep->enq_ptr)
		return ep->deq_ptr - ep->enq_ptr - 1;

	return XUDC_TRANSFER_RING_SIZE - (ep->enq_ptr - ep->deq_ptr) - 2;
}
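
/*
 * One slot is always kept unused so that enq_ptr == deq_ptr means "empty"
 * rather than "full"; the extra slot subtracted in the wrap-around case
 * appears to account for the link TRB at the end of the ring.
 */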

static void tegra_xudc_queue_one_trb(struct tegra_xudc_ep *ep,
				     struct tegra_xudc_request *req,
				     struct tegra_xudc_trb *trb,
				     bool ioc)
{
	struct tegra_xudc *xudc = ep->xudc;
	dma_addr_t buf_addr;
	size_t len;

	len = min_t(size_t, XUDC_TRB_MAX_BUFFER_SIZE, req->usb_req.length -
		    req->buf_queued);
	if (len > 0)
		buf_addr = req->usb_req.dma + req->buf_queued;
	else
		buf_addr = 0;

	trb_write_data_ptr(trb, buf_addr);

	trb_write_transfer_len(trb, len);
	trb_write_td_size(trb, req->trbs_needed - req->trbs_queued - 1);

	if (req->trbs_queued == req->trbs_needed - 1 ||
	    (req->need_zlp && req->trbs_queued == req->trbs_needed - 2))
		trb_write_chain(trb, 0);
	else
		trb_write_chain(trb, 1);

	trb_write_ioc(trb, ioc);

	if (usb_endpoint_dir_out(ep->desc) ||
	    (usb_endpoint_xfer_control(ep->desc) &&
	     (xudc->setup_state == DATA_STAGE_RECV)))
		trb_write_isp(trb, 1);
	else
		trb_write_isp(trb, 0);

	if (usb_endpoint_xfer_control(ep->desc)) {
		if (xudc->setup_state == DATA_STAGE_XFER ||
		    xudc->setup_state == DATA_STAGE_RECV)
			trb_write_type(trb, TRB_TYPE_DATA_STAGE);
		else
			trb_write_type(trb, TRB_TYPE_STATUS_STAGE);

		if (xudc->setup_state == DATA_STAGE_XFER ||
		    xudc->setup_state == STATUS_STAGE_XFER)
			trb_write_data_stage_dir(trb, 1);
		else
			trb_write_data_stage_dir(trb, 0);
	} else if (usb_endpoint_xfer_isoc(ep->desc)) {
		trb_write_type(trb, TRB_TYPE_ISOCH);
		trb_write_sia(trb, 1);
		trb_write_frame_id(trb, 0);
		trb_write_tlbpc(trb, 0);
	} else if (usb_ss_max_streams(ep->comp_desc)) {
		trb_write_type(trb, TRB_TYPE_STREAM);
		trb_write_stream_id(trb, req->usb_req.stream_id);
	} else {
		trb_write_type(trb, TRB_TYPE_NORMAL);
		trb_write_stream_id(trb, 0);
	}

	trb_write_cycle(trb, ep->pcs);

	req->trbs_queued++;
	req->buf_queued += len;

	dump_trb(xudc, "TRANSFER", trb);
}

static unsigned int tegra_xudc_queue_trbs(struct tegra_xudc_ep *ep,
					  struct tegra_xudc_request *req)
{
	unsigned int i, count, available;
	bool wait_td = false;

	available = ep_available_trbs(ep);
	count = req->trbs_needed - req->trbs_queued;
	if (available < count) {
		count = available;
		ep->ring_full = true;
	}

	/*
	 * To generate a zero-length packet on the USB bus, SW needs to
	 * schedule a standalone zero-length TD. Based on the HW's behavior,
	 * SW needs to schedule TDs in different ways for different endpoint
	 * types.
	 *
	 * For control endpoints:
	 * - Data stage TD (IOC = 1, CH = 0)
	 * - Ring doorbell and wait for the transfer event
	 * - Data stage TD for ZLP (IOC = 1, CH = 0)
	 * - Ring doorbell
	 *
	 * For bulk and interrupt endpoints:
	 * - Normal transfer TD (IOC = 0, CH = 0)
	 * - Normal transfer TD for ZLP (IOC = 1, CH = 0)
	 * - Ring doorbell
	 */

	if (req->need_zlp && usb_endpoint_xfer_control(ep->desc) && count > 1)
		wait_td = true;

	if (!req->first_trb)
		req->first_trb = &ep->transfer_ring[ep->enq_ptr];

	for (i = 0; i < count; i++) {
		struct tegra_xudc_trb *trb = &ep->transfer_ring[ep->enq_ptr];
		bool ioc = false;

		if ((i == count - 1) || (wait_td && i == count - 2))
			ioc = true;

		tegra_xudc_queue_one_trb(ep, req, trb, ioc);
		req->last_trb = trb;

		ep->enq_ptr++;
		if (ep->enq_ptr == XUDC_TRANSFER_RING_SIZE - 1) {
			trb = &ep->transfer_ring[ep->enq_ptr];
			trb_write_cycle(trb, ep->pcs);
			ep->pcs = !ep->pcs;
			ep->enq_ptr = 0;
		}

		if (ioc)
			break;
	}

	return count;
}

static void tegra_xudc_ep_ring_doorbell(struct tegra_xudc_ep *ep)
{
	struct tegra_xudc *xudc = ep->xudc;
	u32 val;

	if (list_empty(&ep->queue))
		return;

	val = DB_TARGET(ep->index);
	if (usb_endpoint_xfer_control(ep->desc)) {
		val |= DB_STREAMID(xudc->setup_seq_num);
	} else if (usb_ss_max_streams(ep->comp_desc) > 0) {
		struct tegra_xudc_request *req;

		/* Don't ring doorbell if the stream has been rejected. */
		if (ep->stream_rejected)
			return;

		req = list_first_entry(&ep->queue, struct tegra_xudc_request,
				       list);
		val |= DB_STREAMID(req->usb_req.stream_id);
	}

	dev_dbg(xudc->dev, "ring doorbell: %#x\n", val);
	xudc_writel(xudc, val, DB);
}

static void tegra_xudc_ep_kick_queue(struct tegra_xudc_ep *ep)
{
	struct tegra_xudc_request *req;
	bool trbs_queued = false;

	list_for_each_entry(req, &ep->queue, list) {
		if (ep->ring_full)
			break;

		if (tegra_xudc_queue_trbs(ep, req) > 0)
			trbs_queued = true;
	}

	if (trbs_queued)
		tegra_xudc_ep_ring_doorbell(ep);
}

static int
__tegra_xudc_ep_queue(struct tegra_xudc_ep *ep, struct tegra_xudc_request *req)
{
	struct tegra_xudc *xudc = ep->xudc;
	int err;

	if (usb_endpoint_xfer_control(ep->desc) && !list_empty(&ep->queue)) {
		dev_err(xudc->dev, "control EP has pending transfers\n");
		return -EINVAL;
	}

	if (usb_endpoint_xfer_control(ep->desc)) {
		err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
					     (xudc->setup_state ==
					      DATA_STAGE_XFER));
	} else {
		err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
					     usb_endpoint_dir_in(ep->desc));
	}

	if (err < 0) {
		dev_err(xudc->dev, "failed to map request: %d\n", err);
		return err;
	}

	req->first_trb = NULL;
	req->last_trb = NULL;
	req->buf_queued = 0;
	req->trbs_queued = 0;
	req->need_zlp = false;
	req->trbs_needed = DIV_ROUND_UP(req->usb_req.length,
					XUDC_TRB_MAX_BUFFER_SIZE);
	if (req->usb_req.length == 0)
		req->trbs_needed++;

	if (!usb_endpoint_xfer_isoc(ep->desc) &&
	    req->usb_req.zero && req->usb_req.length &&
	    ((req->usb_req.length % ep->usb_ep.maxpacket) == 0)) {
		req->trbs_needed++;
		req->need_zlp = true;
	}

	req->usb_req.status = -EINPROGRESS;
	req->usb_req.actual = 0;

	list_add_tail(&req->list, &ep->queue);

	tegra_xudc_ep_kick_queue(ep);

	return 0;
}

static int
tegra_xudc_ep_queue(struct usb_ep *usb_ep, struct usb_request *usb_req,
		    gfp_t gfp)
{
	struct tegra_xudc_request *req;
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep || !usb_req)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	req = to_xudc_req(usb_req);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated || !ep->desc) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_queue(ep, req);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static void squeeze_transfer_ring(struct tegra_xudc_ep *ep,
				  struct tegra_xudc_request *req)
{
	struct tegra_xudc_trb *trb = req->first_trb;
	bool pcs_enq = trb_read_cycle(trb);
	bool pcs;

	/*
	 * Clear out all the TRBs that are part of or follow the cancelled
	 * request, restoring each TRB's cycle bit to its un-enqueued state.
	 */
	while (trb != &ep->transfer_ring[ep->enq_ptr]) {
		pcs = trb_read_cycle(trb);
		memset(trb, 0, sizeof(*trb));
		trb_write_cycle(trb, !pcs);
		trb++;

		if (trb_read_type(trb) == TRB_TYPE_LINK)
			trb = ep->transfer_ring;
	}

	/* Requests will be re-queued at the start of the cancelled request. */
	ep->enq_ptr = req->first_trb - ep->transfer_ring;
	/*
	 * Retrieve the correct cycle bit state from the first TRB of
	 * the cancelled request.
	 */
	ep->pcs = pcs_enq;
	ep->ring_full = false;
	list_for_each_entry_continue(req, &ep->queue, list) {
		req->usb_req.status = -EINPROGRESS;
		req->usb_req.actual = 0;

		req->first_trb = NULL;
		req->last_trb = NULL;
		req->buf_queued = 0;
		req->trbs_queued = 0;
	}
}

/*
 * Determine if the given TRB is in the range [first trb, last trb] for the
 * given request.
 */
static bool trb_in_request(struct tegra_xudc_ep *ep,
			   struct tegra_xudc_request *req,
			   struct tegra_xudc_trb *trb)
{
	dev_dbg(ep->xudc->dev, "%s: request %p -> %p; trb %p\n", __func__,
		req->first_trb, req->last_trb, trb);

	if (trb >= req->first_trb && (trb <= req->last_trb ||
				      req->last_trb < req->first_trb))
		return true;

	if (trb < req->first_trb && trb <= req->last_trb &&
	    req->last_trb < req->first_trb)
		return true;

	return false;
}

/*
 * Determine if the given TRB is in the range [EP enqueue pointer, first TRB)
 * for the given endpoint and request.
 */
static bool trb_before_request(struct tegra_xudc_ep *ep,
			       struct tegra_xudc_request *req,
			       struct tegra_xudc_trb *trb)
{
	struct tegra_xudc_trb *enq_trb = &ep->transfer_ring[ep->enq_ptr];

	dev_dbg(ep->xudc->dev, "%s: request %p -> %p; enq ptr: %p; trb %p\n",
		__func__, req->first_trb, req->last_trb, enq_trb, trb);

	if (trb < req->first_trb && (enq_trb <= trb ||
				     req->first_trb < enq_trb))
		return true;

	if (trb > req->first_trb && req->first_trb < enq_trb && enq_trb <= trb)
		return true;

	return false;
}

static int
__tegra_xudc_ep_dequeue(struct tegra_xudc_ep *ep,
			struct tegra_xudc_request *req)
{
	struct tegra_xudc *xudc = ep->xudc;
	struct tegra_xudc_request *r;
	struct tegra_xudc_trb *deq_trb;
	bool busy, kick_queue = false;
	int ret = 0;

	/* Make sure the request is actually queued to this endpoint. */
	list_for_each_entry(r, &ep->queue, list) {
		if (r == req)
			break;
	}

	if (r != req)
		return -EINVAL;

	/* Request hasn't been queued in the transfer ring yet. */
	if (!req->trbs_queued) {
		tegra_xudc_req_done(ep, req, -ECONNRESET);
		return 0;
	}

	/* Halt DMA for this endpoint. */
	if (ep_ctx_read_state(ep->context) == EP_STATE_RUNNING) {
		ep_pause(xudc, ep->index);
		ep_wait_for_inactive(xudc, ep->index);
	}

	deq_trb = trb_phys_to_virt(ep, ep_ctx_read_deq_ptr(ep->context));
	/* Is the hardware processing the TRB at the dequeue pointer? */
	busy = (trb_read_cycle(deq_trb) == ep_ctx_read_dcs(ep->context));

	if (trb_in_request(ep, req, deq_trb) && busy) {
		/*
		 * Request has been partially completed or it hasn't
		 * started processing yet.
		 */
		dma_addr_t deq_ptr;

		squeeze_transfer_ring(ep, req);

		req->usb_req.actual = ep_ctx_read_edtla(ep->context);
		tegra_xudc_req_done(ep, req, -ECONNRESET);
		kick_queue = true;

		/* EDTLA is > 0: request has been partially completed */
		if (req->usb_req.actual > 0) {
			/*
			 * Abort the pending transfer and update the dequeue
			 * pointer
			 */
			ep_ctx_write_edtla(ep->context, 0);
			ep_ctx_write_partial_td(ep->context, 0);
			ep_ctx_write_data_offset(ep->context, 0);

			deq_ptr = trb_virt_to_phys(ep,
					&ep->transfer_ring[ep->enq_ptr]);

			if (dma_mapping_error(xudc->dev, deq_ptr)) {
				ret = -EINVAL;
			} else {
				ep_ctx_write_deq_ptr(ep->context, deq_ptr);
				ep_ctx_write_dcs(ep->context, ep->pcs);
				ep_reload(xudc, ep->index);
			}
		}
	} else if (trb_before_request(ep, req, deq_trb) && busy) {
		/* Request hasn't started processing yet. */
		squeeze_transfer_ring(ep, req);

		tegra_xudc_req_done(ep, req, -ECONNRESET);
		kick_queue = true;
	} else {
		/*
		 * Request has completed, but we haven't processed the
		 * completion event yet.
		 */
		tegra_xudc_req_done(ep, req, -ECONNRESET);
		ret = -EINVAL;
	}

	/* Resume the endpoint. */
	ep_unpause(xudc, ep->index);

	if (kick_queue)
		tegra_xudc_ep_kick_queue(ep);

	return ret;
}

static int
tegra_xudc_ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req)
{
	struct tegra_xudc_request *req;
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep || !usb_req)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	req = to_xudc_req(usb_req);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);

	if (xudc->powergated || !ep->desc) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_dequeue(ep, req);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static int __tegra_xudc_ep_set_halt(struct tegra_xudc_ep *ep, bool halt)
{
	struct tegra_xudc *xudc = ep->xudc;

	if (!ep->desc)
		return -EINVAL;

	if (usb_endpoint_xfer_isoc(ep->desc)) {
		dev_err(xudc->dev, "can't halt isoc EP\n");
		return -ENOTSUPP;
	}

	if (!!(xudc_readl(xudc, EP_HALT) & BIT(ep->index)) == halt) {
		dev_dbg(xudc->dev, "EP %u already %s\n", ep->index,
			halt ? "halted" : "not halted");
		return 0;
	}

	if (halt) {
		ep_halt(xudc, ep->index);
	} else {
		ep_ctx_write_state(ep->context, EP_STATE_DISABLED);

		ep_reload(xudc, ep->index);

		ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
		ep_ctx_write_seq_num(ep->context, 0);

		ep_reload(xudc, ep->index);
		ep_unpause(xudc, ep->index);
		ep_unhalt(xudc, ep->index);

		tegra_xudc_ep_ring_doorbell(ep);
	}

	return 0;
}

static int tegra_xudc_ep_set_halt(struct usb_ep *usb_ep, int value)
{
	struct tegra_xudc_ep *ep;
	struct tegra_xudc *xudc;
	unsigned long flags;
	int ret;

	if (!usb_ep)
		return -EINVAL;

	ep = to_xudc_ep(usb_ep);
	xudc = ep->xudc;

	spin_lock_irqsave(&xudc->lock, flags);
	if (xudc->powergated) {
		ret = -ESHUTDOWN;
		goto unlock;
	}

	if (value && usb_endpoint_dir_in(ep->desc) &&
	    !list_empty(&ep->queue)) {
		dev_err(xudc->dev, "can't halt EP with requests pending\n");
		ret = -EAGAIN;
		goto unlock;
	}

	ret = __tegra_xudc_ep_set_halt(ep, value);
unlock:
	spin_unlock_irqrestore(&xudc->lock, flags);

	return ret;
}

static void tegra_xudc_ep_context_setup(struct tegra_xudc_ep *ep)
{
	const struct usb_endpoint_descriptor *desc = ep->desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
	struct tegra_xudc *xudc = ep->xudc;
	u16 maxpacket, maxburst = 0, esit = 0;
	u32 val;

	maxpacket = usb_endpoint_maxp(desc) & 0x7ff;
	if (xudc->gadget.speed == USB_SPEED_SUPER) {
		if (!usb_endpoint_xfer_control(desc))
			maxburst = comp_desc->bMaxBurst;

		if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc))
			esit = le16_to_cpu(comp_desc->wBytesPerInterval);
	} else if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
		   (usb_endpoint_xfer_int(desc) ||
		    usb_endpoint_xfer_isoc(desc))) {
		if (xudc->gadget.speed == USB_SPEED_HIGH) {
			maxburst = (usb_endpoint_maxp(desc) >> 11) & 0x3;
			if (maxburst == 0x3) {
				dev_warn(xudc->dev,
					 "invalid endpoint maxburst\n");
				maxburst = 0x2;
			}
		}
		esit = maxpacket * (maxburst + 1);
	}

	memset(ep->context, 0, sizeof(*ep->context));

	ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
	ep_ctx_write_interval(ep->context, desc->bInterval);
	if (xudc->gadget.speed == USB_SPEED_SUPER) {
		if (usb_endpoint_xfer_isoc(desc)) {
			ep_ctx_write_mult(ep->context,
					  comp_desc->bmAttributes & 0x3);
		}

		if (usb_endpoint_xfer_bulk(desc)) {
			ep_ctx_write_max_pstreams(ep->context,
						  comp_desc->bmAttributes &
						  0x1f);
			ep_ctx_write_lsa(ep->context, 1);
		}
	}

	if (!usb_endpoint_xfer_control(desc) && usb_endpoint_dir_out(desc))
		val = usb_endpoint_type(desc);
	else
		val = usb_endpoint_type(desc) + EP_TYPE_CONTROL;

	ep_ctx_write_type(ep->context, val);
	ep_ctx_write_cerr(ep->context, 0x3);
	ep_ctx_write_max_packet_size(ep->context, maxpacket);
	ep_ctx_write_max_burst_size(ep->context, maxburst);

	ep_ctx_write_deq_ptr(ep->context, ep->transfer_ring_phys);
	ep_ctx_write_dcs(ep->context, ep->pcs);

	/* Select a reasonable average TRB length based on endpoint type. */
	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		val = 8;
		break;
	case USB_ENDPOINT_XFER_INT:
		val = 1024;
		break;
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_ISOC:
	default:
		val = 3072;
		break;
	}

	ep_ctx_write_avg_trb_len(ep->context, val);
	ep_ctx_write_max_esit_payload(ep->context, esit);

	ep_ctx_write_cerrcnt(ep->context, 0x3);
}

static void setup_link_trb(struct tegra_xudc_ep *ep,
			   struct tegra_xudc_trb *trb)
{
	trb_write_data_ptr(trb, ep->transfer_ring_phys);
	trb_write_type(trb, TRB_TYPE_LINK);
	trb_write_toggle_cycle(trb, 1);
}
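
/*
 * The link TRB at the end of the ring points back to the ring's base
 * address; toggle_cycle makes the producer cycle state flip on every
 * wrap (see the pcs handling in tegra_xudc_queue_trbs()).
 */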
1591
1592static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep)
1593{
1594 struct tegra_xudc *xudc = ep->xudc;
1595
1596 if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
1597 dev_err(xudc->dev, "endpoint %u already disabled\n",
1598 ep->index);
1599 return -EINVAL;
1600 }
1601
1602 ep_ctx_write_state(ep->context, EP_STATE_DISABLED);
1603
1604 ep_reload(xudc, ep->index);
1605
1606 tegra_xudc_ep_nuke(ep, -ESHUTDOWN);
1607
1608 xudc->nr_enabled_eps--;
1609 if (usb_endpoint_xfer_isoc(ep->desc))
1610 xudc->nr_isoch_eps--;
1611
1612 ep->desc = NULL;
1613 ep->comp_desc = NULL;
1614
1615 memset(ep->context, 0, sizeof(*ep->context));
1616
1617 ep_unpause(xudc, ep->index);
1618 ep_unhalt(xudc, ep->index);
1619 if (xudc_readl(xudc, EP_STOPPED) & BIT(ep->index))
1620 xudc_writel(xudc, BIT(ep->index), EP_STOPPED);
1621
1622 /*
1623 * If this is the last endpoint disabled in a de-configure request,
1624 * switch back to address state.
1625 */
1626 if ((xudc->device_state == USB_STATE_CONFIGURED) &&
1627 (xudc->nr_enabled_eps == 1)) {
1628 u32 val;
1629
1630 xudc->device_state = USB_STATE_ADDRESS;
1631 usb_gadget_set_state(&xudc->gadget, xudc->device_state);
1632
1633 val = xudc_readl(xudc, CTRL);
1634 val &= ~CTRL_RUN;
1635 xudc_writel(xudc, val, CTRL);
1636 }
1637
1638 dev_info(xudc->dev, "ep %u disabled\n", ep->index);
1639
1640 return 0;
1641}
1642
1643static int tegra_xudc_ep_disable(struct usb_ep *usb_ep)
1644{
1645 struct tegra_xudc_ep *ep;
1646 struct tegra_xudc *xudc;
1647 unsigned long flags;
1648 int ret;
1649
1650 if (!usb_ep)
1651 return -EINVAL;
1652
1653 ep = to_xudc_ep(usb_ep);
1654 xudc = ep->xudc;
1655
1656 spin_lock_irqsave(&xudc->lock, flags);
1657 if (xudc->powergated) {
1658 ret = -ESHUTDOWN;
1659 goto unlock;
1660 }
1661
1662 ret = __tegra_xudc_ep_disable(ep);
1663unlock:
1664 spin_unlock_irqrestore(&xudc->lock, flags);
1665
1666 return ret;
1667}
1668
1669static int __tegra_xudc_ep_enable(struct tegra_xudc_ep *ep,
1670 const struct usb_endpoint_descriptor *desc)
1671{
1672 struct tegra_xudc *xudc = ep->xudc;
1673 unsigned int i;
1674 u32 val;
1675
1676 if (xudc->gadget.speed == USB_SPEED_SUPER &&
1677 !usb_endpoint_xfer_control(desc) && !ep->usb_ep.comp_desc)
1678 return -EINVAL;
1679
1680 /* Disable the EP if it is not disabled */
1681 if (ep_ctx_read_state(ep->context) != EP_STATE_DISABLED)
1682 __tegra_xudc_ep_disable(ep);
1683
1684 ep->desc = desc;
1685 ep->comp_desc = ep->usb_ep.comp_desc;
1686
1687 if (usb_endpoint_xfer_isoc(desc)) {
1688 if (xudc->nr_isoch_eps > XUDC_MAX_ISOCH_EPS) {
1689 dev_err(xudc->dev, "too many isoch endpoints\n");
1690 return -EBUSY;
1691 }
1692 xudc->nr_isoch_eps++;
1693 }
1694
1695 memset(ep->transfer_ring, 0, XUDC_TRANSFER_RING_SIZE *
1696 sizeof(*ep->transfer_ring));
1697 setup_link_trb(ep, &ep->transfer_ring[XUDC_TRANSFER_RING_SIZE - 1]);
1698
1699 ep->enq_ptr = 0;
1700 ep->deq_ptr = 0;
1701 ep->pcs = true;
1702 ep->ring_full = false;
1703 xudc->nr_enabled_eps++;
1704
1705 tegra_xudc_ep_context_setup(ep);
1706
1707 /*
1708 * No need to reload and un-halt EP0. This will be done automatically
1709 * once a valid SETUP packet is received.
1710 */
1711 if (usb_endpoint_xfer_control(desc))
1712 goto out;
1713
1714 /*
1715 * Transition to configured state once the first non-control
1716 * endpoint is enabled.
1717 */
1718 if (xudc->device_state == USB_STATE_ADDRESS) {
1719 val = xudc_readl(xudc, CTRL);
1720 val |= CTRL_RUN;
1721 xudc_writel(xudc, val, CTRL);
1722
1723 xudc->device_state = USB_STATE_CONFIGURED;
1724 usb_gadget_set_state(&xudc->gadget, xudc->device_state);
1725 }
1726
1727 if (usb_endpoint_xfer_isoc(desc)) {
1728 /*
1729 * Pause all bulk endpoints when enabling an isoch endpoint
1730 * to ensure the isoch endpoint is allocated enough bandwidth.
1731 */
1732 for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
1733 if (xudc->ep[i].desc &&
1734 usb_endpoint_xfer_bulk(xudc->ep[i].desc))
1735 ep_pause(xudc, i);
1736 }
1737 }
1738
1739 ep_reload(xudc, ep->index);
1740 ep_unpause(xudc, ep->index);
1741 ep_unhalt(xudc, ep->index);
1742
1743 if (usb_endpoint_xfer_isoc(desc)) {
1744 for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
1745 if (xudc->ep[i].desc &&
1746 usb_endpoint_xfer_bulk(xudc->ep[i].desc))
1747 ep_unpause(xudc, i);
1748 }
1749 }
1750
1751out:
1752 dev_info(xudc->dev, "EP %u (type: %s, dir: %s) enabled\n", ep->index,
1753 usb_ep_type_string(usb_endpoint_type(ep->desc)),
1754 usb_endpoint_dir_in(ep->desc) ? "in" : "out");
1755
1756 return 0;
1757}
1758
1759static int tegra_xudc_ep_enable(struct usb_ep *usb_ep,
1760 const struct usb_endpoint_descriptor *desc)
1761{
1762 struct tegra_xudc_ep *ep;
1763 struct tegra_xudc *xudc;
1764 unsigned long flags;
1765 int ret;
1766
1767 if (!usb_ep || !desc || (desc->bDescriptorType != USB_DT_ENDPOINT))
1768 return -EINVAL;
1769
1770 ep = to_xudc_ep(usb_ep);
1771 xudc = ep->xudc;
1772
1773 spin_lock_irqsave(&xudc->lock, flags);
1774 if (xudc->powergated) {
1775 ret = -ESHUTDOWN;
1776 goto unlock;
1777 }
1778
1779 ret = __tegra_xudc_ep_enable(ep, desc);
1780unlock:
1781 spin_unlock_irqrestore(&xudc->lock, flags);
1782
1783 return ret;
1784}
1785
1786static struct usb_request *
1787tegra_xudc_ep_alloc_request(struct usb_ep *usb_ep, gfp_t gfp)
1788{
1789 struct tegra_xudc_request *req;
1790
1791 req = kzalloc(sizeof(*req), gfp);
1792 if (!req)
1793 return NULL;
1794
1795 INIT_LIST_HEAD(&req->list);
1796
1797 return &req->usb_req;
1798}
1799
1800static void tegra_xudc_ep_free_request(struct usb_ep *usb_ep,
1801 struct usb_request *usb_req)
1802{
1803 struct tegra_xudc_request *req = to_xudc_req(usb_req);
1804
1805 kfree(req);
1806}
1807
1808static struct usb_ep_ops tegra_xudc_ep_ops = {
1809 .enable = tegra_xudc_ep_enable,
1810 .disable = tegra_xudc_ep_disable,
1811 .alloc_request = tegra_xudc_ep_alloc_request,
1812 .free_request = tegra_xudc_ep_free_request,
1813 .queue = tegra_xudc_ep_queue,
1814 .dequeue = tegra_xudc_ep_dequeue,
1815 .set_halt = tegra_xudc_ep_set_halt,
1816};
1817
1818static int tegra_xudc_ep0_enable(struct usb_ep *usb_ep,
1819 const struct usb_endpoint_descriptor *desc)
1820{
1821 return -EBUSY;
1822}
1823
1824static int tegra_xudc_ep0_disable(struct usb_ep *usb_ep)
1825{
1826 return -EBUSY;
1827}
1828
1829static struct usb_ep_ops tegra_xudc_ep0_ops = {
1830 .enable = tegra_xudc_ep0_enable,
1831 .disable = tegra_xudc_ep0_disable,
1832 .alloc_request = tegra_xudc_ep_alloc_request,
1833 .free_request = tegra_xudc_ep_free_request,
1834 .queue = tegra_xudc_ep_queue,
1835 .dequeue = tegra_xudc_ep_dequeue,
1836 .set_halt = tegra_xudc_ep_set_halt,
1837};
1838
1839static int tegra_xudc_gadget_get_frame(struct usb_gadget *gadget)
1840{
1841 struct tegra_xudc *xudc = to_xudc(gadget);
1842 unsigned long flags;
1843 int ret;
1844
1845 spin_lock_irqsave(&xudc->lock, flags);
1846 if (xudc->powergated) {
1847 ret = -ESHUTDOWN;
1848 goto unlock;
1849 }
1850
1851 ret = (xudc_readl(xudc, MFINDEX) & MFINDEX_FRAME_MASK) >>
1852 MFINDEX_FRAME_SHIFT;
1853unlock:
1854 spin_unlock_irqrestore(&xudc->lock, flags);
1855
1856 return ret;
1857}
1858
1859static void tegra_xudc_resume_device_state(struct tegra_xudc *xudc)
1860{
1861 unsigned int i;
1862 u32 val;
1863
1864 ep_unpause_all(xudc);
1865
1866 /* Direct link to U0. */
1867 val = xudc_readl(xudc, PORTSC);
1868 if (((val & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT) != PORTSC_PLS_U0) {
1869 val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
1870 val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
1871 xudc_writel(xudc, val, PORTSC);
1872 }
1873
1874 if (xudc->device_state == USB_STATE_SUSPENDED) {
1875 xudc->device_state = xudc->resume_state;
1876 usb_gadget_set_state(&xudc->gadget, xudc->device_state);
1877 xudc->resume_state = 0;
1878 }
1879
1880 /*
1881 * Doorbells may be dropped if they are sent too soon (< ~200ns)
1882 * after unpausing the endpoint. Wait for 500ns just to be safe.
1883 */
1884 ndelay(500);
1885 for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
1886 tegra_xudc_ep_ring_doorbell(&xudc->ep[i]);
1887}
1888
1889static int tegra_xudc_gadget_wakeup(struct usb_gadget *gadget)
1890{
1891 struct tegra_xudc *xudc = to_xudc(gadget);
1892 unsigned long flags;
1893 int ret = 0;
1894 u32 val;
1895
1896 spin_lock_irqsave(&xudc->lock, flags);
1897
1898 if (xudc->powergated) {
1899 ret = -ESHUTDOWN;
1900 goto unlock;
1901 }
1902 val = xudc_readl(xudc, PORTPM);
1903 dev_dbg(xudc->dev, "%s: PORTPM=%#x, speed=%x\n", __func__,
1904 val, gadget->speed);
1905
1906 if (((xudc->gadget.speed <= USB_SPEED_HIGH) &&
1907 (val & PORTPM_RWE)) ||
1908 ((xudc->gadget.speed == USB_SPEED_SUPER) &&
1909 (val & PORTPM_FRWE))) {
1910 tegra_xudc_resume_device_state(xudc);
1911
1912 /* Send Device Notification packet. */
1913 if (xudc->gadget.speed == USB_SPEED_SUPER) {
1914 val = DEVNOTIF_LO_TYPE(DEVNOTIF_LO_TYPE_FUNCTION_WAKE)
1915 | DEVNOTIF_LO_TRIG;
1916 xudc_writel(xudc, 0, DEVNOTIF_HI);
1917 xudc_writel(xudc, val, DEVNOTIF_LO);
1918 }
1919 }
1920
1921unlock:
1922 dev_dbg(xudc->dev, "%s: ret value is %d", __func__, ret);
1923 spin_unlock_irqrestore(&xudc->lock, flags);
1924
1925 return ret;
1926}
1927
1928static int tegra_xudc_gadget_pullup(struct usb_gadget *gadget, int is_on)
1929{
1930 struct tegra_xudc *xudc = to_xudc(gadget);
1931 unsigned long flags;
1932 u32 val;
1933
1934 pm_runtime_get_sync(xudc->dev);
1935
1936 spin_lock_irqsave(&xudc->lock, flags);
1937
1938 if (is_on != xudc->pullup) {
1939 val = xudc_readl(xudc, CTRL);
1940 if (is_on)
1941 val |= CTRL_ENABLE;
1942 else
1943 val &= ~CTRL_ENABLE;
1944 xudc_writel(xudc, val, CTRL);
1945 }
1946
1947 xudc->pullup = is_on;
1948 dev_dbg(xudc->dev, "%s: pullup:%d", __func__, is_on);
1949
1950 spin_unlock_irqrestore(&xudc->lock, flags);
1951
1952 pm_runtime_put(xudc->dev);
1953
1954 return 0;
1955}
1956
1957static int tegra_xudc_gadget_start(struct usb_gadget *gadget,
1958 struct usb_gadget_driver *driver)
1959{
1960 struct tegra_xudc *xudc = to_xudc(gadget);
1961 unsigned long flags;
1962 u32 val;
1963 int ret;
b4e19931 1964 unsigned int i;
49db4272
NK
1965
1966 if (!driver)
1967 return -EINVAL;
1968
1969 pm_runtime_get_sync(xudc->dev);
1970
1971 spin_lock_irqsave(&xudc->lock, flags);
1972
1973 if (xudc->driver) {
1974 ret = -EBUSY;
1975 goto unlock;
1976 }
1977
1978 xudc->setup_state = WAIT_FOR_SETUP;
1979 xudc->device_state = USB_STATE_DEFAULT;
1980 usb_gadget_set_state(&xudc->gadget, xudc->device_state);
1981
1982 ret = __tegra_xudc_ep_enable(&xudc->ep[0], &tegra_xudc_ep0_desc);
1983 if (ret < 0)
1984 goto unlock;
1985
1986 val = xudc_readl(xudc, CTRL);
1987 val |= CTRL_IE | CTRL_LSE;
1988 xudc_writel(xudc, val, CTRL);
1989
1990 val = xudc_readl(xudc, PORTHALT);
1991 val |= PORTHALT_STCHG_INTR_EN;
1992 xudc_writel(xudc, val, PORTHALT);
1993
1994 if (xudc->pullup) {
1995 val = xudc_readl(xudc, CTRL);
1996 val |= CTRL_ENABLE;
1997 xudc_writel(xudc, val, CTRL);
1998 }
1999
2000 for (i = 0; i < xudc->soc->num_phys; i++)
2001 if (xudc->usbphy[i])
2002 otg_set_peripheral(xudc->usbphy[i]->otg, gadget);
2003
2004 xudc->driver = driver;
2005unlock:
2006 dev_dbg(xudc->dev, "%s: ret value is %d\n", __func__, ret);
2007 spin_unlock_irqrestore(&xudc->lock, flags);
2008
2009 pm_runtime_put(xudc->dev);
2010
2011 return ret;
2012}
2013
2014static int tegra_xudc_gadget_stop(struct usb_gadget *gadget)
2015{
2016 struct tegra_xudc *xudc = to_xudc(gadget);
2017 unsigned long flags;
2018 u32 val;
2019 unsigned int i;
2020
2021 pm_runtime_get_sync(xudc->dev);
2022
2023 spin_lock_irqsave(&xudc->lock, flags);
2024
2025 for (i = 0; i < xudc->soc->num_phys; i++)
2026 if (xudc->usbphy[i])
2027 otg_set_peripheral(xudc->usbphy[i]->otg, NULL);
2028
2029 val = xudc_readl(xudc, CTRL);
2030 val &= ~(CTRL_IE | CTRL_ENABLE);
2031 xudc_writel(xudc, val, CTRL);
2032
2033 __tegra_xudc_ep_disable(&xudc->ep[0]);
2034
2035 xudc->driver = NULL;
2036 dev_dbg(xudc->dev, "Gadget stopped\n");
2037
2038 spin_unlock_irqrestore(&xudc->lock, flags);
2039
2040 pm_runtime_put(xudc->dev);
2041
2042 return 0;
2043}
2044
2045static int tegra_xudc_set_selfpowered(struct usb_gadget *gadget, int is_on)
2046{
2047 struct tegra_xudc *xudc = to_xudc(gadget);
2048
2049 dev_dbg(xudc->dev, "%s: %d\n", __func__, is_on);
2050 xudc->selfpowered = !!is_on;
2051
2052 return 0;
2053}
2054
2055static const struct usb_gadget_ops tegra_xudc_gadget_ops = {
2056 .get_frame = tegra_xudc_gadget_get_frame,
2057 .wakeup = tegra_xudc_gadget_wakeup,
2058 .pullup = tegra_xudc_gadget_pullup,
2059 .udc_start = tegra_xudc_gadget_start,
2060 .udc_stop = tegra_xudc_gadget_stop,
2061 .set_selfpowered = tegra_xudc_set_selfpowered,
2062};
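/*
 * For reference: these ops back the standard gadget wrappers, e.g.
 * usb_gadget_connect()/usb_gadget_disconnect() invoke ->pullup with
 * is_on = 1/0, and usb_gadget_wakeup() invokes ->wakeup. The UDC core
 * calls ->udc_start/->udc_stop when a function driver binds or unbinds.
 */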
2063
2064static void no_op_complete(struct usb_ep *ep, struct usb_request *req)
2065{
2066}
2067
2068static int
2069tegra_xudc_ep0_queue_status(struct tegra_xudc *xudc,
2070 void (*cmpl)(struct usb_ep *, struct usb_request *))
2071{
2072 xudc->ep0_req->usb_req.buf = NULL;
2073 xudc->ep0_req->usb_req.dma = 0;
2074 xudc->ep0_req->usb_req.length = 0;
2075 xudc->ep0_req->usb_req.complete = cmpl;
2076 xudc->ep0_req->usb_req.context = xudc;
2077
2078 return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
2079}
2080
2081static int
2082tegra_xudc_ep0_queue_data(struct tegra_xudc *xudc, void *buf, size_t len,
2083 void (*cmpl)(struct usb_ep *, struct usb_request *))
2084{
2085 xudc->ep0_req->usb_req.buf = buf;
2086 xudc->ep0_req->usb_req.length = len;
2087 xudc->ep0_req->usb_req.complete = cmpl;
2088 xudc->ep0_req->usb_req.context = xudc;
2089
2090 return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
2091}
2092
2093static void tegra_xudc_ep0_req_done(struct tegra_xudc *xudc)
2094{
2095 switch (xudc->setup_state) {
2096 case DATA_STAGE_XFER:
2097 xudc->setup_state = STATUS_STAGE_RECV;
2098 tegra_xudc_ep0_queue_status(xudc, no_op_complete);
2099 break;
2100 case DATA_STAGE_RECV:
2101 xudc->setup_state = STATUS_STAGE_XFER;
2102 tegra_xudc_ep0_queue_status(xudc, no_op_complete);
2103 break;
2104 default:
2105 xudc->setup_state = WAIT_FOR_SETUP;
2106 break;
2107 }
2108}
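/*
 * Illustrative sketch (hypothetical helper, shown for clarity only): a
 * control IN reply only has to queue the DATA stage. When that request
 * completes, the state machine above advances to STATUS_STAGE_RECV and
 * queues the zero-length STATUS stage by itself.
 */
static int __maybe_unused ep0_reply_example(struct tegra_xudc *xudc,
					    void *reply, size_t len)
{
	/* setup_state was set to DATA_STAGE_XFER when the SETUP packet
	 * for this IN request was decoded. */
	return tegra_xudc_ep0_queue_data(xudc, reply, len, no_op_complete);
}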
2109
2110static int tegra_xudc_ep0_delegate_req(struct tegra_xudc *xudc,
2111 struct usb_ctrlrequest *ctrl)
2112{
2113 int ret;
2114
2115 spin_unlock(&xudc->lock);
2116 ret = xudc->driver->setup(&xudc->gadget, ctrl);
2117 spin_lock(&xudc->lock);
2118
2119 return ret;
2120}
2121
2122static void set_feature_complete(struct usb_ep *ep, struct usb_request *req)
2123{
2124 struct tegra_xudc *xudc = req->context;
2125
2126 if (xudc->test_mode_pattern) {
2127 xudc_writel(xudc, xudc->test_mode_pattern, PORT_TM);
2128 xudc->test_mode_pattern = 0;
2129 }
2130}
2131
2132static int tegra_xudc_ep0_set_feature(struct tegra_xudc *xudc,
2133 struct usb_ctrlrequest *ctrl)
2134{
2135 bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
2136 u32 feature = le16_to_cpu(ctrl->wValue);
2137 u32 index = le16_to_cpu(ctrl->wIndex);
2138 u32 val, ep;
2139 int ret;
2140
2141 if (le16_to_cpu(ctrl->wLength) != 0)
2142 return -EINVAL;
2143
2144 switch (ctrl->bRequestType & USB_RECIP_MASK) {
2145 case USB_RECIP_DEVICE:
2146 switch (feature) {
2147 case USB_DEVICE_REMOTE_WAKEUP:
2148 if ((xudc->gadget.speed == USB_SPEED_SUPER) ||
2149 (xudc->device_state == USB_STATE_DEFAULT))
2150 return -EINVAL;
2151
2152 val = xudc_readl(xudc, PORTPM);
2153 if (set)
2154 val |= PORTPM_RWE;
2155 else
2156 val &= ~PORTPM_RWE;
2157
2158 xudc_writel(xudc, val, PORTPM);
2159 break;
2160 case USB_DEVICE_U1_ENABLE:
2161 case USB_DEVICE_U2_ENABLE:
2162 if ((xudc->device_state != USB_STATE_CONFIGURED) ||
2163 (xudc->gadget.speed != USB_SPEED_SUPER))
2164 return -EINVAL;
2165
2166 val = xudc_readl(xudc, PORTPM);
2167 if ((feature == USB_DEVICE_U1_ENABLE) &&
2168 xudc->soc->u1_enable) {
2169 if (set)
2170 val |= PORTPM_U1E;
2171 else
2172 val &= ~PORTPM_U1E;
2173 }
2174
2175 if ((feature == USB_DEVICE_U2_ENABLE) &&
2176 xudc->soc->u2_enable) {
2177 if (set)
2178 val |= PORTPM_U2E;
2179 else
2180 val &= ~PORTPM_U2E;
2181 }
2182
2183 xudc_writel(xudc, val, PORTPM);
2184 break;
2185 case USB_DEVICE_TEST_MODE:
2186 if (xudc->gadget.speed != USB_SPEED_HIGH)
2187 return -EINVAL;
2188
2189 if (!set)
2190 return -EINVAL;
2191
2192 xudc->test_mode_pattern = index >> 8;
2193 break;
2194 default:
2195 return -EINVAL;
2196 }
2197
2198 break;
2199 case USB_RECIP_INTERFACE:
2200 if (xudc->device_state != USB_STATE_CONFIGURED)
2201 return -EINVAL;
2202
2203 switch (feature) {
2204 case USB_INTRF_FUNC_SUSPEND:
2205 if (set) {
2206 val = xudc_readl(xudc, PORTPM);
2207
2208 if (index & USB_INTRF_FUNC_SUSPEND_RW)
2209 val |= PORTPM_FRWE;
2210 else
2211 val &= ~PORTPM_FRWE;
2212
2213 xudc_writel(xudc, val, PORTPM);
2214 }
2215
2216 return tegra_xudc_ep0_delegate_req(xudc, ctrl);
2217 default:
2218 return -EINVAL;
2219 }
2220
2221 break;
2222 case USB_RECIP_ENDPOINT:
2223 ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
2224 ((index & USB_DIR_IN) ? 1 : 0);
2225
2226 if ((xudc->device_state == USB_STATE_DEFAULT) ||
2227 ((xudc->device_state == USB_STATE_ADDRESS) &&
2228 (index != 0)))
2229 return -EINVAL;
2230
2231 ret = __tegra_xudc_ep_set_halt(&xudc->ep[ep], set);
2232 if (ret < 0)
2233 return ret;
2234 break;
2235 default:
2236 return -EINVAL;
2237 }
2238
2239 return tegra_xudc_ep0_queue_status(xudc, set_feature_complete);
2240}
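/*
 * For reference, the wIndex-to-endpoint mapping used above and in
 * tegra_xudc_ep0_get_status() below: the OUT and IN halves of a logical
 * endpoint occupy adjacent entries of xudc->ep[], so endpoint N maps to
 * index 2*N (OUT) or 2*N + 1 (IN). For example, wIndex 0x81 (EP1 IN)
 * selects xudc->ep[3]. Hypothetical helper, shown for clarity only.
 */
static unsigned int __maybe_unused ep_index_from_windex(u16 windex)
{
	return (windex & USB_ENDPOINT_NUMBER_MASK) * 2 +
	       ((windex & USB_DIR_IN) ? 1 : 0);
}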
2241
2242static int tegra_xudc_ep0_get_status(struct tegra_xudc *xudc,
2243 struct usb_ctrlrequest *ctrl)
2244{
2245 struct tegra_xudc_ep_context *ep_ctx;
2246 u32 val, ep, index = le16_to_cpu(ctrl->wIndex);
2247 u16 status = 0;
2248
2249 if (!(ctrl->bRequestType & USB_DIR_IN))
2250 return -EINVAL;
2251
2252 if ((le16_to_cpu(ctrl->wValue) != 0) ||
2253 (le16_to_cpu(ctrl->wLength) != 2))
2254 return -EINVAL;
2255
2256 switch (ctrl->bRequestType & USB_RECIP_MASK) {
2257 case USB_RECIP_DEVICE:
2258 val = xudc_readl(xudc, PORTPM);
2259
2260 if (xudc->selfpowered)
2261 status |= BIT(USB_DEVICE_SELF_POWERED);
2262
2263 if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
2264 (val & PORTPM_RWE))
2265 status |= BIT(USB_DEVICE_REMOTE_WAKEUP);
2266
2267 if (xudc->gadget.speed == USB_SPEED_SUPER) {
2268 if (val & PORTPM_U1E)
2269 status |= BIT(USB_DEV_STAT_U1_ENABLED);
2270 if (val & PORTPM_U2E)
2271 status |= BIT(USB_DEV_STAT_U2_ENABLED);
2272 }
2273 break;
2274 case USB_RECIP_INTERFACE:
2275 if (xudc->gadget.speed == USB_SPEED_SUPER) {
2276 status |= USB_INTRF_STAT_FUNC_RW_CAP;
2277 val = xudc_readl(xudc, PORTPM);
2278 if (val & PORTPM_FRWE)
2279 status |= USB_INTRF_STAT_FUNC_RW;
2280 }
2281 break;
2282 case USB_RECIP_ENDPOINT:
2283 ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
2284 ((index & USB_DIR_IN) ? 1 : 0);
2285 ep_ctx = &xudc->ep_context[ep];
2286
2287 if ((xudc->device_state != USB_STATE_CONFIGURED) &&
2288 ((xudc->device_state != USB_STATE_ADDRESS) || (ep != 0)))
2289 return -EINVAL;
2290
2291 if (ep_ctx_read_state(ep_ctx) == EP_STATE_DISABLED)
2292 return -EINVAL;
2293
2294 if (xudc_readl(xudc, EP_HALT) & BIT(ep))
2295 status |= BIT(USB_ENDPOINT_HALT);
2296 break;
2297 default:
2298 return -EINVAL;
2299 }
2300
2301 xudc->status_buf = cpu_to_le16(status);
2302 return tegra_xudc_ep0_queue_data(xudc, &xudc->status_buf,
2303 sizeof(xudc->status_buf),
2304 no_op_complete);
2305}
2306
2307static void set_sel_complete(struct usb_ep *ep, struct usb_request *req)
2308{
2309 /* Nothing to do with SEL values */
2310}
2311
2312static int tegra_xudc_ep0_set_sel(struct tegra_xudc *xudc,
2313 struct usb_ctrlrequest *ctrl)
2314{
2315 if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
2316 USB_TYPE_STANDARD))
2317 return -EINVAL;
2318
2319 if (xudc->device_state == USB_STATE_DEFAULT)
2320 return -EINVAL;
2321
2322 if ((le16_to_cpu(ctrl->wIndex) != 0) ||
2323 (le16_to_cpu(ctrl->wValue) != 0) ||
2324 (le16_to_cpu(ctrl->wLength) != 6))
2325 return -EINVAL;
2326
2327 return tegra_xudc_ep0_queue_data(xudc, &xudc->sel_timing,
2328 sizeof(xudc->sel_timing),
2329 set_sel_complete);
2330}
2331
2332static void set_isoch_delay_complete(struct usb_ep *ep, struct usb_request *req)
2333{
2334 /* Nothing to do with isoch delay */
2335}
2336
2337static int tegra_xudc_ep0_set_isoch_delay(struct tegra_xudc *xudc,
2338 struct usb_ctrlrequest *ctrl)
2339{
2340 u32 delay = le16_to_cpu(ctrl->wValue);
2341
2342 if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
2343 USB_TYPE_STANDARD))
2344 return -EINVAL;
2345
2346 if ((delay > 65535) || (le16_to_cpu(ctrl->wIndex) != 0) ||
2347 (le16_to_cpu(ctrl->wLength) != 0))
2348 return -EINVAL;
2349
2350 xudc->isoch_delay = delay;
2351
2352 return tegra_xudc_ep0_queue_status(xudc, set_isoch_delay_complete);
2353}
2354
2355static void set_address_complete(struct usb_ep *ep, struct usb_request *req)
2356{
2357 struct tegra_xudc *xudc = req->context;
2358
2359 if ((xudc->device_state == USB_STATE_DEFAULT) &&
2360 (xudc->dev_addr != 0)) {
2361 xudc->device_state = USB_STATE_ADDRESS;
2362 usb_gadget_set_state(&xudc->gadget, xudc->device_state);
2363 } else if ((xudc->device_state == USB_STATE_ADDRESS) &&
2364 (xudc->dev_addr == 0)) {
2365 xudc->device_state = USB_STATE_DEFAULT;
2366 usb_gadget_set_state(&xudc->gadget, xudc->device_state);
2367 }
2368}
2369
2370static int tegra_xudc_ep0_set_address(struct tegra_xudc *xudc,
2371 struct usb_ctrlrequest *ctrl)
2372{
2373 struct tegra_xudc_ep *ep0 = &xudc->ep[0];
2374 u32 val, addr = le16_to_cpu(ctrl->wValue);
2375
2376 if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
2377 USB_TYPE_STANDARD))
2378 return -EINVAL;
2379
2380 if ((addr > 127) || (le16_to_cpu(ctrl->wIndex) != 0) ||
2381 (le16_to_cpu(ctrl->wLength) != 0))
2382 return -EINVAL;
2383
2384 if (xudc->device_state == USB_STATE_CONFIGURED)
2385 return -EINVAL;
2386
2387 dev_dbg(xudc->dev, "set address: %u\n", addr);
2388
2389 xudc->dev_addr = addr;
2390 val = xudc_readl(xudc, CTRL);
2391 val &= ~(CTRL_DEVADDR_MASK);
2392 val |= CTRL_DEVADDR(addr);
2393 xudc_writel(xudc, val, CTRL);
2394
2395 ep_ctx_write_devaddr(ep0->context, addr);
2396
2397 return tegra_xudc_ep0_queue_status(xudc, set_address_complete);
2398}
2399
2400static int tegra_xudc_ep0_standard_req(struct tegra_xudc *xudc,
2401 struct usb_ctrlrequest *ctrl)
2402{
2403 int ret;
2404
2405 switch (ctrl->bRequest) {
2406 case USB_REQ_GET_STATUS:
2407 dev_dbg(xudc->dev, "USB_REQ_GET_STATUS\n");
2408 ret = tegra_xudc_ep0_get_status(xudc, ctrl);
2409 break;
2410 case USB_REQ_SET_ADDRESS:
2411 dev_dbg(xudc->dev, "USB_REQ_SET_ADDRESS\n");
2412 ret = tegra_xudc_ep0_set_address(xudc, ctrl);
2413 break;
2414 case USB_REQ_SET_SEL:
2415 dev_dbg(xudc->dev, "USB_REQ_SET_SEL\n");
2416 ret = tegra_xudc_ep0_set_sel(xudc, ctrl);
2417 break;
2418 case USB_REQ_SET_ISOCH_DELAY:
2419 dev_dbg(xudc->dev, "USB_REQ_SET_ISOCH_DELAY\n");
2420 ret = tegra_xudc_ep0_set_isoch_delay(xudc, ctrl);
2421 break;
2422 case USB_REQ_CLEAR_FEATURE:
2423 case USB_REQ_SET_FEATURE:
2424 dev_dbg(xudc->dev, "USB_REQ_CLEAR/SET_FEATURE\n");
2425 ret = tegra_xudc_ep0_set_feature(xudc, ctrl);
2426 break;
2427 case USB_REQ_SET_CONFIGURATION:
2428 dev_dbg(xudc->dev, "USB_REQ_SET_CONFIGURATION\n");
2429 /*
2430 * In theory the RUN bit should be cleared before the status stage
2431 * of a deconfigure request is sent, but that appears to cause
2432 * problems. Clear RUN once all endpoints are disabled instead.
2433 */
2434 fallthrough;
2435 default:
2436 ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);
2437 break;
2438 }
2439
2440 return ret;
2441}
2442
2443static void tegra_xudc_handle_ep0_setup_packet(struct tegra_xudc *xudc,
2444 struct usb_ctrlrequest *ctrl,
2445 u16 seq_num)
2446{
2447 int ret;
2448
2449 xudc->setup_seq_num = seq_num;
2450
2451 /* Ensure EP0 is unhalted. */
2452 ep_unhalt(xudc, 0);
2453
2454 /*
2455 * On Tegra210, setup packets with sequence numbers 0xfffe or 0xffff
2456 * are invalid. Halt EP0 until we get a valid packet.
2457 */
2458 if (xudc->soc->invalid_seq_num &&
2459 (seq_num == 0xfffe || seq_num == 0xffff)) {
2460 dev_warn(xudc->dev, "invalid sequence number detected\n");
2461 ep_halt(xudc, 0);
2462 return;
2463 }
2464
2465 if (ctrl->wLength)
2466 xudc->setup_state = (ctrl->bRequestType & USB_DIR_IN) ?
2467 DATA_STAGE_XFER : DATA_STAGE_RECV;
2468 else
2469 xudc->setup_state = STATUS_STAGE_XFER;
2470
2471 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
2472 ret = tegra_xudc_ep0_standard_req(xudc, ctrl);
2473 else
2474 ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);
2475
2476 if (ret < 0) {
2477 dev_warn(xudc->dev, "setup request failed: %d\n", ret);
2478 xudc->setup_state = WAIT_FOR_SETUP;
2479 ep_halt(xudc, 0);
2480 }
2481}
2482
2483static void tegra_xudc_handle_ep0_event(struct tegra_xudc *xudc,
2484 struct tegra_xudc_trb *event)
2485{
2486 struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *)event;
2487 u16 seq_num = trb_read_seq_num(event);
2488
2489 if (xudc->setup_state != WAIT_FOR_SETUP) {
2490 /*
2491 * The controller is in the process of handling another
2492 * setup request. Queue subsequent requests and handle
2493 * the last one once the controller reports a sequence
2494 * number error.
2495 */
2496 memcpy(&xudc->setup_packet.ctrl_req, ctrl, sizeof(*ctrl));
2497 xudc->setup_packet.seq_num = seq_num;
2498 xudc->queued_setup_packet = true;
2499 } else {
2500 tegra_xudc_handle_ep0_setup_packet(xudc, ctrl, seq_num);
2501 }
2502}
2503
2504static struct tegra_xudc_request *
2505trb_to_request(struct tegra_xudc_ep *ep, struct tegra_xudc_trb *trb)
2506{
2507 struct tegra_xudc_request *req;
2508
2509 list_for_each_entry(req, &ep->queue, list) {
2510 if (!req->trbs_queued)
2511 break;
2512
2513 if (trb_in_request(ep, req, trb))
2514 return req;
2515 }
2516
2517 return NULL;
2518}
2519
2520static void tegra_xudc_handle_transfer_completion(struct tegra_xudc *xudc,
2521 struct tegra_xudc_ep *ep,
2522 struct tegra_xudc_trb *event)
2523{
2524 struct tegra_xudc_request *req;
2525 struct tegra_xudc_trb *trb;
2526 bool short_packet;
2527
2528 short_packet = (trb_read_cmpl_code(event) ==
2529 TRB_CMPL_CODE_SHORT_PACKET);
2530
2531 trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
2532 req = trb_to_request(ep, trb);
2533
2534 /*
2535 * TDs are complete on short packet or when the completed TRB is the
2536 * last TRB in the TD (the CHAIN bit is unset).
2537 */
2538 if (req && (short_packet || (!trb_read_chain(trb) &&
2539 (req->trbs_needed == req->trbs_queued)))) {
2540 struct tegra_xudc_trb *last = req->last_trb;
2541 unsigned int residual;
2542
2543 residual = trb_read_transfer_len(event);
2544 req->usb_req.actual = req->usb_req.length - residual;
2545
2546 dev_dbg(xudc->dev, "bytes transferred %u / %u\n",
2547 req->usb_req.actual, req->usb_req.length);
2548
2549 tegra_xudc_req_done(ep, req, 0);
2550
2551 if (ep->desc && usb_endpoint_xfer_control(ep->desc))
2552 tegra_xudc_ep0_req_done(xudc);
2553
2554 /*
2555 * Advance the dequeue pointer past the end of the current TD
2556 * on short packet completion.
2557 */
2558 if (short_packet) {
2559 ep->deq_ptr = (last - ep->transfer_ring) + 1;
2560 if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
2561 ep->deq_ptr = 0;
2562 }
2563 } else if (!req) {
2564 dev_warn(xudc->dev, "transfer event on dequeued request\n");
2565 }
2566
2567 if (ep->desc)
2568 tegra_xudc_ep_kick_queue(ep);
2569}
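/*
 * Worked example for the residual arithmetic above: for a 512-byte
 * request that the host short-terminates after 100 bytes, the transfer
 * event's length field reports the 412 bytes that were *not* transferred,
 * so usb_req.actual = 512 - 412 = 100.
 */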
2570
2571static void tegra_xudc_handle_transfer_event(struct tegra_xudc *xudc,
2572 struct tegra_xudc_trb *event)
2573{
2574 unsigned int ep_index = trb_read_endpoint_id(event);
2575 struct tegra_xudc_ep *ep = &xudc->ep[ep_index];
2576 struct tegra_xudc_trb *trb;
2577 u16 comp_code;
2578
2579 if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
2580 dev_warn(xudc->dev, "transfer event on disabled EP %u\n",
2581 ep_index);
2582 return;
2583 }
2584
2585 /* Update transfer ring dequeue pointer. */
2586 trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
2587 comp_code = trb_read_cmpl_code(event);
2588 if (comp_code != TRB_CMPL_CODE_BABBLE_DETECTED_ERR) {
2589 ep->deq_ptr = (trb - ep->transfer_ring) + 1;
2590
2591 if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
2592 ep->deq_ptr = 0;
2593 ep->ring_full = false;
2594 }
2595
2596 switch (comp_code) {
2597 case TRB_CMPL_CODE_SUCCESS:
2598 case TRB_CMPL_CODE_SHORT_PACKET:
2599 tegra_xudc_handle_transfer_completion(xudc, ep, event);
2600 break;
2601 case TRB_CMPL_CODE_HOST_REJECTED:
2602 dev_info(xudc->dev, "stream rejected on EP %u\n", ep_index);
2603
2604 ep->stream_rejected = true;
2605 break;
2606 case TRB_CMPL_CODE_PRIME_PIPE_RECEIVED:
2607 dev_info(xudc->dev, "prime pipe received on EP %u\n", ep_index);
2608
2609 if (ep->stream_rejected) {
2610 ep->stream_rejected = false;
2611 /*
2612 * An EP is stopped when a stream is rejected. Wait
2613 * for the EP to report that it is stopped and then
2614 * un-stop it.
2615 */
2616 ep_wait_for_stopped(xudc, ep_index);
2617 }
2618 tegra_xudc_ep_ring_doorbell(ep);
2619 break;
2620 case TRB_CMPL_CODE_BABBLE_DETECTED_ERR:
2621 /*
2622 * Wait for the EP to be stopped so the controller stops
2623 * processing doorbells.
2624 */
2625 ep_wait_for_stopped(xudc, ep_index);
2626 ep->enq_ptr = ep->deq_ptr;
2627 tegra_xudc_ep_nuke(ep, -EIO);
2628 fallthrough;
2629 case TRB_CMPL_CODE_STREAM_NUMP_ERROR:
2630 case TRB_CMPL_CODE_CTRL_DIR_ERR:
2631 case TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR:
2632 case TRB_CMPL_CODE_RING_UNDERRUN:
2633 case TRB_CMPL_CODE_RING_OVERRUN:
2634 case TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN:
2635 case TRB_CMPL_CODE_USB_TRANS_ERR:
2636 case TRB_CMPL_CODE_TRB_ERR:
2637 dev_err(xudc->dev, "completion error %#x on EP %u\n",
2638 comp_code, ep_index);
2639
2640 ep_halt(xudc, ep_index);
2641 break;
2642 case TRB_CMPL_CODE_CTRL_SEQNUM_ERR:
2643 dev_info(xudc->dev, "sequence number error\n");
2644
2645 /*
2646 * Kill any queued control request and skip to the last
2647 * setup packet we received.
2648 */
2649 tegra_xudc_ep_nuke(ep, -EINVAL);
2650 xudc->setup_state = WAIT_FOR_SETUP;
2651 if (!xudc->queued_setup_packet)
2652 break;
2653
2654 tegra_xudc_handle_ep0_setup_packet(xudc,
2655 &xudc->setup_packet.ctrl_req,
2656 xudc->setup_packet.seq_num);
2657 xudc->queued_setup_packet = false;
2658 break;
2659 case TRB_CMPL_CODE_STOPPED:
2660 dev_dbg(xudc->dev, "stop completion code on EP %u\n",
2661 ep_index);
2662
2663 /* Disconnected. */
2664 tegra_xudc_ep_nuke(ep, -ECONNREFUSED);
2665 break;
2666 default:
2667 dev_dbg(xudc->dev, "completion event %#x on EP %u\n",
2668 comp_code, ep_index);
2669 break;
2670 }
2671}
2672
2673static void tegra_xudc_reset(struct tegra_xudc *xudc)
2674{
2675 struct tegra_xudc_ep *ep0 = &xudc->ep[0];
2676 dma_addr_t deq_ptr;
2677 unsigned int i;
2678
2679 xudc->setup_state = WAIT_FOR_SETUP;
2680 xudc->device_state = USB_STATE_DEFAULT;
2681 usb_gadget_set_state(&xudc->gadget, xudc->device_state);
2682
2683 ep_unpause_all(xudc);
2684
2685 for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
2686 tegra_xudc_ep_nuke(&xudc->ep[i], -ESHUTDOWN);
2687
2688 /*
2689 * Reset sequence number and dequeue pointer to flush the transfer
2690 * ring.
2691 */
2692 ep0->deq_ptr = ep0->enq_ptr;
2693 ep0->ring_full = false;
2694
2695 xudc->setup_seq_num = 0;
2696 xudc->queued_setup_packet = false;
2697
2698 ep_ctx_write_seq_num(ep0->context, xudc->setup_seq_num);
2699
2700 deq_ptr = trb_virt_to_phys(ep0, &ep0->transfer_ring[ep0->deq_ptr]);
2701
2702 if (!dma_mapping_error(xudc->dev, deq_ptr)) {
2703 ep_ctx_write_deq_ptr(ep0->context, deq_ptr);
2704 ep_ctx_write_dcs(ep0->context, ep0->pcs);
2705 }
2706
2707 ep_unhalt_all(xudc);
2708 ep_reload(xudc, 0);
2709 ep_unpause(xudc, 0);
2710}
2711
2712static void tegra_xudc_port_connect(struct tegra_xudc *xudc)
2713{
2714 struct tegra_xudc_ep *ep0 = &xudc->ep[0];
2715 u16 maxpacket;
2716 u32 val;
2717
2718 val = (xudc_readl(xudc, PORTSC) & PORTSC_PS_MASK) >> PORTSC_PS_SHIFT;
2719 switch (val) {
2720 case PORTSC_PS_LS:
2721 xudc->gadget.speed = USB_SPEED_LOW;
2722 break;
2723 case PORTSC_PS_FS:
2724 xudc->gadget.speed = USB_SPEED_FULL;
2725 break;
2726 case PORTSC_PS_HS:
2727 xudc->gadget.speed = USB_SPEED_HIGH;
2728 break;
2729 case PORTSC_PS_SS:
2730 xudc->gadget.speed = USB_SPEED_SUPER;
2731 break;
2732 default:
2733 xudc->gadget.speed = USB_SPEED_UNKNOWN;
2734 break;
2735 }
2736
2737 xudc->device_state = USB_STATE_DEFAULT;
2738 usb_gadget_set_state(&xudc->gadget, xudc->device_state);
2739
2740 xudc->setup_state = WAIT_FOR_SETUP;
2741
2742 if (xudc->gadget.speed == USB_SPEED_SUPER)
2743 maxpacket = 512;
2744 else
2745 maxpacket = 64;
2746
2747 ep_ctx_write_max_packet_size(ep0->context, maxpacket);
2748 tegra_xudc_ep0_desc.wMaxPacketSize = cpu_to_le16(maxpacket);
2749 usb_ep_set_maxpacket_limit(&ep0->usb_ep, maxpacket);
2750
2751 if (!xudc->soc->u1_enable) {
2752 val = xudc_readl(xudc, PORTPM);
2753 val &= ~(PORTPM_U1TIMEOUT_MASK);
2754 xudc_writel(xudc, val, PORTPM);
2755 }
2756
2757 if (!xudc->soc->u2_enable) {
2758 val = xudc_readl(xudc, PORTPM);
2759 val &= ~(PORTPM_U2TIMEOUT_MASK);
2760 xudc_writel(xudc, val, PORTPM);
2761 }
2762
2763 if (xudc->gadget.speed <= USB_SPEED_HIGH) {
2764 val = xudc_readl(xudc, PORTPM);
2765 val &= ~(PORTPM_L1S_MASK);
2766 if (xudc->soc->lpm_enable)
2767 val |= PORTPM_L1S(PORTPM_L1S_ACCEPT);
2768 else
2769 val |= PORTPM_L1S(PORTPM_L1S_NYET);
2770 xudc_writel(xudc, val, PORTPM);
2771 }
2772
2773 val = xudc_readl(xudc, ST);
2774 if (val & ST_RC)
2775 xudc_writel(xudc, ST_RC, ST);
2776}
2777
2778static void tegra_xudc_port_disconnect(struct tegra_xudc *xudc)
2779{
2780 tegra_xudc_reset(xudc);
2781
2782 if (xudc->driver && xudc->driver->disconnect) {
2783 spin_unlock(&xudc->lock);
2784 xudc->driver->disconnect(&xudc->gadget);
2785 spin_lock(&xudc->lock);
2786 }
2787
2788 xudc->device_state = USB_STATE_NOTATTACHED;
2789 usb_gadget_set_state(&xudc->gadget, xudc->device_state);
2790
2791 complete(&xudc->disconnect_complete);
2792}
2793
2794static void tegra_xudc_port_reset(struct tegra_xudc *xudc)
2795{
2796 tegra_xudc_reset(xudc);
2797
2798 if (xudc->driver) {
2799 spin_unlock(&xudc->lock);
2800 usb_gadget_udc_reset(&xudc->gadget, xudc->driver);
2801 spin_lock(&xudc->lock);
2802 }
2803
2804 tegra_xudc_port_connect(xudc);
2805}
2806
2807static void tegra_xudc_port_suspend(struct tegra_xudc *xudc)
2808{
2809 dev_dbg(xudc->dev, "port suspend\n");
2810
2811 xudc->resume_state = xudc->device_state;
2812 xudc->device_state = USB_STATE_SUSPENDED;
2813 usb_gadget_set_state(&xudc->gadget, xudc->device_state);
2814
2815 if (xudc->driver->suspend) {
2816 spin_unlock(&xudc->lock);
2817 xudc->driver->suspend(&xudc->gadget);
2818 spin_lock(&xudc->lock);
2819 }
2820}
2821
2822static void tegra_xudc_port_resume(struct tegra_xudc *xudc)
2823{
2824 dev_dbg(xudc->dev, "port resume\n");
2825
2826 tegra_xudc_resume_device_state(xudc);
2827
2828 if (xudc->driver->resume) {
2829 spin_unlock(&xudc->lock);
2830 xudc->driver->resume(&xudc->gadget);
2831 spin_lock(&xudc->lock);
2832 }
2833}
2834
2835static inline void clear_port_change(struct tegra_xudc *xudc, u32 flag)
2836{
2837 u32 val;
2838
2839 val = xudc_readl(xudc, PORTSC);
2840 val &= ~PORTSC_CHANGE_MASK;
2841 val |= flag;
2842 xudc_writel(xudc, val, PORTSC);
2843}
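/*
 * Note on the read-modify-write above: the PORTSC change bits are
 * write-1-to-clear, so masking out PORTSC_CHANGE_MASK before OR-ing in
 * a single flag is what prevents, say, an acknowledgment of PORTSC_PRC
 * from also discarding a pending PORTSC_CSC. Writing the raw readback
 * would acknowledge every change bit that happened to be set.
 */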
2844
2845static void __tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
2846{
2847 u32 portsc, porthalt;
2848
2849 porthalt = xudc_readl(xudc, PORTHALT);
2850 if ((porthalt & PORTHALT_STCHG_REQ) &&
2851 (porthalt & PORTHALT_HALT_LTSSM)) {
2852 dev_dbg(xudc->dev, "STCHG_REQ, PORTHALT = %#x\n", porthalt);
2853 porthalt &= ~PORTHALT_HALT_LTSSM;
2854 xudc_writel(xudc, porthalt, PORTHALT);
2855 }
2856
2857 portsc = xudc_readl(xudc, PORTSC);
2858 if ((portsc & PORTSC_PRC) && (portsc & PORTSC_PR)) {
2859 dev_dbg(xudc->dev, "PRC, PR, PORTSC = %#x\n", portsc);
2860 clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
2861#define TOGGLE_VBUS_WAIT_MS 100
2862 if (xudc->soc->port_reset_quirk) {
2863 schedule_delayed_work(&xudc->port_reset_war_work,
2864 msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
2865 xudc->wait_for_sec_prc = 1;
2866 }
2867 }
2868
2869 if ((portsc & PORTSC_PRC) && !(portsc & PORTSC_PR)) {
2870 dev_dbg(xudc->dev, "PRC, Not PR, PORTSC = %#x\n", portsc);
2871 clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
2872 tegra_xudc_port_reset(xudc);
2873 cancel_delayed_work(&xudc->port_reset_war_work);
2874 xudc->wait_for_sec_prc = 0;
2875 }
2876
2877 portsc = xudc_readl(xudc, PORTSC);
2878 if (portsc & PORTSC_WRC) {
2879 dev_dbg(xudc->dev, "WRC, PORTSC = %#x\n", portsc);
2880 clear_port_change(xudc, PORTSC_WRC | PORTSC_PED);
2881 if (!(xudc_readl(xudc, PORTSC) & PORTSC_WPR))
2882 tegra_xudc_port_reset(xudc);
2883 }
2884
2885 portsc = xudc_readl(xudc, PORTSC);
2886 if (portsc & PORTSC_CSC) {
2887 dev_dbg(xudc->dev, "CSC, PORTSC = %#x\n", portsc);
2888 clear_port_change(xudc, PORTSC_CSC);
2889
2890 if (portsc & PORTSC_CCS)
2891 tegra_xudc_port_connect(xudc);
2892 else
2893 tegra_xudc_port_disconnect(xudc);
2894
2895 if (xudc->wait_csc) {
2896 cancel_delayed_work(&xudc->plc_reset_work);
2897 xudc->wait_csc = false;
2898 }
2899 }
2900
2901 portsc = xudc_readl(xudc, PORTSC);
2902 if (portsc & PORTSC_PLC) {
2903 u32 pls = (portsc & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT;
2904
2905 dev_dbg(xudc->dev, "PLC, PORTSC = %#x\n", portsc);
2906 clear_port_change(xudc, PORTSC_PLC);
2907 switch (pls) {
2908 case PORTSC_PLS_U3:
2909 tegra_xudc_port_suspend(xudc);
2910 break;
2911 case PORTSC_PLS_U0:
2912 if (xudc->gadget.speed < USB_SPEED_SUPER)
2913 tegra_xudc_port_resume(xudc);
2914 break;
2915 case PORTSC_PLS_RESUME:
2916 if (xudc->gadget.speed == USB_SPEED_SUPER)
2917 tegra_xudc_port_resume(xudc);
2918 break;
2919 case PORTSC_PLS_INACTIVE:
2920 schedule_delayed_work(&xudc->plc_reset_work,
2921 msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
2922 xudc->wait_csc = true;
2923 break;
2924 default:
2925 break;
2926 }
2927 }
2928
2929 if (portsc & PORTSC_CEC) {
2930 dev_warn(xudc->dev, "CEC, PORTSC = %#x\n", portsc);
2931 clear_port_change(xudc, PORTSC_CEC);
2932 }
2933
2934 dev_dbg(xudc->dev, "PORTSC = %#x\n", xudc_readl(xudc, PORTSC));
2935}
2936
2937static void tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
2938{
2939 while ((xudc_readl(xudc, PORTSC) & PORTSC_CHANGE_MASK) ||
2940 (xudc_readl(xudc, PORTHALT) & PORTHALT_STCHG_REQ))
2941 __tegra_xudc_handle_port_status(xudc);
2942}
2943
2944static void tegra_xudc_handle_event(struct tegra_xudc *xudc,
2945 struct tegra_xudc_trb *event)
2946{
2947 u32 type = trb_read_type(event);
2948
2949 dump_trb(xudc, "EVENT", event);
2950
2951 switch (type) {
2952 case TRB_TYPE_PORT_STATUS_CHANGE_EVENT:
2953 tegra_xudc_handle_port_status(xudc);
2954 break;
2955 case TRB_TYPE_TRANSFER_EVENT:
2956 tegra_xudc_handle_transfer_event(xudc, event);
2957 break;
2958 case TRB_TYPE_SETUP_PACKET_EVENT:
2959 tegra_xudc_handle_ep0_event(xudc, event);
2960 break;
2961 default:
2962 dev_info(xudc->dev, "Unrecognized TRB type = %#x\n", type);
2963 break;
2964 }
2965}
2966
2967static void tegra_xudc_process_event_ring(struct tegra_xudc *xudc)
2968{
2969 struct tegra_xudc_trb *event;
2970 dma_addr_t erdp;
2971
2972 while (true) {
2973 event = xudc->event_ring[xudc->event_ring_index] +
2974 xudc->event_ring_deq_ptr;
2975
2976 if (trb_read_cycle(event) != xudc->ccs)
2977 break;
2978
2979 tegra_xudc_handle_event(xudc, event);
2980
2981 xudc->event_ring_deq_ptr++;
2982 if (xudc->event_ring_deq_ptr == XUDC_EVENT_RING_SIZE) {
2983 xudc->event_ring_deq_ptr = 0;
2984 xudc->event_ring_index++;
2985 }
2986
2987 if (xudc->event_ring_index == XUDC_NR_EVENT_RINGS) {
2988 xudc->event_ring_index = 0;
2989 xudc->ccs = !xudc->ccs;
2990 }
2991 }
2992
2993 erdp = xudc->event_ring_phys[xudc->event_ring_index] +
2994 xudc->event_ring_deq_ptr * sizeof(*event);
2995
2996 xudc_writel(xudc, upper_32_bits(erdp), ERDPHI);
2997 xudc_writel(xudc, lower_32_bits(erdp) | ERDPLO_EHB, ERDPLO);
2998}
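/*
 * Illustrative sketch of the cycle-bit protocol that terminates the loop
 * above: the controller flips the cycle bit it writes on every pass over
 * the ring, so a TRB is new only while its cycle bit matches the driver's
 * consumer cycle state (ccs), which is toggled on wrap-around.
 * Hypothetical helper, equivalent to the loop's exit condition.
 */
static bool __maybe_unused event_ring_has_work(struct tegra_xudc *xudc)
{
	struct tegra_xudc_trb *event =
		xudc->event_ring[xudc->event_ring_index] +
		xudc->event_ring_deq_ptr;

	return trb_read_cycle(event) == xudc->ccs;
}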
2999
3000static irqreturn_t tegra_xudc_irq(int irq, void *data)
3001{
3002 struct tegra_xudc *xudc = data;
3003 unsigned long flags;
3004 u32 val;
3005
3006 val = xudc_readl(xudc, ST);
3007 if (!(val & ST_IP))
3008 return IRQ_NONE;
3009 xudc_writel(xudc, ST_IP, ST);
3010
3011 spin_lock_irqsave(&xudc->lock, flags);
3012 tegra_xudc_process_event_ring(xudc);
3013 spin_unlock_irqrestore(&xudc->lock, flags);
3014
3015 return IRQ_HANDLED;
3016}
3017
3018static int tegra_xudc_alloc_ep(struct tegra_xudc *xudc, unsigned int index)
3019{
3020 struct tegra_xudc_ep *ep = &xudc->ep[index];
3021
3022 ep->xudc = xudc;
3023 ep->index = index;
3024 ep->context = &xudc->ep_context[index];
3025 INIT_LIST_HEAD(&ep->queue);
3026
3027 /*
3028 * EP1 would be the input endpoint corresponding to EP0, but since
3029 * EP0 is bi-directional, EP1 is unused.
3030 */
3031 if (index == 1)
3032 return 0;
3033
3034 ep->transfer_ring = dma_pool_alloc(xudc->transfer_ring_pool,
3035 GFP_KERNEL,
3036 &ep->transfer_ring_phys);
3037 if (!ep->transfer_ring)
3038 return -ENOMEM;
3039
3040 if (index) {
3041 snprintf(ep->name, sizeof(ep->name), "ep%u%s", index / 2,
3042 (index % 2 == 0) ? "out" : "in");
3043 ep->usb_ep.name = ep->name;
3044 usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
3045 ep->usb_ep.max_streams = 16;
3046 ep->usb_ep.ops = &tegra_xudc_ep_ops;
3047 ep->usb_ep.caps.type_bulk = true;
3048 ep->usb_ep.caps.type_int = true;
3049 if (index & 1)
3050 ep->usb_ep.caps.dir_in = true;
3051 else
3052 ep->usb_ep.caps.dir_out = true;
3053 list_add_tail(&ep->usb_ep.ep_list, &xudc->gadget.ep_list);
3054 } else {
3055 strscpy(ep->name, "ep0", sizeof(ep->name));
3056 ep->usb_ep.name = ep->name;
3057 usb_ep_set_maxpacket_limit(&ep->usb_ep, 512);
3058 ep->usb_ep.ops = &tegra_xudc_ep0_ops;
3059 ep->usb_ep.caps.type_control = true;
3060 ep->usb_ep.caps.dir_in = true;
3061 ep->usb_ep.caps.dir_out = true;
3062 }
3063
3064 return 0;
3065}
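/*
 * Resulting endpoint layout, for reference (index 1 is the unused IN
 * half of the bi-directional ep0):
 *
 *   index 0 -> "ep0" (control, IN + OUT)
 *   index 2 -> "ep1out", index 3 -> "ep1in"
 *   index 4 -> "ep2out", index 5 -> "ep2in", and so on.
 */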
3066
3067static void tegra_xudc_free_ep(struct tegra_xudc *xudc, unsigned int index)
3068{
3069 struct tegra_xudc_ep *ep = &xudc->ep[index];
3070
3071 /*
3072 * EP1 would be the input endpoint corresponding to EP0, but since
3073 * EP0 is bi-directional, EP1 is unused.
3074 */
3075 if (index == 1)
3076 return;
3077
3078 dma_pool_free(xudc->transfer_ring_pool, ep->transfer_ring,
3079 ep->transfer_ring_phys);
3080}
3081
3082static int tegra_xudc_alloc_eps(struct tegra_xudc *xudc)
3083{
3084 struct usb_request *req;
3085 unsigned int i;
3086 int err;
3087
3088 xudc->ep_context =
3089 dma_alloc_coherent(xudc->dev, XUDC_NR_EPS *
3090 sizeof(*xudc->ep_context),
3091 &xudc->ep_context_phys, GFP_KERNEL);
3092 if (!xudc->ep_context)
3093 return -ENOMEM;
3094
3095 xudc->transfer_ring_pool =
3096 dmam_pool_create(dev_name(xudc->dev), xudc->dev,
3097 XUDC_TRANSFER_RING_SIZE *
3098 sizeof(struct tegra_xudc_trb),
3099 sizeof(struct tegra_xudc_trb), 0);
3100 if (!xudc->transfer_ring_pool) {
3101 err = -ENOMEM;
3102 goto free_ep_context;
3103 }
3104
3105 INIT_LIST_HEAD(&xudc->gadget.ep_list);
3106 for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
3107 err = tegra_xudc_alloc_ep(xudc, i);
3108 if (err < 0)
3109 goto free_eps;
3110 }
3111
3112 req = tegra_xudc_ep_alloc_request(&xudc->ep[0].usb_ep, GFP_KERNEL);
3113 if (!req) {
3114 err = -ENOMEM;
3115 goto free_eps;
3116 }
3117 xudc->ep0_req = to_xudc_req(req);
3118
3119 return 0;
3120
3121free_eps:
3122 for (; i > 0; i--)
3123 tegra_xudc_free_ep(xudc, i - 1);
3124free_ep_context:
3125 dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
3126 xudc->ep_context, xudc->ep_context_phys);
3127 return err;
3128}
3129
3130static void tegra_xudc_init_eps(struct tegra_xudc *xudc)
3131{
3132 xudc_writel(xudc, lower_32_bits(xudc->ep_context_phys), ECPLO);
3133 xudc_writel(xudc, upper_32_bits(xudc->ep_context_phys), ECPHI);
3134}
3135
3136static void tegra_xudc_free_eps(struct tegra_xudc *xudc)
3137{
3138 unsigned int i;
3139
3140 tegra_xudc_ep_free_request(&xudc->ep[0].usb_ep,
3141 &xudc->ep0_req->usb_req);
3142
3143 for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
3144 tegra_xudc_free_ep(xudc, i);
3145
3146 dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
3147 xudc->ep_context, xudc->ep_context_phys);
3148}
3149
3150static int tegra_xudc_alloc_event_ring(struct tegra_xudc *xudc)
3151{
3152 unsigned int i;
3153
3154 for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
3155 xudc->event_ring[i] =
3156 dma_alloc_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
3157 sizeof(*xudc->event_ring[i]),
3158 &xudc->event_ring_phys[i],
3159 GFP_KERNEL);
3160 if (!xudc->event_ring[i])
3161 goto free_dma;
3162 }
3163
3164 return 0;
3165
3166free_dma:
3167 for (; i > 0; i--) {
3168 dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
3169 sizeof(*xudc->event_ring[i - 1]),
3170 xudc->event_ring[i - 1],
3171 xudc->event_ring_phys[i - 1]);
3172 }
3173 return -ENOMEM;
3174}
3175
3176static void tegra_xudc_init_event_ring(struct tegra_xudc *xudc)
3177{
3178 unsigned int i;
3179 u32 val;
3180
3181 val = xudc_readl(xudc, SPARAM);
3182 val &= ~(SPARAM_ERSTMAX_MASK);
3183 val |= SPARAM_ERSTMAX(XUDC_NR_EVENT_RINGS);
3184 xudc_writel(xudc, val, SPARAM);
3185
3186 for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
3187 memset(xudc->event_ring[i], 0, XUDC_EVENT_RING_SIZE *
3188 sizeof(*xudc->event_ring[i]));
3189
3190 val = xudc_readl(xudc, ERSTSZ);
3191 val &= ~(ERSTSZ_ERSTXSZ_MASK << ERSTSZ_ERSTXSZ_SHIFT(i));
3192 val |= XUDC_EVENT_RING_SIZE << ERSTSZ_ERSTXSZ_SHIFT(i);
3193 xudc_writel(xudc, val, ERSTSZ);
3194
3195 xudc_writel(xudc, lower_32_bits(xudc->event_ring_phys[i]),
3196 ERSTXBALO(i));
3197 xudc_writel(xudc, upper_32_bits(xudc->event_ring_phys[i]),
3198 ERSTXBAHI(i));
3199 }
3200
3201 val = lower_32_bits(xudc->event_ring_phys[0]);
3202 xudc_writel(xudc, val, ERDPLO);
3203 val |= EREPLO_ECS;
3204 xudc_writel(xudc, val, EREPLO);
3205
3206 val = upper_32_bits(xudc->event_ring_phys[0]);
3207 xudc_writel(xudc, val, ERDPHI);
3208 xudc_writel(xudc, val, EREPHI);
3209
3210 xudc->ccs = true;
3211 xudc->event_ring_index = 0;
3212 xudc->event_ring_deq_ptr = 0;
3213}
3214
3215static void tegra_xudc_free_event_ring(struct tegra_xudc *xudc)
3216{
3217 unsigned int i;
3218
3219 for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
3220 dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
3221 sizeof(*xudc->event_ring[i]),
3222 xudc->event_ring[i],
3223 xudc->event_ring_phys[i]);
3224 }
3225}
3226
3227static void tegra_xudc_fpci_ipfs_init(struct tegra_xudc *xudc)
3228{
3229 u32 val;
3230
3231 if (xudc->soc->has_ipfs) {
3232 val = ipfs_readl(xudc, XUSB_DEV_CONFIGURATION_0);
3233 val |= XUSB_DEV_CONFIGURATION_0_EN_FPCI;
3234 ipfs_writel(xudc, val, XUSB_DEV_CONFIGURATION_0);
3235 usleep_range(10, 15);
3236 }
3237
3238 /* Enable bus master */
3239 val = XUSB_DEV_CFG_1_IO_SPACE_EN | XUSB_DEV_CFG_1_MEMORY_SPACE_EN |
3240 XUSB_DEV_CFG_1_BUS_MASTER_EN;
3241 fpci_writel(xudc, val, XUSB_DEV_CFG_1);
3242
3243 /* Program BAR0 space */
3244 val = fpci_readl(xudc, XUSB_DEV_CFG_4);
3245 val &= ~(XUSB_DEV_CFG_4_BASE_ADDR_MASK);
3246 val |= xudc->phys_base & (XUSB_DEV_CFG_4_BASE_ADDR_MASK);
3247
3248 fpci_writel(xudc, val, XUSB_DEV_CFG_4);
3249 fpci_writel(xudc, upper_32_bits(xudc->phys_base), XUSB_DEV_CFG_5);
3250
3251 usleep_range(100, 200);
3252
3253 if (xudc->soc->has_ipfs) {
3254 /* Enable interrupt assertion */
3255 val = ipfs_readl(xudc, XUSB_DEV_INTR_MASK_0);
3256 val |= XUSB_DEV_INTR_MASK_0_IP_INT_MASK;
3257 ipfs_writel(xudc, val, XUSB_DEV_INTR_MASK_0);
3258 }
3259}
3260
3261static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
3262{
3263 u32 val, imod;
3264
3265 if (xudc->soc->has_ipfs) {
3266 val = xudc_readl(xudc, BLCG);
3267 val |= BLCG_ALL;
3268 val &= ~(BLCG_DFPCI | BLCG_UFPCI | BLCG_FE |
3269 BLCG_COREPLL_PWRDN);
3270 val |= BLCG_IOPLL_0_PWRDN;
3271 val |= BLCG_IOPLL_1_PWRDN;
3272 val |= BLCG_IOPLL_2_PWRDN;
3273
3274 xudc_writel(xudc, val, BLCG);
3275 }
3276
3277 /* Set a reasonable U3 exit timer value. */
3278 val = xudc_readl(xudc, SSPX_CORE_PADCTL4);
3279 val &= ~(SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK);
3280 val |= SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(0x5dc0);
3281 xudc_writel(xudc, val, SSPX_CORE_PADCTL4);
3282
3283 /* Default ping LFPS tBurst is too large. */
3284 val = xudc_readl(xudc, SSPX_CORE_CNT0);
3285 val &= ~(SSPX_CORE_CNT0_PING_TBURST_MASK);
3286 val |= SSPX_CORE_CNT0_PING_TBURST(0xa);
3287 xudc_writel(xudc, val, SSPX_CORE_CNT0);
3288
3289 /* Default tPortConfiguration timeout is too small. */
3290 val = xudc_readl(xudc, SSPX_CORE_CNT30);
3291 val &= ~(SSPX_CORE_CNT30_LMPITP_TIMER_MASK);
3292 val |= SSPX_CORE_CNT30_LMPITP_TIMER(0x978);
3293 xudc_writel(xudc, val, SSPX_CORE_CNT30);
3294
3295 if (xudc->soc->lpm_enable) {
3296 /* Set L1 resume duration to 95 us. */
3297 val = xudc_readl(xudc, HSFSPI_COUNT13);
3298 val &= ~(HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK);
3299 val |= HSFSPI_COUNT13_U2_RESUME_K_DURATION(0x2c88);
3300 xudc_writel(xudc, val, HSFSPI_COUNT13);
3301 }
3302
3303 /*
3304 * The compliance suite appears to violate the polling LFPS tBurst
3305 * max of 1.4 us. Send 1.45 us instead.
3306 */
3307 val = xudc_readl(xudc, SSPX_CORE_CNT32);
3308 val &= ~(SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK);
3309 val |= SSPX_CORE_CNT32_POLL_TBURST_MAX(0xb0);
3310 xudc_writel(xudc, val, SSPX_CORE_CNT32);
3311
3312 /* Direct HS/FS port instance to RxDetect. */
3313 val = xudc_readl(xudc, CFG_DEV_FE);
3314 val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
3315 val |= CFG_DEV_FE_PORTREGSEL(CFG_DEV_FE_PORTREGSEL_HSFS_PI);
3316 xudc_writel(xudc, val, CFG_DEV_FE);
3317
3318 val = xudc_readl(xudc, PORTSC);
3319 val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
3320 val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
3321 xudc_writel(xudc, val, PORTSC);
3322
3323 /* Direct SS port instance to RxDetect. */
3324 val = xudc_readl(xudc, CFG_DEV_FE);
3325 val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
3326 val |= CFG_DEV_FE_PORTREGSEL_SS_PI & CFG_DEV_FE_PORTREGSEL_MASK;
3327 xudc_writel(xudc, val, CFG_DEV_FE);
3328
3329 val = xudc_readl(xudc, PORTSC);
3330 val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
3331 val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
3332 xudc_writel(xudc, val, PORTSC);
3333
3334 /* Restore port instance. */
3335 val = xudc_readl(xudc, CFG_DEV_FE);
3336 val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
3337 xudc_writel(xudc, val, CFG_DEV_FE);
3338
3339 /*
3340 * Enable INFINITE_SS_RETRY to prevent device from entering
3341 * Disabled.Error when attached to buggy SuperSpeed hubs.
3342 */
3343 val = xudc_readl(xudc, CFG_DEV_FE);
3344 val |= CFG_DEV_FE_INFINITE_SS_RETRY;
3345 xudc_writel(xudc, val, CFG_DEV_FE);
3346
3347 /* Set interrupt moderation. */
3348 imod = XUDC_INTERRUPT_MODERATION_US * 4;
3349 val = xudc_readl(xudc, RT_IMOD);
3350 val &= ~((RT_IMOD_IMODI_MASK) | (RT_IMOD_IMODC_MASK));
3351 val |= (RT_IMOD_IMODI(imod) | RT_IMOD_IMODC(imod));
3352 xudc_writel(xudc, val, RT_IMOD);
3353
3354 /* increase SSPI transaction timeout from 32us to 512us */
3355 val = xudc_readl(xudc, CFG_DEV_SSPI_XFER);
3356 val &= ~(CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK);
3357 val |= CFG_DEV_SSPI_XFER_ACKTIMEOUT(0xf000);
3358 xudc_writel(xudc, val, CFG_DEV_SSPI_XFER);
3359}
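/*
 * Worked example for the interrupt moderation math above, assuming the
 * xHCI-style 250 ns tick for the IMODI counter (hence the "* 4" ticks
 * per microsecond): a moderation interval of 5 us would program
 * imod = 20, i.e. the controller waits at least 5 us between event ring
 * interrupts.
 */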
3360
3361static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
3362{
3363 int err = 0, usb3;
3364 unsigned int i;
3365
3366 xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
3367 sizeof(*xudc->utmi_phy), GFP_KERNEL);
3368 if (!xudc->utmi_phy)
3369 return -ENOMEM;
3370
3371 xudc->usb3_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
3372 sizeof(*xudc->usb3_phy), GFP_KERNEL);
3373 if (!xudc->usb3_phy)
3374 return -ENOMEM;
3375
3376 xudc->usbphy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
3377 sizeof(*xudc->usbphy), GFP_KERNEL);
3378 if (!xudc->usbphy)
3379 return -ENOMEM;
3380
3381 xudc->vbus_nb.notifier_call = tegra_xudc_vbus_notify;
3382
3383 for (i = 0; i < xudc->soc->num_phys; i++) {
3384 char phy_name[] = "usb.-.";
3385
3386 /* Get USB2 phy */
3387 snprintf(phy_name, sizeof(phy_name), "usb2-%d", i);
3388 xudc->utmi_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
3389 if (IS_ERR(xudc->utmi_phy[i])) {
3390 err = PTR_ERR(xudc->utmi_phy[i]);
3391 if (err != -EPROBE_DEFER)
3392 dev_err(xudc->dev, "failed to get usb2-%d phy: %d\n",
3393 i, err);
3394
3395 goto clean_up;
3396 } else if (xudc->utmi_phy[i]) {
3397 /* Get usb-phy, if utmi phy is available */
3398 xudc->usbphy[i] = devm_usb_get_phy_by_node(xudc->dev,
3399 xudc->utmi_phy[i]->dev.of_node,
3400 &xudc->vbus_nb);
3401 if (IS_ERR(xudc->usbphy[i])) {
3402 err = PTR_ERR(xudc->usbphy[i]);
3403 dev_err(xudc->dev, "failed to get usbphy-%d: %d\n",
3404 i, err);
3405 goto clean_up;
3406 }
3407 } else if (!xudc->utmi_phy[i]) {
3408 /* if utmi phy is not available, ignore USB3 phy get */
3409 continue;
3410 }
3411
3412 /* Get USB3 phy */
3413 usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
3414 if (usb3 < 0)
3415 continue;
3416
3417 snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3);
3418 xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
3419 if (IS_ERR(xudc->usb3_phy[i])) {
3420 err = PTR_ERR(xudc->usb3_phy[i]);
3421 if (err != -EPROBE_DEFER)
3422 dev_err(xudc->dev, "failed to get usb3-%d phy: %d\n",
3423 usb3, err);
3424
3425 goto clean_up;
3426 } else if (xudc->usb3_phy[i])
3427 dev_dbg(xudc->dev, "usb3_phy-%d registered\n", usb3);
3428 }
3429
3430 return err;
3431
3432clean_up:
3433 for (i = 0; i < xudc->soc->num_phys; i++) {
3434 xudc->usb3_phy[i] = NULL;
3435 xudc->utmi_phy[i] = NULL;
3436 xudc->usbphy[i] = NULL;
3437 }
3438
3439 return err;
3440}
3441
3442static void tegra_xudc_phy_exit(struct tegra_xudc *xudc)
3443{
3444 unsigned int i;
3445
3446 for (i = 0; i < xudc->soc->num_phys; i++) {
3447 phy_exit(xudc->usb3_phy[i]);
3448 phy_exit(xudc->utmi_phy[i]);
3449 }
3450}
3451
3452static int tegra_xudc_phy_init(struct tegra_xudc *xudc)
3453{
3454 int err;
3455 unsigned int i;
3456
3457 for (i = 0; i < xudc->soc->num_phys; i++) {
3458 err = phy_init(xudc->utmi_phy[i]);
3459 if (err < 0) {
3460 dev_err(xudc->dev, "utmi phy init failed: %d\n", err);
3461 goto exit_phy;
3462 }
3463
3464 err = phy_init(xudc->usb3_phy[i]);
3465 if (err < 0) {
3466 dev_err(xudc->dev, "usb3 phy init failed: %d\n", err);
3467 goto exit_phy;
3468 }
3469 }
3470 return 0;
3471
3472exit_phy:
3473 tegra_xudc_phy_exit(xudc);
3474 return err;
3475}
3476
3477static const char * const tegra210_xudc_supply_names[] = {
3478 "hvdd-usb",
3479 "avddio-usb",
3480};
3481
3482static const char * const tegra210_xudc_clock_names[] = {
3483 "dev",
3484 "ss",
3485 "ss_src",
3486 "hs_src",
3487 "fs_src",
3488};
3489
3490static const char * const tegra186_xudc_clock_names[] = {
3491 "dev",
3492 "ss",
3493 "ss_src",
3494 "fs_src",
3495};
3496
3497static struct tegra_xudc_soc tegra210_xudc_soc_data = {
3498 .supply_names = tegra210_xudc_supply_names,
3499 .num_supplies = ARRAY_SIZE(tegra210_xudc_supply_names),
3500 .clock_names = tegra210_xudc_clock_names,
3501 .num_clks = ARRAY_SIZE(tegra210_xudc_clock_names),
3502 .num_phys = 4,
3503 .u1_enable = false,
3504 .u2_enable = true,
3505 .lpm_enable = false,
3506 .invalid_seq_num = true,
3507 .pls_quirk = true,
3508 .port_reset_quirk = true,
3509 .has_ipfs = true,
3510};
3511
3512static struct tegra_xudc_soc tegra186_xudc_soc_data = {
3513 .clock_names = tegra186_xudc_clock_names,
3514 .num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
3515 .num_phys = 4,
3516 .u1_enable = true,
3517 .u2_enable = true,
3518 .lpm_enable = false,
3519 .invalid_seq_num = false,
3520 .pls_quirk = false,
3521 .port_reset_quirk = false,
3522 .has_ipfs = false,
3523};
3524
3525static const struct of_device_id tegra_xudc_of_match[] = {
3526 {
3527 .compatible = "nvidia,tegra210-xudc",
3528 .data = &tegra210_xudc_soc_data
3529 },
3530 {
3531 .compatible = "nvidia,tegra186-xudc",
3532 .data = &tegra186_xudc_soc_data
3533 },
3534 { }
3535};
3536MODULE_DEVICE_TABLE(of, tegra_xudc_of_match);
3537
3538static void tegra_xudc_powerdomain_remove(struct tegra_xudc *xudc)
3539{
3540 if (xudc->genpd_dl_ss)
3541 device_link_del(xudc->genpd_dl_ss);
3542 if (xudc->genpd_dl_device)
3543 device_link_del(xudc->genpd_dl_device);
3544 if (xudc->genpd_dev_ss)
3545 dev_pm_domain_detach(xudc->genpd_dev_ss, true);
3546 if (xudc->genpd_dev_device)
3547 dev_pm_domain_detach(xudc->genpd_dev_device, true);
3548}
3549
3550static int tegra_xudc_powerdomain_init(struct tegra_xudc *xudc)
3551{
3552 struct device *dev = xudc->dev;
3553 int err;
3554
3555 xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev,
3556 "dev");
3557 if (IS_ERR(xudc->genpd_dev_device)) {
3558 err = PTR_ERR(xudc->genpd_dev_device);
3559 dev_err(dev, "failed to get dev pm-domain: %d\n", err);
3560 return err;
3561 }
3562
3563 xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss");
3564 if (IS_ERR(xudc->genpd_dev_ss)) {
3565 err = PTR_ERR(xudc->genpd_dev_ss);
3566 dev_err(dev, "failed to get superspeed pm-domain: %d\n", err);
3567 return err;
3568 }
3569
3570 xudc->genpd_dl_device = device_link_add(dev, xudc->genpd_dev_device,
3571 DL_FLAG_PM_RUNTIME |
3572 DL_FLAG_STATELESS);
3573 if (!xudc->genpd_dl_device) {
3574 dev_err(dev, "adding usb device device link failed!\n");
3575 return -ENODEV;
3576 }
3577
3578 xudc->genpd_dl_ss = device_link_add(dev, xudc->genpd_dev_ss,
3579 DL_FLAG_PM_RUNTIME |
3580 DL_FLAG_STATELESS);
3581 if (!xudc->genpd_dl_ss) {
3582 dev_err(dev, "adding superspeed device link failed!\n");
3583 return -ENODEV;
3584 }
3585
3586 return 0;
3587}
3588
3589static int tegra_xudc_probe(struct platform_device *pdev)
3590{
3591 struct tegra_xudc *xudc;
3592 struct resource *res;
3593 unsigned int i;
3594 int err;
3595
3596 xudc = devm_kzalloc(&pdev->dev, sizeof(*xudc), GFP_KERNEL);
3597 if (!xudc)
3598 return -ENOMEM;
3599
3600 xudc->dev = &pdev->dev;
3601 platform_set_drvdata(pdev, xudc);
3602
3603 xudc->soc = of_device_get_match_data(&pdev->dev);
3604 if (!xudc->soc)
3605 return -ENODEV;
3606
3607 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
3608 xudc->base = devm_ioremap_resource(&pdev->dev, res);
3609 if (IS_ERR(xudc->base))
3610 return PTR_ERR(xudc->base);
3611 xudc->phys_base = res->start;
3612
3613 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fpci");
3614 xudc->fpci = devm_ioremap_resource(&pdev->dev, res);
3615 if (IS_ERR(xudc->fpci))
3616 return PTR_ERR(xudc->fpci);
3617
3618 if (xudc->soc->has_ipfs) {
3619 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3620 "ipfs");
3621 xudc->ipfs = devm_ioremap_resource(&pdev->dev, res);
3622 if (IS_ERR(xudc->ipfs))
3623 return PTR_ERR(xudc->ipfs);
3624 }
3625
3626 xudc->irq = platform_get_irq(pdev, 0);
3627 if (xudc->irq < 0)
3628 return xudc->irq;
3629
3630 err = devm_request_irq(&pdev->dev, xudc->irq, tegra_xudc_irq, 0,
3631 dev_name(&pdev->dev), xudc);
3632 if (err < 0) {
3633 dev_err(xudc->dev, "failed to claim IRQ#%u: %d\n", xudc->irq,
3634 err);
3635 return err;
3636 }
3637
3638 xudc->clks = devm_kcalloc(&pdev->dev, xudc->soc->num_clks,
3639 sizeof(*xudc->clks), GFP_KERNEL);
3640 if (!xudc->clks)
3641 return -ENOMEM;
3642
3643 for (i = 0; i < xudc->soc->num_clks; i++)
3644 xudc->clks[i].id = xudc->soc->clock_names[i];
3645
3646 err = devm_clk_bulk_get(&pdev->dev, xudc->soc->num_clks,
3647 xudc->clks);
3648 if (err) {
3649 dev_err(xudc->dev, "failed to request clks %d\n", err);
3650 return err;
3651 }
3652
3653 xudc->supplies = devm_kcalloc(&pdev->dev, xudc->soc->num_supplies,
3654 sizeof(*xudc->supplies), GFP_KERNEL);
3655 if (!xudc->supplies)
3656 return -ENOMEM;
3657
3658 for (i = 0; i < xudc->soc->num_supplies; i++)
3659 xudc->supplies[i].supply = xudc->soc->supply_names[i];
3660
3661 err = devm_regulator_bulk_get(&pdev->dev, xudc->soc->num_supplies,
3662 xudc->supplies);
3663 if (err) {
3664 dev_err(xudc->dev, "failed to request regulators %d\n", err);
3665 return err;
3666 }
3667
3668 xudc->padctl = tegra_xusb_padctl_get(&pdev->dev);
3669 if (IS_ERR(xudc->padctl))
3670 return PTR_ERR(xudc->padctl);
3671
3672 err = regulator_bulk_enable(xudc->soc->num_supplies, xudc->supplies);
3673 if (err) {
3674 dev_err(xudc->dev, "failed to enable regulators %d\n", err);
3675 goto put_padctl;
3676 }
3677
3678 err = tegra_xudc_phy_get(xudc);
3679 if (err)
3680 goto disable_regulator;
3681
3682 err = tegra_xudc_powerdomain_init(xudc);
3683 if (err)
3684 goto put_powerdomains;
3685
3686 err = tegra_xudc_phy_init(xudc);
3687 if (err)
3688 goto put_powerdomains;
3689
3690 err = tegra_xudc_alloc_event_ring(xudc);
3691 if (err)
3692 goto disable_phy;
3693
3694 err = tegra_xudc_alloc_eps(xudc);
3695 if (err)
3696 goto free_event_ring;
3697
3698 spin_lock_init(&xudc->lock);
3699
3700 init_completion(&xudc->disconnect_complete);
3701
3702 INIT_WORK(&xudc->usb_role_sw_work, tegra_xudc_usb_role_sw_work);
3703
3704 INIT_DELAYED_WORK(&xudc->plc_reset_work, tegra_xudc_plc_reset_work);
3705
3706 INIT_DELAYED_WORK(&xudc->port_reset_war_work,
3707 tegra_xudc_port_reset_war_work);
3708
3709 pm_runtime_enable(&pdev->dev);
3710
3711 xudc->gadget.ops = &tegra_xudc_gadget_ops;
3712 xudc->gadget.ep0 = &xudc->ep[0].usb_ep;
3713 xudc->gadget.name = "tegra-xudc";
3714 xudc->gadget.max_speed = USB_SPEED_SUPER;
3715
3716 err = usb_add_gadget_udc(&pdev->dev, &xudc->gadget);
3717 if (err) {
3718 dev_err(&pdev->dev, "failed to add USB gadget: %d\n", err);
3719 goto free_eps;
3720 }
3721
3722 return 0;
3723
3724free_eps:
3725 tegra_xudc_free_eps(xudc);
3726free_event_ring:
3727 tegra_xudc_free_event_ring(xudc);
3728disable_phy:
3729 tegra_xudc_phy_exit(xudc);
3730put_powerdomains:
3731 tegra_xudc_powerdomain_remove(xudc);
3732disable_regulator:
3733 regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
3734put_padctl:
3735 tegra_xusb_padctl_put(xudc->padctl);
3736
3737 return err;
3738}
3739
3740static int tegra_xudc_remove(struct platform_device *pdev)
3741{
3742 struct tegra_xudc *xudc = platform_get_drvdata(pdev);
3743 unsigned int i;
3744
3745 pm_runtime_get_sync(xudc->dev);
3746
3747 cancel_delayed_work(&xudc->plc_reset_work);
3748 cancel_work_sync(&xudc->usb_role_sw_work);
3749
3750 usb_del_gadget_udc(&xudc->gadget);
3751
3752 tegra_xudc_free_eps(xudc);
3753 tegra_xudc_free_event_ring(xudc);
3754
3755 tegra_xudc_powerdomain_remove(xudc);
3756
3757 regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
3758
3759 for (i = 0; i < xudc->soc->num_phys; i++) {
3760 phy_power_off(xudc->utmi_phy[i]);
3761 phy_power_off(xudc->usb3_phy[i]);
3762 }
3763
3764 tegra_xudc_phy_exit(xudc);
3765
3766 pm_runtime_disable(xudc->dev);
3767 pm_runtime_put(xudc->dev);
3768
3769 tegra_xusb_padctl_put(xudc->padctl);
3770
3771 return 0;
3772}
3773
3774static int __maybe_unused tegra_xudc_powergate(struct tegra_xudc *xudc)
3775{
3776 unsigned long flags;
3777
3778 dev_dbg(xudc->dev, "entering ELPG\n");
3779
3780 spin_lock_irqsave(&xudc->lock, flags);
3781
3782 xudc->powergated = true;
3783 xudc->saved_regs.ctrl = xudc_readl(xudc, CTRL);
3784 xudc->saved_regs.portpm = xudc_readl(xudc, PORTPM);
3785 xudc_writel(xudc, 0, CTRL);
3786
3787 spin_unlock_irqrestore(&xudc->lock, flags);
3788
3789 clk_bulk_disable_unprepare(xudc->soc->num_clks, xudc->clks);
3790
3791 regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
3792
3793 dev_dbg(xudc->dev, "entering ELPG done\n");
3794 return 0;
3795}
3796
3797static int __maybe_unused tegra_xudc_unpowergate(struct tegra_xudc *xudc)
3798{
3799 unsigned long flags;
3800 int err;
3801
3802 dev_dbg(xudc->dev, "exiting ELPG\n");
3803
3804 err = regulator_bulk_enable(xudc->soc->num_supplies,
3805 xudc->supplies);
3806 if (err < 0)
3807 return err;
3808
3809 err = clk_bulk_prepare_enable(xudc->soc->num_clks, xudc->clks);
3810 if (err < 0)
3811 return err;
3812
3813 tegra_xudc_fpci_ipfs_init(xudc);
3814
3815 tegra_xudc_device_params_init(xudc);
3816
3817 tegra_xudc_init_event_ring(xudc);
3818
3819 tegra_xudc_init_eps(xudc);
3820
3821 xudc_writel(xudc, xudc->saved_regs.portpm, PORTPM);
3822 xudc_writel(xudc, xudc->saved_regs.ctrl, CTRL);
3823
3824 spin_lock_irqsave(&xudc->lock, flags);
3825 xudc->powergated = false;
3826 spin_unlock_irqrestore(&xudc->lock, flags);
3827
3828 dev_dbg(xudc->dev, "exiting ELPG done\n");
3829 return 0;
3830}
3831
3832static int __maybe_unused tegra_xudc_suspend(struct device *dev)
3833{
3834 struct tegra_xudc *xudc = dev_get_drvdata(dev);
3835 unsigned long flags;
3836
3837 spin_lock_irqsave(&xudc->lock, flags);
3838 xudc->suspended = true;
3839 spin_unlock_irqrestore(&xudc->lock, flags);
3840
3841 flush_work(&xudc->usb_role_sw_work);
3842
3843 if (!pm_runtime_status_suspended(dev)) {
3844 /* Forcibly disconnect before powergating. */
3845 tegra_xudc_device_mode_off(xudc);
3846 tegra_xudc_powergate(xudc);
3847 }
3848
3849 pm_runtime_disable(dev);
3850
3851 return 0;
3852}
3853
3854static int __maybe_unused tegra_xudc_resume(struct device *dev)
3855{
3856 struct tegra_xudc *xudc = dev_get_drvdata(dev);
3857 unsigned long flags;
3858 int err;
3859
3860 err = tegra_xudc_unpowergate(xudc);
3861 if (err < 0)
3862 return err;
3863
3864 spin_lock_irqsave(&xudc->lock, flags);
3865 xudc->suspended = false;
3866 spin_unlock_irqrestore(&xudc->lock, flags);
3867
3868 schedule_work(&xudc->usb_role_sw_work);
3869
3870 pm_runtime_enable(dev);
3871
3872 return 0;
3873}
3874
3875static int __maybe_unused tegra_xudc_runtime_suspend(struct device *dev)
3876{
3877 struct tegra_xudc *xudc = dev_get_drvdata(dev);
3878
3879 return tegra_xudc_powergate(xudc);
3880}
3881
3882static int __maybe_unused tegra_xudc_runtime_resume(struct device *dev)
3883{
3884 struct tegra_xudc *xudc = dev_get_drvdata(dev);
3885
3886 return tegra_xudc_unpowergate(xudc);
3887}
3888
3889static const struct dev_pm_ops tegra_xudc_pm_ops = {
3890 SET_SYSTEM_SLEEP_PM_OPS(tegra_xudc_suspend, tegra_xudc_resume)
3891 SET_RUNTIME_PM_OPS(tegra_xudc_runtime_suspend,
3892 tegra_xudc_runtime_resume, NULL)
3893};
3894
3895static struct platform_driver tegra_xudc_driver = {
3896 .probe = tegra_xudc_probe,
3897 .remove = tegra_xudc_remove,
3898 .driver = {
3899 .name = "tegra-xudc",
3900 .pm = &tegra_xudc_pm_ops,
3901 .of_match_table = tegra_xudc_of_match,
3902 },
3903};
3904module_platform_driver(tegra_xudc_driver);
3905
3906MODULE_DESCRIPTION("NVIDIA Tegra XUSB Device Controller");
3907MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
3908MODULE_AUTHOR("Hui Fu <hfu@nvidia.com>");
3909MODULE_AUTHOR("Nagarjuna Kristam <nkristam@nvidia.com>");
3910MODULE_LICENSE("GPL v2");