/*
 * USB HOST XHCI Controller stack
 *
 * Based on xHCI host controller driver in linux-kernel
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Copyright (C) 2013 Samsung Electronics Co.Ltd
 * Authors: Vivek Gautam <gautam.vivek@samsung.com>
 *	    Vikas Sajjan <vikas.sajjan@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm/byteorder.h>
#include <asm/cache.h>
#include <linux/errno.h>

#define CACHELINE_SIZE		CONFIG_SYS_CACHELINE_SIZE
/**
 * Flushes the data cache over the memory region passed
 *
 * @param addr	pointer to the memory region to be flushed
 * @param len	length of the memory region to be flushed
 */
void xhci_flush_cache(uintptr_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

	flush_dcache_range(addr & ~(CACHELINE_SIZE - 1),
				ALIGN(addr + len, CACHELINE_SIZE));
}
/**
 * Invalidates the data cache over the memory region passed
 *
 * @param addr	pointer to the memory region to be invalidated
 * @param len	length of the memory region to be invalidated
 */
void xhci_inval_cache(uintptr_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

	invalidate_dcache_range(addr & ~(CACHELINE_SIZE - 1),
				ALIGN(addr + len, CACHELINE_SIZE));
}
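/*
 * Note: both helpers round the region out to whole cache lines. Buffers
 * handed to the controller are flushed before the hardware is told about
 * them, and memory the controller writes (e.g. event TRBs) is invalidated
 * before the CPU reads it back.
 */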
/**
 * Frees the "segment" pointer passed
 *
 * @param ptr	pointer to the "segment" to be freed
 */
static void xhci_segment_free(struct xhci_segment *seg)
{
	/* release the TRB buffer first, then the segment itself */
	free(seg->trbs);
	seg->trbs = NULL;

	free(seg);
}
/**
 * Frees the "ring" pointer passed
 *
 * @param ptr	pointer to the "ring" to be freed
 */
static void xhci_ring_free(struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	first_seg = ring->first_seg;
	seg = first_seg->next;
	while (seg != first_seg) {
		struct xhci_segment *next = seg->next;

		xhci_segment_free(seg);
		seg = next;
	}
	xhci_segment_free(first_seg);

	free(ring);
}
/**
 * Free the scratchpad buffer array and scratchpad buffers
 *
 * @ctrl	host controller data structure
 */
static void xhci_scratchpad_free(struct xhci_ctrl *ctrl)
{
	if (!ctrl->scratchpad)
		return;

	ctrl->dcbaa->dev_context_ptrs[0] = 0;

	free((void *)(uintptr_t)ctrl->scratchpad->sp_array[0]);
	free(ctrl->scratchpad->sp_array);
	free(ctrl->scratchpad);
	ctrl->scratchpad = NULL;
}
/**
 * Frees the "xhci_container_ctx" pointer passed
 *
 * @param ptr	pointer to the "xhci_container_ctx" to be freed
 */
static void xhci_free_container_ctx(struct xhci_container_ctx *ctx)
{
	/* free the context buffer and then the container itself */
	free(ctx->bytes);
	free(ctx);
}
/**
 * Frees the virtual devices for the "xhci_ctrl" pointer passed
 *
 * @param ptr	pointer to the "xhci_ctrl" whose virtual devices are to be freed
 */
static void xhci_free_virt_devices(struct xhci_ctrl *ctrl)
{
	int i;
	int slot_id;
	struct xhci_virt_device *virt_dev;

	/*
	 * refactored here to loop through all virt_dev
	 * Slot ID 0 is reserved
	 */
	for (slot_id = 0; slot_id < MAX_HC_SLOTS; slot_id++) {
		virt_dev = ctrl->devs[slot_id];
		if (!virt_dev)
			continue;

		ctrl->dcbaa->dev_context_ptrs[slot_id] = 0;

		for (i = 0; i < 31; ++i)
			if (virt_dev->eps[i].ring)
				xhci_ring_free(virt_dev->eps[i].ring);

		if (virt_dev->in_ctx)
			xhci_free_container_ctx(virt_dev->in_ctx);
		if (virt_dev->out_ctx)
			xhci_free_container_ctx(virt_dev->out_ctx);

		free(virt_dev);
		/* make sure we are pointing to NULL */
		ctrl->devs[slot_id] = NULL;
	}
}
/**
 * Frees all the memory allocated
 *
 * @param ptr	pointer to the "xhci_ctrl" to be cleaned up
 */
void xhci_cleanup(struct xhci_ctrl *ctrl)
{
	xhci_ring_free(ctrl->event_ring);
	xhci_ring_free(ctrl->cmd_ring);
	xhci_scratchpad_free(ctrl);
	xhci_free_virt_devices(ctrl);
	free(ctrl->erst.entries);

	memset(ctrl, '\0', sizeof(struct xhci_ctrl));
}
/**
 * Malloc the aligned memory
 *
 * @param size	size of memory to be allocated
 * @return	pointer to the allocated, zeroed and cache-line aligned memory
 */
static void *xhci_malloc(unsigned int size)
{
	void *ptr;
	size_t cacheline_size = max(XHCI_ALIGNMENT, CACHELINE_SIZE);

	ptr = memalign(cacheline_size, ALIGN(size, cacheline_size));
	memset(ptr, '\0', size);

	xhci_flush_cache((uintptr_t)ptr, size);

	return ptr;
}
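/*
 * xhci_malloc() zeroes the buffer and flushes it right away, so the
 * controller observes the cleared contents as soon as the buffer's address
 * is handed over (the DCBAA, rings, contexts and the ERST below all use it).
 */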
/**
 * Make the prev segment point to the next segment.
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * address of the next segment. The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 *
 * @param prev	pointer to the previous segment
 * @param next	pointer to the next segment
 * @param link_trbs	flag to indicate whether to link the trbs or NOT
 */
static void xhci_link_segments(struct xhci_segment *prev,
				struct xhci_segment *next, bool link_trbs)
{
	u32 val;
	u64 val_64 = 0;

	prev->next = next;
	if (link_trbs) {
		val_64 = (uintptr_t)next->trbs;
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = val_64;

		/*
		 * Set the last TRB in the segment to
		 * have a TRB type ID of Link TRB
		 */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= (TRB_LINK << TRB_TYPE_SHIFT);

		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}
/**
 * Initialises the ring's enqueue, dequeue and segment pointers
 *
 * @param ring	pointer to the RING to be initialised
 */
static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
	/*
	 * The ring is empty, so the enqueue pointer == dequeue pointer
	 */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;

	/*
	 * The ring is initialized to 0. The producer must write 1 to the
	 * cycle bit to handover ownership of the TRB, so PCS = 1.
	 * The consumer must compare CCS to the cycle bit to
	 * check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;
}
/**
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 *
 * @return pointer to the newly allocated SEGMENT
 */
static struct xhci_segment *xhci_segment_alloc(void)
{
	struct xhci_segment *seg;

	seg = (struct xhci_segment *)malloc(sizeof(struct xhci_segment));

	seg->trbs = (union xhci_trb *)xhci_malloc(SEGMENT_SIZE);

	seg->next = NULL;

	return seg;
}
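/*
 * The TRB buffer above comes from xhci_malloc(), which zeroes and flushes
 * it, so a fresh segment already satisfies the "all TRBs initialized to '0'"
 * requirement quoted in the comment above.
 */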
/**
 * Create a new ring with zero or more segments.
 * TODO: current code only uses one-time-allocated single-segment rings
 * of 1KB anyway, so we might as well get rid of all the segment and
 * linking code (and maybe increase the size a bit, e.g. 4KB).
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.2 and figures 15 and 16 of XHCI spec rev1.0.
 *
 * @param num_segs	number of segments in the ring
 * @param link_trbs	flag to indicate whether to link the trbs or NOT
 * @return pointer to the newly created RING
 */
struct xhci_ring *xhci_ring_alloc(unsigned int num_segs, bool link_trbs)
{
	struct xhci_ring *ring;
	struct xhci_segment *prev;

	ring = (struct xhci_ring *)malloc(sizeof(struct xhci_ring));

	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc();
	BUG_ON(!ring->first_seg);

	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc();
		xhci_link_segments(prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(prev, ring->first_seg, link_trbs);
	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
					cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring);

	return ring;
}
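/*
 * Usage in this file: the command ring and the endpoint 0 transfer ring are
 * single-segment linked rings (xhci_ring_alloc(1, true)), while the event
 * ring is created with xhci_ring_alloc(ERST_NUM_SEGS, false), since event
 * ring segments are described by the ERST rather than chained by Link TRBs.
 */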
/**
 * Set up the scratchpad buffer array and scratchpad buffers
 *
 * @ctrl	host controller data structure
 * @return	-ENOMEM if buffer allocation fails, 0 on success
 */
static int xhci_scratchpad_alloc(struct xhci_ctrl *ctrl)
{
	struct xhci_hccr *hccr = ctrl->hccr;
	struct xhci_hcor *hcor = ctrl->hcor;
	struct xhci_scratchpad *scratchpad;
	uint32_t page_size;
	int num_sp;
	void *buf;
	int i;

	num_sp = HCS_MAX_SCRATCHPAD(xhci_readl(&hccr->cr_hcsparams2));
	if (!num_sp)
		return 0;

	scratchpad = malloc(sizeof(*scratchpad));
	if (!scratchpad)
		goto fail_sp;
	ctrl->scratchpad = scratchpad;

	scratchpad->sp_array = xhci_malloc(num_sp * sizeof(u64));
	if (!scratchpad->sp_array)
		goto fail_sp2;
	ctrl->dcbaa->dev_context_ptrs[0] =
		cpu_to_le64((uintptr_t)scratchpad->sp_array);

	/*
	 * Each set bit n in the PAGESIZE register means the controller
	 * supports a page size of 2^(n + 12); pick the smallest one.
	 */
	page_size = xhci_readl(&hcor->or_pagesize) & 0xffff;
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}

	page_size = 1 << (i + 12);
	buf = memalign(page_size, num_sp * page_size);
	if (!buf)
		goto fail_sp3;
	memset(buf, '\0', num_sp * page_size);
	xhci_flush_cache((uintptr_t)buf, num_sp * page_size);

	for (i = 0; i < num_sp; i++) {
		uintptr_t ptr = (uintptr_t)buf + i * page_size;

		scratchpad->sp_array[i] = cpu_to_le64(ptr);
	}

	return 0;

fail_sp3:
	free(scratchpad->sp_array);

fail_sp2:
	free(scratchpad);
	ctrl->scratchpad = NULL;

fail_sp:
	return -ENOMEM;
}
/**
 * Allocates the Container context
 *
 * @param ctrl	Host controller data structure
 * @param type	type of XHCI Container Context
 * @return NULL if failed else pointer to the context on success
 */
static struct xhci_container_ctx
		*xhci_alloc_container_ctx(struct xhci_ctrl *ctrl, int type)
{
	struct xhci_container_ctx *ctx;

	ctx = (struct xhci_container_ctx *)
				malloc(sizeof(struct xhci_container_ctx));

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = (MAX_EP_CTX_NUM + 1) *
			CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));

	ctx->bytes = (u8 *)xhci_malloc(ctx->size);

	return ctx;
}
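/*
 * Layout reminder: a device (output) context is one slot context followed
 * by MAX_EP_CTX_NUM endpoint contexts; an input context carries one extra
 * leading input control context, hence the additional CTX_SIZE above.
 */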
/**
 * Allocates the virtual device
 *
 * @param ctrl	host controller data structure
 * @param slot_id	slot ID of the device to be allocated
 * @return 0 on success else -1 on failure
 */
int xhci_alloc_virt_device(struct xhci_ctrl *ctrl, unsigned int slot_id)
{
	u64 byte_64 = 0;
	struct xhci_virt_device *virt_dev;

	/* Slot ID 0 is reserved */
	if (ctrl->devs[slot_id]) {
		printf("Virt dev for slot[%d] already allocated\n", slot_id);
		return -1;
	}

	ctrl->devs[slot_id] = (struct xhci_virt_device *)
					malloc(sizeof(struct xhci_virt_device));
	if (!ctrl->devs[slot_id]) {
		puts("Failed to allocate virtual device\n");
		return -1;
	}

	memset(ctrl->devs[slot_id], 0, sizeof(struct xhci_virt_device));
	virt_dev = ctrl->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	virt_dev->out_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_DEVICE);
	if (!virt_dev->out_ctx) {
		puts("Failed to allocate out context for virt dev\n");
		return -1;
	}

	/* Allocate the (input) device context for address device command */
	virt_dev->in_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_INPUT);
	if (!virt_dev->in_ctx) {
		puts("Failed to allocate in context for virt dev\n");
		return -1;
	}

	/* Allocate endpoint 0 ring */
	virt_dev->eps[0].ring = xhci_ring_alloc(1, true);

	byte_64 = (uintptr_t)(virt_dev->out_ctx->bytes);

	/* Point to output device context in dcbaa. */
	ctrl->dcbaa->dev_context_ptrs[slot_id] = byte_64;

	xhci_flush_cache((uintptr_t)&ctrl->dcbaa->dev_context_ptrs[slot_id],
			 sizeof(u64));

	return 0;
}
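/*
 * The DCBAA entry is flushed above so that the controller reads the new
 * output device context pointer from memory when an Address Device command
 * for this slot is issued later.
 */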
/**
 * Allocates the necessary data structures
 * for XHCI host controller
 *
 * @param ctrl	Host controller data structure
 * @param hccr	pointer to HOST Controller Control Registers
 * @param hcor	pointer to HOST Controller Operational Registers
 * @return 0 if successful else -1 on failure
 */
int xhci_mem_init(struct xhci_ctrl *ctrl, struct xhci_hccr *hccr,
					struct xhci_hcor *hcor)
{
	u64 val_64;
	u64 trb_64;
	u32 val;
	unsigned long deq;
	int i;
	struct xhci_segment *seg;

	/* DCBAA initialization */
	ctrl->dcbaa = (struct xhci_device_context_array *)
			xhci_malloc(sizeof(struct xhci_device_context_array));
	if (ctrl->dcbaa == NULL) {
		puts("unable to allocate DCBA\n");
		return -1;
	}

	val_64 = (uintptr_t)ctrl->dcbaa;
	/* Set the pointer in DCBAA register */
	xhci_writeq(&hcor->or_dcbaap, val_64);

	/* Command ring control pointer register initialization */
	ctrl->cmd_ring = xhci_ring_alloc(1, true);

	/* Set the address in the Command Ring Control register */
	trb_64 = (uintptr_t)ctrl->cmd_ring->first_seg->trbs;
	val_64 = xhci_readq(&hcor->or_crcr);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		  (trb_64 & (u64) ~CMD_RING_RSVD_BITS) |
		  ctrl->cmd_ring->cycle_state;
	xhci_writeq(&hcor->or_crcr, val_64);
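	/*
	 * CRCR note: the low-order reserved bits read back from the register
	 * are preserved, the command ring base address fills the remaining
	 * bits, and the ring cycle state from cmd_ring->cycle_state is ORed
	 * in so the controller starts fetching command TRBs with the matching
	 * cycle bit.
	 */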
	/* write the address of db register */
	val = xhci_readl(&hccr->cr_dboff);
	ctrl->dba = (struct xhci_doorbell_array *)((char *)hccr + val);

	/* write the address of runtime register */
	val = xhci_readl(&hccr->cr_rtsoff);
	ctrl->run_regs = (struct xhci_run_regs *)((char *)hccr + val);

	/* writing the address of ir_set structure */
	ctrl->ir_set = &ctrl->run_regs->ir_set[0];

	/* Event ring does not maintain link TRB */
	ctrl->event_ring = xhci_ring_alloc(ERST_NUM_SEGS, false);
	ctrl->erst.entries = (struct xhci_erst_entry *)
		xhci_malloc(sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS);

	ctrl->erst.num_entries = ERST_NUM_SEGS;

	for (val = 0, seg = ctrl->event_ring->first_seg;
	     val < ERST_NUM_SEGS;
	     val++) {
		struct xhci_erst_entry *entry = &ctrl->erst.entries[val];

		trb_64 = (uintptr_t)seg->trbs;
		xhci_writeq(&entry->seg_addr, trb_64);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		seg = seg->next;
	}

	xhci_flush_cache((uintptr_t)ctrl->erst.entries,
			 ERST_NUM_SEGS * sizeof(struct xhci_erst_entry));
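	/*
	 * The ERST entries were written through the CPU cache, so they are
	 * flushed here before the event ring dequeue pointer and the ERST
	 * base are programmed below; otherwise the controller could fetch
	 * stale entries.
	 */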
	deq = (unsigned long)ctrl->event_ring->dequeue;

	/* Update HC event ring dequeue pointer */
	xhci_writeq(&ctrl->ir_set->erst_dequeue,
			(u64)deq & (u64)~ERST_PTR_MASK);

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(&ctrl->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_writel(&ctrl->ir_set->erst_size, val);

	/* this is the event ring segment table pointer */
	val_64 = xhci_readq(&ctrl->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= ((uintptr_t)(ctrl->erst.entries) & ~ERST_PTR_MASK);

	xhci_writeq(&ctrl->ir_set->erst_base, val_64);

	/* set up the scratchpad buffer array and scratchpad buffers */
	xhci_scratchpad_alloc(ctrl);

	/* initializing the virtual devices to NULL */
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		ctrl->devs[i] = NULL;

	/*
	 * Just Zero'ing this register completely,
	 * or some spurious Device Notification Events
	 * might screw things here.
	 */
	xhci_writel(&hcor->or_dnctrl, 0x0);

	return 0;
}
/**
 * Give the input control context for the passed container context
 *
 * @param ctx	pointer to the context
 * @return pointer to the Input control context data
 */
struct xhci_input_control_ctx
		*xhci_get_input_control_ctx(struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);

	return (struct xhci_input_control_ctx *)ctx->bytes;
}
/**
 * Give the slot context for the passed container context
 *
 * @param ctrl	Host controller data structure
 * @param ctx	pointer to the context
 * @return pointer to the slot control context data
 */
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_ctrl *ctrl,
				struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(readl(&ctrl->hccr->cr_hccparams)));
}
/**
 * Gets the EP context based on the ep_index
 *
 * @param ctrl	Host controller data structure
 * @param ctx	context container
 * @param ep_index	index of the endpoint
 * @return pointer to the End point context
 */
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_ctrl *ctrl,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes +
		(ep_index * CTX_SIZE(readl(&ctrl->hccr->cr_hccparams))));
}
/**
 * Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 *
 * @param ctrl	Host controller data structure
 * @param in_ctx	contains the input context
 * @param out_ctx	contains the output context
 * @param ep_index	index of the end point
 */
void xhci_endpoint_copy(struct xhci_ctrl *ctrl,
			struct xhci_container_ctx *in_ctx,
			struct xhci_container_ctx *out_ctx,
			unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(ctrl, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(ctrl, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}
/**
 * Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 * Only the context entries field matters, but
 * we'll copy the whole thing anyway.
 *
 * @param ctrl	Host controller data structure
 * @param in_ctx	contains the input context
 * @param out_ctx	contains the output context
 */
void xhci_slot_copy(struct xhci_ctrl *ctrl, struct xhci_container_ctx *in_ctx,
		    struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(ctrl, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(ctrl, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
/**
 * Setup an xHCI virtual device for a Set Address command
 *
 * @param ctrl	host controller data structure
 * @param slot_id	slot ID of the device
 * @param speed	speed of the device
 * @param hop_portnr	port number the device is attached to
 * @return none
 */
void xhci_setup_addressable_virt_dev(struct xhci_ctrl *ctrl, int slot_id,
				     int speed, int hop_portnr)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num = 0;
	u64 trb_64 = 0;

	virt_dev = ctrl->devs[slot_id];

	/* Extract the EP0 and Slot Ctrl */
	ep0_ctx = xhci_get_ep_ctx(ctrl, virt_dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(ctrl, virt_dev->in_ctx);

	/* Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | 0);

	switch (speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		break;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}

	port_num = hop_portnr;
	debug("port_num = %d\n", port_num);

	slot_ctx->dev_info2 |=
			cpu_to_le32(((port_num & ROOT_HUB_PORT_MASK) <<
				ROOT_HUB_PORT_SHIFT));

	/* Step 4 - ring already allocated */
	ep0_ctx->ep_info2 = cpu_to_le32(CTRL_EP << EP_TYPE_SHIFT);
	debug("SPEED = %d\n", speed);

	switch (speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= cpu_to_le32(((512 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 512bytes\n");
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= cpu_to_le32(((64 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 64bytes\n");
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= cpu_to_le32(((8 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 8bytes\n");
		break;
	default:
		BUG();
	}

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |=
			cpu_to_le32(((0 & MAX_BURST_MASK) << MAX_BURST_SHIFT) |
			((3 & ERROR_COUNT_MASK) << ERROR_COUNT_SHIFT));

	trb_64 = (uintptr_t)virt_dev->eps[0].ring->first_seg->trbs;
	/* The TR dequeue pointer carries the ring's cycle state in bit 0 */
	ep0_ctx->deq = cpu_to_le64(trb_64 | virt_dev->eps[0].ring->cycle_state);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	xhci_flush_cache((uintptr_t)ep0_ctx, sizeof(struct xhci_ep_ctx));
	xhci_flush_cache((uintptr_t)slot_ctx, sizeof(struct xhci_slot_ctx));
}