* "All components of all Command and Transfer TRBs shall be initialized to '0'"
*/
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
- unsigned int cycle_state,
unsigned int max_packet,
unsigned int num,
gfp_t flags)
{
struct xhci_segment *seg;
dma_addr_t dma;
- int i;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
	if (!seg)
		return NULL;

	seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}
- /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
- if (cycle_state == 0) {
- for (i = 0; i < TRBS_PER_SEGMENT; i++)
- seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE);
- }
	seg->num = num;
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}
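
/*
 * Why the fix-up removed above can go: dma_pool_zalloc() hands back fully
 * zeroed TRBs, i.e. with their cycle bits at 0, and every freshly
 * allocated ring now starts with cycle_state = 1, so a new segment never
 * needs its cycle bits touched at allocation time.
 */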
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
			    struct xhci_segment *first, struct xhci_segment *last,
			    unsigned int num_segs)
{
	struct xhci_segment *next, *seg;
	bool chain_links;

	chain_links = xhci_link_chain_quirk(xhci, ring->type);
+ /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
+ if (ring->cycle_state == 0) {
+	xhci_for_each_ring_seg(first, seg) {
+ for (int i = 0; i < TRBS_PER_SEGMENT; i++)
+ seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
+ }
+ }
+
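/*
 * The fix-up walks only the freshly allocated chain (first..last), which
 * xhci_alloc_segments_for_ring() has already closed into a loop, and it
 * runs before that chain is spliced into the live ring below. A minimal
 * sketch of the iteration helper, assuming a circular segment list (the
 * real macro lives in xhci.h and may differ in detail):
 *
 *	#define xhci_for_each_ring_seg(head, seg)		\
 *		for (seg = head; seg != NULL;			\
 *		     seg = (seg->next != head ? seg->next : NULL))
 */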
next = ring->enq_seg->next;
xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
xhci_link_segments(last, next, ring->type, chain_links);

	kfree(ring);
}
-void xhci_initialize_ring_info(struct xhci_ring *ring,
- unsigned int cycle_state)
+void xhci_initialize_ring_info(struct xhci_ring *ring)
{
/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;

	/*
-	 * New rings are initialized with cycle state equal to 1; if we are
-	 * handling ring expansion, set the cycle state equal to the old ring.
+	 * New rings are always initialized with cycle state equal to 1;
+	 * ring expansion keeps the cycle state of the ring being expanded.
	 */
-	ring->cycle_state = cycle_state;
+	ring->cycle_state = 1;
	/*
	 * Each segment has a link TRB, and leave an extra TRB for SW
	 * accounting purpose
	 */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
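
/*
 * Sketch of why a fixed 1 is the right starting value (xHCI spec,
 * section 4.9): producer and consumer cycle states both begin at 1, so
 * the zeroed TRBs of a fresh ring (cycle bit 0) read as empty until
 * software enqueues them with the current cycle bit set.
 */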
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment **first,
struct xhci_segment **last,
unsigned int num_segs,
- unsigned int cycle_state,
enum xhci_ring_type type,
unsigned int max_packet,
gfp_t flags)
{
	struct xhci_segment *prev;
	unsigned int num = 0;
	bool chain_links;

	chain_links = xhci_link_chain_quirk(xhci, type);
- prev = xhci_segment_alloc(xhci, cycle_state, max_packet, num, flags);
+ prev = xhci_segment_alloc(xhci, max_packet, num, flags);
if (!prev)
return -ENOMEM;
num++;
while (num < num_segs) {
struct xhci_segment *next;
- next = xhci_segment_alloc(xhci, cycle_state, max_packet, num,
- flags);
+ next = xhci_segment_alloc(xhci, max_packet, num, flags);
if (!next)
goto free_segments;

/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
-struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
- unsigned int num_segs, unsigned int cycle_state,
- enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
+struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
+ enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
struct xhci_ring *ring;
int ret;
	if (num_segs == 0)
		return ring;

ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg, &ring->last_seg, num_segs,
- cycle_state, type, max_packet, flags);
+ type, max_packet, flags);
if (ret)
goto fail;
	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
cpu_to_le32(LINK_TOGGLE);
}
- xhci_initialize_ring_info(ring, cycle_state);
+ xhci_initialize_ring_info(ring);
	trace_xhci_ring_alloc(ring);
	return ring;

fail:
	kfree(ring);
	return NULL;
}

int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
			unsigned int num_new_segs, gfp_t flags)
{
	struct xhci_segment *first;
	struct xhci_segment *last;
	int ret;
- ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_new_segs, ring->cycle_state,
- ring->type, ring->bounce_buf_len, flags);
+ ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_new_segs, ring->type,
+ ring->bounce_buf_len, flags);
if (ret)
return -ENOMEM;
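
/*
 * Note that expansion no longer threads a cycle state through the
 * allocator: new segments arrive with zeroed cycle bits, and
 * xhci_link_rings() above flips them to 1 only when the live ring is
 * currently running with cycle_state == 0.
 */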
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
stream_info->stream_rings[cur_stream] =
- xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
- mem_flags);
+ xhci_ring_alloc(xhci, 2, TYPE_STREAM, max_packet, mem_flags);
cur_ring = stream_info->stream_rings[cur_stream];
if (!cur_ring)
goto cleanup_rings;
}
/* Allocate endpoint 0 ring */
- dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
+ dev->eps[0].ring = xhci_ring_alloc(xhci, 2, TYPE_CTRL, 0, flags);
if (!dev->eps[0].ring)
goto fail;
/* Set up the endpoint ring */
virt_dev->eps[ep_index].new_ring =
- xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+ xhci_ring_alloc(xhci, 2, ring_type, max_packet, mem_flags);
if (!virt_dev->eps[ep_index].new_ring)
return -ENOMEM;
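
/*
 * The max_packet argument only sizes the optional per-segment bounce
 * buffer (ring->bounce_buf_len, allocated in xhci_segment_alloc()) used
 * to realign TD fragments; rings that never need it, like the control,
 * event and command rings here, pass 0.
 */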
if (!ir)
return NULL;
- ir->event_ring = xhci_ring_alloc(xhci, segs, 1, TYPE_EVENT, 0, flags);
+ ir->event_ring = xhci_ring_alloc(xhci, segs, TYPE_EVENT, 0, flags);
if (!ir->event_ring) {
xhci_warn(xhci, "Failed to allocate interrupter event ring\n");
		kfree(ir);
		return NULL;
	}

	goto fail;

	/* Set up the command ring to have one segment for now. */
- xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
+ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, 0, flags);
if (!xhci->cmd_ring)
goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocated command ring at %p", xhci->cmd_ring);