#include <errno.h>
#include <byteswap.h>
#include <ipxe/malloc.h>
-#include <ipxe/umalloc.h>
#include <ipxe/pci.h>
#include <ipxe/usb.h>
#include <ipxe/init.h>
/* Read structural parameters 2 */
hcsparams2 = readl ( xhci->cap + XHCI_CAP_HCSPARAMS2 );
- xhci->scratchpads = XHCI_HCSPARAMS2_SCRATCHPADS ( hcsparams2 );
+ xhci->scratch.count = XHCI_HCSPARAMS2_SCRATCHPADS ( hcsparams2 );
DBGC2 ( xhci, "XHCI %s needs %d scratchpads\n",
- xhci->name, xhci->scratchpads );
+ xhci->name, xhci->scratch.count );
/* Read capability parameters 1 */
hccparams1 = readl ( xhci->cap + XHCI_CAP_HCCPARAMS1 );
* align on its own size (rounded up to a power of two and
* with a minimum of 64 bytes).
*/
- len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
- xhci->dcbaa = malloc_phys ( len, xhci_align ( len ) );
- if ( ! xhci->dcbaa ) {
+ len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa.context[0] ) );
+ xhci->dcbaa.context = dma_alloc ( xhci->dma, &xhci->dcbaa.map, len,
+ xhci_align ( len ) );
+ if ( ! xhci->dcbaa.context ) {
DBGC ( xhci, "XHCI %s could not allocate DCBAA\n", xhci->name );
rc = -ENOMEM;
goto err_alloc;
}
- memset ( xhci->dcbaa, 0, len );
+ memset ( xhci->dcbaa.context, 0, len );
/* Program DCBAA pointer */
- dcbaap = virt_to_phys ( xhci->dcbaa );
+ dcbaap = dma ( &xhci->dcbaa.map, xhci->dcbaa.context );
if ( ( rc = xhci_writeq ( xhci, dcbaap,
xhci->op + XHCI_OP_DCBAAP ) ) != 0 )
goto err_writeq;
- DBGC2 ( xhci, "XHCI %s DCBAA at [%08lx,%08lx)\n",
- xhci->name, dcbaap, ( dcbaap + len ) );
+ DBGC2 ( xhci, "XHCI %s DCBAA at [%08lx,%08lx)\n", xhci->name,
+ virt_to_phys ( xhci->dcbaa.context ),
+ ( virt_to_phys ( xhci->dcbaa.context ) + len ) );
return 0;
err_writeq:
- free_phys ( xhci->dcbaa, len );
+ dma_free ( &xhci->dcbaa.map, xhci->dcbaa.context, len );
err_alloc:
return rc;
}
/* Sanity check */
for ( i = 0 ; i <= xhci->slots ; i++ )
- assert ( xhci->dcbaa[i] == 0 );
+ assert ( xhci->dcbaa.context[i] == 0 );
/* Clear DCBAA pointer */
xhci_writeq ( xhci, 0, xhci->op + XHCI_OP_DCBAAP );
/* Free DCBAA */
- len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
- free_phys ( xhci->dcbaa, len );
+ len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa.context[0] ) );
+ dma_free ( &xhci->dcbaa.map, xhci->dcbaa.context, len );
}
/******************************************************************************
* @ret rc Return status code
*/
/*
 * Allocate xHCI scratchpad buffers.
 *
 * Diff hunk: migrates scratchpad allocation from umalloc() /
 * malloc_phys() to the iPXE DMA API (dma_umalloc() / dma_alloc()),
 * and moves the scratchpad state from loose xhci_device fields
 * (scratchpads, scratchpad, scratchpad_array) into a dedicated
 * struct xhci_scratchpad (count, buffer, buffer_map, array,
 * array_map).  Addresses handed to the controller now come from
 * dma()/dma_phys() on the corresponding mapping instead of
 * virt_to_phys()/user_to_phys().
 *
 * NOTE(review): unchanged context lines are elided in this chunk;
 * the "goto err_alloc_array;" expected after "rc = -ENOMEM;" in the
 * array-allocation failure path is not visible here -- confirm it is
 * present in the full file.
 */
static int xhci_scratchpad_alloc ( struct xhci_device *xhci ) {
+ struct xhci_scratchpad *scratch = &xhci->scratch;
+ size_t buffer_len;
size_t array_len;
- size_t len;
- physaddr_t phys;
+ physaddr_t addr;
unsigned int i;
int rc;
/* Do nothing if no scratchpad buffers are used */
- if ( ! xhci->scratchpads )
+ if ( ! scratch->count )
return 0;
- /* Allocate scratchpads */
- len = ( xhci->scratchpads * xhci->pagesize );
- xhci->scratchpad = umalloc ( len );
- if ( ! xhci->scratchpad ) {
+ /* Allocate scratchpad buffers */
+ buffer_len = ( scratch->count * xhci->pagesize );
+ scratch->buffer = dma_umalloc ( xhci->dma, &scratch->buffer_map,
+ buffer_len, xhci->pagesize );
+ if ( ! scratch->buffer ) {
DBGC ( xhci, "XHCI %s could not allocate scratchpad buffers\n",
xhci->name );
rc = -ENOMEM;
goto err_alloc;
}
- memset_user ( xhci->scratchpad, 0, 0, len );
+ memset_user ( scratch->buffer, 0, 0, buffer_len );
/* Allocate scratchpad array */
- array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
- xhci->scratchpad_array =
- malloc_phys ( array_len, xhci_align ( array_len ) );
- if ( ! xhci->scratchpad_array ) {
+ array_len = ( scratch->count * sizeof ( scratch->array[0] ) );
+ scratch->array = dma_alloc ( xhci->dma, &scratch->array_map,
+ array_len, xhci_align ( array_len ) );
+ if ( ! scratch->array ) {
DBGC ( xhci, "XHCI %s could not allocate scratchpad buffer "
"array\n", xhci->name );
rc = -ENOMEM;
}
/* Populate scratchpad array */
- for ( i = 0 ; i < xhci->scratchpads ; i++ ) {
- phys = user_to_phys ( xhci->scratchpad, ( i * xhci->pagesize ));
- xhci->scratchpad_array[i] = phys;
/* New code computes the device-side base address once via
 * dma_phys() and steps by pagesize, instead of recomputing
 * user_to_phys() per entry; entries are now also explicitly
 * stored little-endian via cpu_to_le64().
 */
+ addr = dma_phys ( &scratch->buffer_map,
+ user_to_phys ( scratch->buffer, 0 ) );
+ for ( i = 0 ; i < scratch->count ; i++ ) {
+ scratch->array[i] = cpu_to_le64 ( addr );
+ addr += xhci->pagesize;
}
/* Set scratchpad array pointer */
- assert ( xhci->dcbaa != NULL );
- xhci->dcbaa[0] = cpu_to_le64 ( virt_to_phys ( xhci->scratchpad_array ));
+ assert ( xhci->dcbaa.context != NULL );
+ xhci->dcbaa.context[0] = cpu_to_le64 ( dma ( &scratch->array_map,
+ scratch->array ) );
/* Debug message keeps CPU-side (virt_to_phys/user_to_phys)
 * addresses, which may differ from the device-side DMA addresses
 * programmed above.
 */
DBGC2 ( xhci, "XHCI %s scratchpad [%08lx,%08lx) array [%08lx,%08lx)\n",
- xhci->name, user_to_phys ( xhci->scratchpad, 0 ),
- user_to_phys ( xhci->scratchpad, len ),
- virt_to_phys ( xhci->scratchpad_array ),
- ( virt_to_phys ( xhci->scratchpad_array ) + array_len ) );
+ xhci->name, user_to_phys ( scratch->buffer, 0 ),
+ user_to_phys ( scratch->buffer, buffer_len ),
+ virt_to_phys ( scratch->array ),
+ ( virt_to_phys ( scratch->array ) + array_len ) );
return 0;
/* Error unwind: free in reverse order of allocation */
- free_phys ( xhci->scratchpad_array, array_len );
+ dma_free ( &scratch->array_map, scratch->array, array_len );
err_alloc_array:
- ufree ( xhci->scratchpad );
+ dma_ufree ( &scratch->buffer_map, scratch->buffer, buffer_len );
err_alloc:
return rc;
}
* @v xhci xHCI device
*/
/*
 * Free xHCI scratchpad buffers.
 *
 * Diff hunk: mirror of xhci_scratchpad_alloc()'s migration to the
 * DMA API -- free_phys()/ufree() become dma_free()/dma_ufree() with
 * the mappings recorded in struct xhci_scratchpad, and the DCBAA
 * entry 0 (scratchpad array pointer) is cleared via the new
 * dcbaa.context member.  buffer_len must be recomputed here because
 * the allocation length is not stored.
 */
static void xhci_scratchpad_free ( struct xhci_device *xhci ) {
+ struct xhci_scratchpad *scratch = &xhci->scratch;
size_t array_len;
+ size_t buffer_len;
/* Do nothing if no scratchpad buffers are used */
- if ( ! xhci->scratchpads )
+ if ( ! scratch->count )
return;
/* Clear scratchpad array pointer */
- assert ( xhci->dcbaa != NULL );
- xhci->dcbaa[0] = 0;
+ assert ( xhci->dcbaa.context != NULL );
+ xhci->dcbaa.context[0] = 0;
/* Free scratchpad array */
- array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
- free_phys ( xhci->scratchpad_array, array_len );
+ array_len = ( scratch->count * sizeof ( scratch->array[0] ) );
+ dma_free ( &scratch->array_map, scratch->array, array_len );
- /* Free scratchpads */
- ufree ( xhci->scratchpad );
+ /* Free scratchpad buffers */
+ buffer_len = ( scratch->count * xhci->pagesize );
+ dma_ufree ( &scratch->buffer_map, scratch->buffer, buffer_len );
}
/******************************************************************************
}
/* Allocate TRBs */
- ring->trb = malloc_phys ( ring->len, xhci_align ( ring->len ) );
+ ring->trb = dma_alloc ( xhci->dma, &ring->map, ring->len,
+ xhci_align ( ring->len ) );
if ( ! ring->trb ) {
rc = -ENOMEM;
goto err_alloc_trb;
/* Initialise Link TRB */
link = &ring->trb[count].link;
- link->next = cpu_to_le64 ( virt_to_phys ( ring->trb ) );
+ link->next = cpu_to_le64 ( dma ( &ring->map, ring->trb ) );
link->flags = XHCI_TRB_TC;
link->type = XHCI_TRB_LINK;
ring->link = link;
return 0;
- free_phys ( ring->trb, ring->len );
+ dma_free ( &ring->map, ring->trb, ring->len );
err_alloc_trb:
free ( ring->iobuf );
err_alloc_iobuf:
assert ( ring->iobuf[i] == NULL );
/* Free TRBs */
- free_phys ( ring->trb, ring->len );
+ dma_free ( &ring->map, ring->trb, ring->len );
/* Free I/O buffers */
free ( ring->iobuf );
goto err_ring_alloc;
/* Program command ring control register */
- crp = virt_to_phys ( xhci->command.trb );
+ crp = dma ( &xhci->command.map, xhci->command.trb );
if ( ( rc = xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ),
xhci->op + XHCI_OP_CRCR ) ) != 0 )
goto err_writeq;
- DBGC2 ( xhci, "XHCI %s CRCR at [%08lx,%08lx)\n",
- xhci->name, crp, ( crp + xhci->command.len ) );
+ DBGC2 ( xhci, "XHCI %s CRCR at [%08lx,%08lx)\n", xhci->name,
+ virt_to_phys ( xhci->command.trb ),
+ ( virt_to_phys ( xhci->command.trb ) + xhci->command.len ) );
return 0;
err_writeq:
/* Allocate event ring */
count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
len = ( count * sizeof ( event->trb[0] ) );
- event->trb = malloc_phys ( len, xhci_align ( len ) );
+ event->trb = dma_alloc ( xhci->dma, &event->trb_map, len,
+ xhci_align ( len ) );
if ( ! event->trb ) {
rc = -ENOMEM;
goto err_alloc_trb;
memset ( event->trb, 0, len );
/* Allocate event ring segment table */
- event->segment = malloc_phys ( sizeof ( event->segment[0] ),
- xhci_align ( sizeof(event->segment[0])));
+ event->segment = dma_alloc ( xhci->dma, &event->segment_map,
+ sizeof ( event->segment[0] ),
+ xhci_align ( sizeof (event->segment[0])));
if ( ! event->segment ) {
rc = -ENOMEM;
goto err_alloc_segment;
}
memset ( event->segment, 0, sizeof ( event->segment[0] ) );
- event->segment[0].base = cpu_to_le64 ( virt_to_phys ( event->trb ) );
+ event->segment[0].base = cpu_to_le64 ( dma ( &event->trb_map,
+ event->trb ) );
event->segment[0].count = cpu_to_le32 ( count );
/* Program event ring registers */
writel ( 1, xhci->run + XHCI_RUN_ERSTSZ ( 0 ) );
- if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->trb ),
+ if ( ( rc = xhci_writeq ( xhci, dma ( &event->trb_map, event->trb ),
xhci->run + XHCI_RUN_ERDP ( 0 ) ) ) != 0 )
goto err_writeq_erdp;
- if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->segment ),
+ if ( ( rc = xhci_writeq ( xhci,
+ dma ( &event->segment_map, event->segment ),
xhci->run + XHCI_RUN_ERSTBA ( 0 ) ) ) != 0 )
goto err_writeq_erstba;
( virt_to_phys ( event->trb ) + len ),
virt_to_phys ( event->segment ),
( virt_to_phys ( event->segment ) +
- sizeof (event->segment[0] ) ) );
+ sizeof ( event->segment[0] ) ) );
return 0;
xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERSTBA ( 0 ) );
err_writeq_erstba:
xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
err_writeq_erdp:
- free_phys ( event->segment, sizeof ( event->segment[0] ) );
+ dma_free ( &event->segment_map, event->segment,
+ sizeof ( event->segment[0] ) );
err_alloc_segment:
- free_phys ( event->trb, len );
+ dma_free ( &event->trb_map, event->trb, len );
err_alloc_trb:
return rc;
}
xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
/* Free event ring segment table */
- free_phys ( event->segment, sizeof ( event->segment[0] ) );
+ dma_free ( &event->segment_map, event->segment,
+ sizeof ( event->segment[0] ) );
/* Free event ring */
count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
len = ( count * sizeof ( event->trb[0] ) );
- free_phys ( event->trb, len );
+ dma_free ( &event->trb_map, event->trb, len );
}
/**
iobuf = xhci_dequeue_multi ( &endpoint->ring );
assert ( iobuf != NULL );
+ /* Unmap I/O buffer */
+ iob_unmap ( iobuf );
+
/* Check for errors */
if ( ! ( ( trb->code == XHCI_CMPLT_SUCCESS ) ||
( trb->code == XHCI_CMPLT_SHORT ) ) ) {
/* Update dequeue pointer if applicable */
if ( consumed ) {
- xhci_writeq ( xhci, virt_to_phys ( trb ),
+ xhci_writeq ( xhci, dma ( &event->trb_map, trb ),
xhci->run + XHCI_RUN_ERDP ( 0 ) );
profile_stop ( &xhci_event_profiler );
}
/* Reset the command ring control register */
xhci_ring_reset ( &xhci->command );
- crp = virt_to_phys ( xhci->command.trb );
+ crp = dma ( &xhci->command.map, xhci->command.trb );
xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ), xhci->op + XHCI_OP_CRCR );
}
void *input ) ) {
union xhci_trb trb;
struct xhci_trb_context *context = &trb.context;
+ struct dma_mapping map;
size_t len;
void *input;
int rc;
/* Allocate an input context */
+ memset ( &map, 0, sizeof ( map ) );
len = xhci_input_context_offset ( xhci, XHCI_CTX_END );
- input = malloc_phys ( len, xhci_align ( len ) );
+ input = dma_alloc ( xhci->dma, &map, len, xhci_align ( len ) );
if ( ! input ) {
rc = -ENOMEM;
goto err_alloc;
/* Construct command */
memset ( context, 0, sizeof ( *context ) );
context->type = type;
- context->input = cpu_to_le64 ( virt_to_phys ( input ) );
+ context->input = cpu_to_le64 ( dma ( &map, input ) );
context->slot = slot->id;
/* Issue command and wait for completion */
goto err_command;
err_command:
- free_phys ( input, len );
+ dma_free ( &map, input, len );
err_alloc:
return rc;
}
struct xhci_slot *slot,
struct xhci_endpoint *endpoint,
void *input ) {
+ struct xhci_trb_ring *ring = &endpoint->ring;
struct xhci_control_context *control_ctx;
struct xhci_slot_context *slot_ctx;
struct xhci_endpoint_context *ep_ctx;
ep_ctx->type = XHCI_EP_TYPE_CONTROL;
ep_ctx->burst = endpoint->ep->burst;
ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
- ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) |
+ ep_ctx->dequeue = cpu_to_le64 ( dma ( &ring->map, ring->trb ) |
XHCI_EP_DCS );
ep_ctx->trb_len = cpu_to_le16 ( XHCI_EP0_TRB_LEN );
}
struct xhci_slot *slot,
struct xhci_endpoint *endpoint,
void *input ) {
+ struct xhci_trb_ring *ring = &endpoint->ring;
struct xhci_control_context *control_ctx;
struct xhci_slot_context *slot_ctx;
struct xhci_endpoint_context *ep_ctx;
ep_ctx->type = endpoint->type;
ep_ctx->burst = endpoint->ep->burst;
ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
- ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) |
+ ep_ctx->dequeue = cpu_to_le64 ( dma ( &ring->map, ring->trb ) |
XHCI_EP_DCS );
ep_ctx->trb_len = cpu_to_le16 ( endpoint->ep->mtu ); /* best guess */
}
unsigned int mask;
unsigned int index;
unsigned int dcs;
+ physaddr_t addr;
int rc;
/* Construct command */
mask = ring->mask;
dcs = ( ( ~( cons >> ring->shift ) ) & XHCI_EP_DCS );
index = ( cons & mask );
- dequeue->dequeue =
- cpu_to_le64 ( virt_to_phys ( &ring->trb[index] ) | dcs );
+ addr = dma ( &ring->map, &ring->trb[index] );
+ dequeue->dequeue = cpu_to_le64 ( addr | dcs );
dequeue->slot = slot->id;
dequeue->endpoint = endpoint->ctx;
dequeue->type = XHCI_TRB_SET_TR_DEQUEUE_POINTER;
/* Cancel any incomplete transfers */
while ( xhci_ring_fill ( &endpoint->ring ) ) {
iobuf = xhci_dequeue_multi ( &endpoint->ring );
+ iob_unmap ( iobuf );
usb_complete_err ( ep, iobuf, -ECANCELED );
}
static int xhci_endpoint_message ( struct usb_endpoint *ep,
struct io_buffer *iobuf ) {
struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
+ struct xhci_device *xhci = endpoint->xhci;
struct usb_setup_packet *packet;
unsigned int input;
size_t len;
if ( len )
setup->direction = ( input ? XHCI_SETUP_IN : XHCI_SETUP_OUT );
+ /* Map I/O buffer */
+ if ( ( rc = iob_map ( iobuf, xhci->dma, len,
+ ( input ? DMA_RX : DMA_TX ) ) ) != 0 )
+ goto err_map;
+
/* Construct data stage TRB, if applicable */
if ( len ) {
data = &(trb++)->data;
- data->data = cpu_to_le64 ( virt_to_phys ( iobuf->data ) );
+ data->data = cpu_to_le64 ( iob_dma ( iobuf ) );
data->len = cpu_to_le32 ( len );
data->type = XHCI_TRB_DATA;
data->direction = ( input ? XHCI_DATA_IN : XHCI_DATA_OUT );
/* Enqueue TRBs */
if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
( trb - trbs ) ) ) != 0 )
- return rc;
+ goto err_enqueue;
/* Ring the doorbell */
xhci_doorbell ( &endpoint->ring );
profile_stop ( &xhci_message_profiler );
return 0;
+
+ err_enqueue:
+ iob_unmap ( iobuf );
+ err_map:
+ return rc;
}
/**
static int xhci_endpoint_stream ( struct usb_endpoint *ep,
struct io_buffer *iobuf, int zlp ) {
struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
- void *data = iobuf->data;
+ struct xhci_device *xhci = endpoint->xhci;
size_t len = iob_len ( iobuf );
unsigned int count = xhci_endpoint_count ( len, zlp );
union xhci_trb trbs[count];
union xhci_trb *trb = trbs;
struct xhci_trb_normal *normal;
+ physaddr_t data;
unsigned int i;
size_t trb_len;
int rc;
/* Profile stream transfers */
profile_start ( &xhci_stream_profiler );
+ /* Map I/O buffer */
+ if ( ( rc = iob_map ( iobuf, xhci->dma, len,
+ ( ( ep->address & USB_DIR_IN ) ?
+ DMA_RX : DMA_TX ) ) ) != 0 )
+ goto err_map;
+ data = iob_dma ( iobuf );
+
/* Construct normal TRBs */
memset ( &trbs, 0, sizeof ( trbs ) );
for ( i = 0 ; i < count ; i ++ ) {
/* Construct normal TRB */
normal = &trb->normal;
- normal->data = cpu_to_le64 ( virt_to_phys ( data ) );
+ normal->data = cpu_to_le64 ( data );
normal->len = cpu_to_le32 ( trb_len );
normal->type = XHCI_TRB_NORMAL;
normal->flags = XHCI_TRB_CH;
/* Enqueue TRBs */
if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
count ) ) != 0 )
- return rc;
+ goto err_enqueue;
/* Ring the doorbell */
xhci_doorbell ( &endpoint->ring );
profile_stop ( &xhci_stream_profiler );
return 0;
+
+ err_enqueue:
+ iob_unmap ( iobuf );
+ err_map:
+ return rc;
}
/******************************************************************************
/* Allocate a device context */
len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
- slot->context = malloc_phys ( len, xhci_align ( len ) );
+ slot->context = dma_alloc ( xhci->dma, &slot->map, len,
+ xhci_align ( len ) );
if ( ! slot->context ) {
rc = -ENOMEM;
goto err_alloc_context;
memset ( slot->context, 0, len );
/* Set device context base address */
- assert ( xhci->dcbaa[id] == 0 );
- xhci->dcbaa[id] = cpu_to_le64 ( virt_to_phys ( slot->context ) );
+ assert ( xhci->dcbaa.context[id] == 0 );
+ xhci->dcbaa.context[id] = cpu_to_le64 ( dma ( &slot->map,
+ slot->context ) );
DBGC2 ( xhci, "XHCI %s slot %d device context [%08lx,%08lx) for %s\n",
xhci->name, slot->id, virt_to_phys ( slot->context ),
( virt_to_phys ( slot->context ) + len ), usb->name );
return 0;
- xhci->dcbaa[id] = 0;
- free_phys ( slot->context, len );
+ xhci->dcbaa.context[id] = 0;
+ dma_free ( &slot->map, slot->context, len );
err_alloc_context:
xhci->slot[id] = NULL;
free ( slot );
/* Free slot */
if ( slot->context ) {
- free_phys ( slot->context, len );
- xhci->dcbaa[id] = 0;
+ dma_free ( &slot->map, slot->context, len );
+ xhci->dcbaa.context[id] = 0;
}
xhci->slot[id] = NULL;
free ( slot );
/* Initialise xHCI device */
xhci_init ( xhci, xhci->regs );
+ /* Configure DMA device */
+ xhci->dma = &pci->dma;
+ if ( xhci->addr64 )
+ dma_set_mask_64bit ( xhci->dma );
+
/* Initialise USB legacy support and claim ownership */
xhci_legacy_init ( xhci );
xhci_legacy_claim ( xhci );