/* Allocate and zero pages */
va_start ( args, hv );
for ( i = 0 ; ( ( page = va_arg ( args, void ** ) ) != NULL ); i++ ) {
- *page = malloc_dma ( PAGE_SIZE, PAGE_SIZE );
+ *page = malloc_phys ( PAGE_SIZE, PAGE_SIZE );
if ( ! *page )
goto err_alloc;
memset ( *page, 0, PAGE_SIZE );
va_start ( args, hv );
for ( ; i >= 0 ; i-- ) {
page = va_arg ( args, void ** );
- free_dma ( *page, PAGE_SIZE );
+ free_phys ( *page, PAGE_SIZE );
}
va_end ( args );
return -ENOMEM;
va_start ( args, hv );
while ( ( page = va_arg ( args, void * ) ) != NULL )
- free_dma ( page, PAGE_SIZE );
+ free_phys ( page, PAGE_SIZE );
va_end ( args );
}
/* Allocate buffer. Must be aligned to at least 8 bytes and
* must not cross a page boundary, so align on its own size.
*/
- hv->message = malloc_dma ( sizeof ( *hv->message ),
- sizeof ( *hv->message ) );
+ hv->message = malloc_phys ( sizeof ( *hv->message ),
+ sizeof ( *hv->message ) );
if ( ! hv->message )
return -ENOMEM;
static void hv_free_message ( struct hv_hypervisor *hv ) {
/* Free buffer */
- free_dma ( hv->message, sizeof ( *hv->message ) );
+ free_phys ( hv->message, sizeof ( *hv->message ) );
}
/**
/* Allocate pages */
hvm->hypercall_len = ( pages * PAGE_SIZE );
- hvm->xen.hypercall = malloc_dma ( hvm->hypercall_len, PAGE_SIZE );
+ hvm->xen.hypercall = malloc_phys ( hvm->hypercall_len, PAGE_SIZE );
if ( ! hvm->xen.hypercall ) {
DBGC ( hvm, "HVM could not allocate %d hypercall page(s)\n",
pages );
static void hvm_unmap_hypercall ( struct hvm_device *hvm ) {
/* Free pages */
- free_dma ( hvm->xen.hypercall, hvm->hypercall_len );
+ free_phys ( hvm->xen.hypercall, hvm->hypercall_len );
}
/**
len += ( ( - len - offset ) & ( __alignof__ ( *iobuf ) - 1 ) );
/* Allocate memory for buffer plus descriptor */
- data = malloc_dma_offset ( len + sizeof ( *iobuf ), align,
- offset );
+ data = malloc_phys_offset ( len + sizeof ( *iobuf ), align,
+ offset );
if ( ! data )
return NULL;
iobuf = ( data + len );
} else {
/* Allocate memory for buffer */
- data = malloc_dma_offset ( len, align, offset );
+ data = malloc_phys_offset ( len, align, offset );
if ( ! data )
return NULL;
/* Allocate memory for descriptor */
iobuf = malloc ( sizeof ( *iobuf ) );
if ( ! iobuf ) {
- free_dma ( data, len );
+ free_phys ( data, len );
return NULL;
}
}
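The adjustment to len above is the standard round-up-to-alignment idiom: for a power-of-two alignment A, ( -x ) & ( A - 1 ) is exactly the padding that brings x up to the next multiple of A, so the inline struct io_buffer placed at data + len lands on an __alignof__ ( *iobuf ) boundary. A self-contained sketch of just that arithmetic, with the offset term taken as zero for simplicity (the names below are illustrative, not the real I/O buffer code):

#include <assert.h>
#include <stddef.h>

/* Padding needed to round "value" up to a multiple of a power-of-two "align" */
static size_t pad_to_align ( size_t value, size_t align ) {
        return ( ( -value ) & ( align - 1 ) );
}

int main ( void ) {
        size_t len = 11;        /* e.g. an 11-byte payload */
        size_t align = 8;       /* e.g. __alignof__ ( struct io_buffer ) */

        len += pad_to_align ( len, align );
        assert ( len == 16 );                   /* next multiple of 8 */
        assert ( ( len & ( align - 1 ) ) == 0 );
        return 0;
}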
if ( iobuf->end == iobuf ) {
/* Descriptor is inline */
- free_dma ( iobuf->head, ( len + sizeof ( *iobuf ) ) );
+ free_phys ( iobuf->head, ( len + sizeof ( *iobuf ) ) );
} else {
/* Descriptor is detached */
- free_dma ( iobuf->head, len );
+ free_phys ( iobuf->head, len );
free ( iobuf );
}
}
*
* @v ptr Memory allocated by malloc(), or NULL
*
- * Memory allocated with malloc_dma() cannot be freed with free(); it
- * must be freed with free_dma() instead.
+ * Memory allocated with malloc_phys() cannot be freed with free(); it
+ * must be freed with free_phys() instead.
*
* If @c ptr is NULL, no action is taken.
*/
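The pairing rule stated here is the invariant this whole rename preserves: memory from the physical allocator must go back through free_phys() with the same length, while ordinary heap allocations still use malloc()/free(). A minimal usage sketch against the renamed interface, using only the call shapes visible in this patch (the header names are an assumption about the usual iPXE includes):

#include <errno.h>
#include <string.h>
#include <ipxe/io.h>            /* PAGE_SIZE, virt_to_bus() (assumed) */
#include <ipxe/malloc.h>        /* malloc_phys(), free_phys() (assumed) */

/* Allocate, use and release one page of device-visible memory */
static int example_use_page ( void ) {
        void *page;

        page = malloc_phys ( PAGE_SIZE, PAGE_SIZE );
        if ( ! page )
                return -ENOMEM;
        memset ( page, 0, PAGE_SIZE );

        /* ... program virt_to_bus ( page ) into the device ... */

        /* Same length on free; plain free() is not valid for this memory */
        free_phys ( page, PAGE_SIZE );
        return 0;
}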
/* Allocate completion queue itself */
arbel_cq->cqe_size = ( cq->num_cqes * sizeof ( arbel_cq->cqe[0] ) );
- arbel_cq->cqe = malloc_dma ( arbel_cq->cqe_size,
- sizeof ( arbel_cq->cqe[0] ) );
+ arbel_cq->cqe = malloc_phys ( arbel_cq->cqe_size,
+ sizeof ( arbel_cq->cqe[0] ) );
if ( ! arbel_cq->cqe ) {
rc = -ENOMEM;
goto err_cqe;
err_sw2hw_cq:
MLX_FILL_1 ( ci_db_rec, 1, res, ARBEL_UAR_RES_NONE );
MLX_FILL_1 ( arm_db_rec, 1, res, ARBEL_UAR_RES_NONE );
- free_dma ( arbel_cq->cqe, arbel_cq->cqe_size );
+ free_phys ( arbel_cq->cqe, arbel_cq->cqe_size );
err_cqe:
free ( arbel_cq );
err_arbel_cq:
MLX_FILL_1 ( arm_db_rec, 1, res, ARBEL_UAR_RES_NONE );
/* Free memory */
- free_dma ( arbel_cq->cqe, arbel_cq->cqe_size );
+ free_phys ( arbel_cq->cqe, arbel_cq->cqe_size );
free ( arbel_cq );
/* Mark queue number as free */
/* Allocate work queue */
arbel_send_wq->wqe_size = ( num_wqes *
sizeof ( arbel_send_wq->wqe[0] ) );
- arbel_send_wq->wqe = malloc_dma ( arbel_send_wq->wqe_size,
- sizeof ( arbel_send_wq->wqe[0] ) );
+ arbel_send_wq->wqe = malloc_phys ( arbel_send_wq->wqe_size,
+ sizeof ( arbel_send_wq->wqe[0] ) );
if ( ! arbel_send_wq->wqe )
return -ENOMEM;
memset ( arbel_send_wq->wqe, 0, arbel_send_wq->wqe_size );
/* Allocate work queue */
arbel_recv_wq->wqe_size = ( num_wqes *
sizeof ( arbel_recv_wq->wqe[0] ) );
- arbel_recv_wq->wqe = malloc_dma ( arbel_recv_wq->wqe_size,
- sizeof ( arbel_recv_wq->wqe[0] ) );
+ arbel_recv_wq->wqe = malloc_phys ( arbel_recv_wq->wqe_size,
+ sizeof ( arbel_recv_wq->wqe[0] ) );
if ( ! arbel_recv_wq->wqe ) {
rc = -ENOMEM;
goto err_alloc_wqe;
( type == IB_QPT_UD ) ) {
arbel_recv_wq->grh_size = ( num_wqes *
sizeof ( arbel_recv_wq->grh[0] ) );
- arbel_recv_wq->grh = malloc_dma ( arbel_recv_wq->grh_size,
- sizeof ( void * ) );
+ arbel_recv_wq->grh = malloc_phys ( arbel_recv_wq->grh_size,
+ sizeof ( void * ) );
if ( ! arbel_recv_wq->grh ) {
rc = -ENOMEM;
goto err_alloc_grh;
return 0;
- free_dma ( arbel_recv_wq->grh, arbel_recv_wq->grh_size );
+ free_phys ( arbel_recv_wq->grh, arbel_recv_wq->grh_size );
err_alloc_grh:
- free_dma ( arbel_recv_wq->wqe, arbel_recv_wq->wqe_size );
+ free_phys ( arbel_recv_wq->wqe, arbel_recv_wq->wqe_size );
err_alloc_wqe:
return rc;
}
MLX_FILL_1 ( send_db_rec, 1, res, ARBEL_UAR_RES_NONE );
MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );
err_unsupported_address_split:
- free_dma ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
- free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
+ free_phys ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
+ free_phys ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
err_create_recv_wq:
- free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
+ free_phys ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
err_create_send_wq:
free ( arbel_qp );
err_arbel_qp:
MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );
/* Free memory */
- free_dma ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
- free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
- free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
+ free_phys ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
+ free_phys ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
+ free_phys ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
free ( arbel_qp );
/* Mark queue number as free */
/* Allocate event queue itself */
arbel_eq->eqe_size =
( ARBEL_NUM_EQES * sizeof ( arbel_eq->eqe[0] ) );
- arbel_eq->eqe = malloc_dma ( arbel_eq->eqe_size,
- sizeof ( arbel_eq->eqe[0] ) );
+ arbel_eq->eqe = malloc_phys ( arbel_eq->eqe_size,
+ sizeof ( arbel_eq->eqe[0] ) );
if ( ! arbel_eq->eqe ) {
rc = -ENOMEM;
goto err_eqe;
err_map_eq:
arbel_cmd_hw2sw_eq ( arbel, arbel_eq->eqn, &eqctx );
err_sw2hw_eq:
- free_dma ( arbel_eq->eqe, arbel_eq->eqe_size );
+ free_phys ( arbel_eq->eqe, arbel_eq->eqe_size );
err_eqe:
memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
return rc;
}
/* Free memory */
- free_dma ( arbel_eq->eqe, arbel_eq->eqe_size );
+ free_phys ( arbel_eq->eqe, arbel_eq->eqe_size );
memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
}
icm_phys = user_to_phys ( arbel->icm, 0 );
/* Allocate doorbell UAR */
- arbel->db_rec = malloc_dma ( ARBEL_PAGE_SIZE, ARBEL_PAGE_SIZE );
+ arbel->db_rec = malloc_phys ( ARBEL_PAGE_SIZE, ARBEL_PAGE_SIZE );
if ( ! arbel->db_rec ) {
rc = -ENOMEM;
goto err_alloc_doorbell;
err_map_icm:
arbel_cmd_unmap_icm_aux ( arbel );
err_map_icm_aux:
- free_dma ( arbel->db_rec, ARBEL_PAGE_SIZE );
+ free_phys ( arbel->db_rec, ARBEL_PAGE_SIZE );
arbel->db_rec = NULL;
err_alloc_doorbell:
err_alloc_icm:
arbel_cmd_unmap_icm ( arbel, ( arbel->icm_len / ARBEL_PAGE_SIZE ),
&unmap_icm );
arbel_cmd_unmap_icm_aux ( arbel );
- free_dma ( arbel->db_rec, ARBEL_PAGE_SIZE );
+ free_phys ( arbel->db_rec, ARBEL_PAGE_SIZE );
arbel->db_rec = NULL;
}
goto err_arbel;
/* Allocate space for mailboxes */
- arbel->mailbox_in = malloc_dma ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
+ arbel->mailbox_in = malloc_phys ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
if ( ! arbel->mailbox_in )
goto err_mailbox_in;
- arbel->mailbox_out = malloc_dma ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
+ arbel->mailbox_out = malloc_phys ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
if ( ! arbel->mailbox_out )
goto err_mailbox_out;
return arbel;
- free_dma ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
+ free_phys ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
err_mailbox_out:
- free_dma ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
+ free_phys ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
err_mailbox_in:
free ( arbel );
err_arbel:
ufree ( arbel->icm );
ufree ( arbel->firmware_area );
- free_dma ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
- free_dma ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
+ free_phys ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
+ free_phys ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
free ( arbel );
}
static inline void golan_cmd_uninit ( struct golan *golan )
{
- free_dma(golan->mboxes.outbox, GOLAN_PAGE_SIZE);
- free_dma(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
- free_dma(golan->cmd.addr, GOLAN_PAGE_SIZE);
+ free_phys(golan->mboxes.outbox, GOLAN_PAGE_SIZE);
+ free_phys(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
+ free_phys(golan->cmd.addr, GOLAN_PAGE_SIZE);
}
/**
int rc = 0;
uint32_t addr_l_sz;
- if (!(golan->cmd.addr = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
+ if (!(golan->cmd.addr = malloc_phys(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
rc = -ENOMEM;
- goto malloc_dma_failed;
+ goto malloc_phys_failed;
}
- if (!(golan->mboxes.inbox = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
+ if (!(golan->mboxes.inbox = malloc_phys(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
rc = -ENOMEM;
- goto malloc_dma_inbox_failed;
+ goto malloc_phys_inbox_failed;
}
- if (!(golan->mboxes.outbox = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
+ if (!(golan->mboxes.outbox = malloc_phys(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
rc = -ENOMEM;
- goto malloc_dma_outbox_failed;
+ goto malloc_phys_outbox_failed;
}
addr_l_sz = be32_to_cpu(readl(&golan->iseg->cmdq_addr_l_sz));
DBGC( golan , "%s Command interface was initialized\n", __FUNCTION__);
return 0;
-malloc_dma_outbox_failed:
- free_dma(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
-malloc_dma_inbox_failed:
- free_dma(golan->cmd.addr, GOLAN_PAGE_SIZE);
-malloc_dma_failed:
+malloc_phys_outbox_failed:
+ free_phys(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
+malloc_phys_inbox_failed:
+ free_phys(golan->cmd.addr, GOLAN_PAGE_SIZE);
+malloc_phys_failed:
DBGC (golan ,"%s Failed to initialize command interface (rc = 0x%x)\n",
__FUNCTION__, rc);
return rc;
eq->cons_index = 0;
eq->size = GOLAN_NUM_EQES * sizeof(eq->eqes[0]);
- eq->eqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
+ eq->eqes = malloc_phys ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
if (!eq->eqes) {
rc = -ENOMEM;
goto err_create_eq_eqe_alloc;
return 0;
err_create_eq_cmd:
- free_dma ( eq->eqes , GOLAN_PAGE_SIZE );
+ free_phys ( eq->eqes , GOLAN_PAGE_SIZE );
err_create_eq_eqe_alloc:
DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
return rc;
rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
GOLAN_PRINT_RC_AND_CMD_STATUS;
- free_dma ( golan->eq.eqes , GOLAN_PAGE_SIZE );
+ free_phys ( golan->eq.eqes , GOLAN_PAGE_SIZE );
golan->eq.eqn = 0;
DBGC( golan, "%s Event queue (0x%x) was destroyed\n", __FUNCTION__, eqn);
goto err_create_cq;
}
golan_cq->size = sizeof(golan_cq->cqes[0]) * cq->num_cqes;
- golan_cq->doorbell_record = malloc_dma(GOLAN_CQ_DB_RECORD_SIZE,
+ golan_cq->doorbell_record = malloc_phys(GOLAN_CQ_DB_RECORD_SIZE,
GOLAN_CQ_DB_RECORD_SIZE);
if (!golan_cq->doorbell_record) {
rc = -ENOMEM;
goto err_create_cq_db_alloc;
}
- golan_cq->cqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
+ golan_cq->cqes = malloc_phys ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
if (!golan_cq->cqes) {
rc = -ENOMEM;
goto err_create_cq_cqe_alloc;
return 0;
err_create_cq_cmd:
- free_dma( golan_cq->cqes , GOLAN_PAGE_SIZE );
+ free_phys( golan_cq->cqes , GOLAN_PAGE_SIZE );
err_create_cq_cqe_alloc:
- free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
+ free_phys(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
err_create_cq_db_alloc:
free ( golan_cq );
err_create_cq:
cq->cqn = 0;
ib_cq_set_drvdata(cq, NULL);
- free_dma ( golan_cq->cqes , GOLAN_PAGE_SIZE );
- free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
+ free_phys ( golan_cq->cqes , GOLAN_PAGE_SIZE );
+ free_phys(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
free(golan_cq);
DBGC (golan, "%s CQ number 0x%x was destroyed\n", __FUNCTION__, cqn);
golan_qp->size = golan_qp->sq.size + golan_qp->rq.size;
/* allocate dma memory for WQEs (1 page is enough) - should change it */
- golan_qp->wqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
+ golan_qp->wqes = malloc_phys ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
if (!golan_qp->wqes) {
rc = -ENOMEM;
goto err_create_qp_wqe_alloc;
data++;
}
- golan_qp->doorbell_record = malloc_dma(sizeof(struct golan_qp_db),
+ golan_qp->doorbell_record = malloc_phys(sizeof(struct golan_qp_db),
sizeof(struct golan_qp_db));
if (!golan_qp->doorbell_record) {
rc = -ENOMEM;
return 0;
err_create_qp_cmd:
- free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
+ free_phys(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
err_create_qp_db_alloc:
- free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
+ free_phys ( golan_qp->wqes, GOLAN_PAGE_SIZE );
err_create_qp_wqe_alloc:
err_create_qp_sq_size:
err_create_qp_sq_wqe_size:
qp->qpn = 0;
ib_qp_set_drvdata(qp, NULL);
- free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
- free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
+ free_phys(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
+ free_phys ( golan_qp->wqes, GOLAN_PAGE_SIZE );
free(golan_qp);
DBGC( golan ,"%s QP 0x%lx was destroyed\n", __FUNCTION__, qpn);
}
/* Allocate doorbell */
- hermon_cq->doorbell = malloc_dma ( sizeof ( hermon_cq->doorbell[0] ),
- sizeof ( hermon_cq->doorbell[0] ) );
+ hermon_cq->doorbell = malloc_phys ( sizeof ( hermon_cq->doorbell[0] ),
+ sizeof ( hermon_cq->doorbell[0] ) );
if ( ! hermon_cq->doorbell ) {
rc = -ENOMEM;
goto err_doorbell;
/* Allocate completion queue itself */
hermon_cq->cqe_size = ( cq->num_cqes * sizeof ( hermon_cq->cqe[0] ) );
- hermon_cq->cqe = malloc_dma ( hermon_cq->cqe_size,
- sizeof ( hermon_cq->cqe[0] ) );
+ hermon_cq->cqe = malloc_phys ( hermon_cq->cqe_size,
+ sizeof ( hermon_cq->cqe[0] ) );
if ( ! hermon_cq->cqe ) {
rc = -ENOMEM;
goto err_cqe;
err_sw2hw_cq:
hermon_free_mtt ( hermon, &hermon_cq->mtt );
err_alloc_mtt:
- free_dma ( hermon_cq->cqe, hermon_cq->cqe_size );
+ free_phys ( hermon_cq->cqe, hermon_cq->cqe_size );
err_cqe:
- free_dma ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
+ free_phys ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
err_doorbell:
free ( hermon_cq );
err_hermon_cq:
hermon_free_mtt ( hermon, &hermon_cq->mtt );
/* Free memory */
- free_dma ( hermon_cq->cqe, hermon_cq->cqe_size );
- free_dma ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
+ free_phys ( hermon_cq->cqe, hermon_cq->cqe_size );
+ free_phys ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
free ( hermon_cq );
/* Mark queue number as free */
/* Allocate doorbells */
hermon_qp->recv.doorbell =
- malloc_dma ( sizeof ( hermon_qp->recv.doorbell[0] ),
- sizeof ( hermon_qp->recv.doorbell[0] ) );
+ malloc_phys ( sizeof ( hermon_qp->recv.doorbell[0] ),
+ sizeof ( hermon_qp->recv.doorbell[0] ) );
if ( ! hermon_qp->recv.doorbell ) {
rc = -ENOMEM;
goto err_recv_doorbell;
hermon_qp->wqe_size = ( hermon_qp->send.wqe_size +
hermon_qp->recv.wqe_size +
hermon_qp->recv.grh_size );
- hermon_qp->wqe = malloc_dma ( hermon_qp->wqe_size,
- sizeof ( hermon_qp->send.wqe[0] ) );
+ hermon_qp->wqe = malloc_phys ( hermon_qp->wqe_size,
+ sizeof ( hermon_qp->send.wqe[0] ) );
if ( ! hermon_qp->wqe ) {
rc = -ENOMEM;
goto err_alloc_wqe;
err_rst2init_qp:
hermon_free_mtt ( hermon, &hermon_qp->mtt );
err_alloc_mtt:
- free_dma ( hermon_qp->wqe, hermon_qp->wqe_size );
+ free_phys ( hermon_qp->wqe, hermon_qp->wqe_size );
err_alloc_wqe:
- free_dma ( hermon_qp->recv.doorbell,
- sizeof ( hermon_qp->recv.doorbell[0] ) );
+ free_phys ( hermon_qp->recv.doorbell,
+ sizeof ( hermon_qp->recv.doorbell[0] ) );
err_recv_doorbell:
free ( hermon_qp );
err_hermon_qp:
hermon_free_mtt ( hermon, &hermon_qp->mtt );
/* Free memory */
- free_dma ( hermon_qp->wqe, hermon_qp->wqe_size );
- free_dma ( hermon_qp->recv.doorbell,
- sizeof ( hermon_qp->recv.doorbell[0] ) );
+ free_phys ( hermon_qp->wqe, hermon_qp->wqe_size );
+ free_phys ( hermon_qp->recv.doorbell,
+ sizeof ( hermon_qp->recv.doorbell[0] ) );
free ( hermon_qp );
/* Mark queue number as free */
/* Allocate event queue itself */
hermon_eq->eqe_size =
( HERMON_NUM_EQES * sizeof ( hermon_eq->eqe[0] ) );
- hermon_eq->eqe = malloc_dma ( hermon_eq->eqe_size,
- sizeof ( hermon_eq->eqe[0] ) );
+ hermon_eq->eqe = malloc_phys ( hermon_eq->eqe_size,
+ sizeof ( hermon_eq->eqe[0] ) );
if ( ! hermon_eq->eqe ) {
rc = -ENOMEM;
goto err_eqe;
err_sw2hw_eq:
hermon_free_mtt ( hermon, &hermon_eq->mtt );
err_alloc_mtt:
- free_dma ( hermon_eq->eqe, hermon_eq->eqe_size );
+ free_phys ( hermon_eq->eqe, hermon_eq->eqe_size );
err_eqe:
memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
return rc;
hermon_free_mtt ( hermon, &hermon_eq->mtt );
/* Free memory */
- free_dma ( hermon_eq->eqe, hermon_eq->eqe_size );
+ free_phys ( hermon_eq->eqe, hermon_eq->eqe_size );
memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
}
goto err_hermon;
/* Allocate space for mailboxes */
- hermon->mailbox_in = malloc_dma ( HERMON_MBOX_SIZE,
- HERMON_MBOX_ALIGN );
+ hermon->mailbox_in = malloc_phys ( HERMON_MBOX_SIZE,
+ HERMON_MBOX_ALIGN );
if ( ! hermon->mailbox_in )
goto err_mailbox_in;
- hermon->mailbox_out = malloc_dma ( HERMON_MBOX_SIZE,
- HERMON_MBOX_ALIGN );
+ hermon->mailbox_out = malloc_phys ( HERMON_MBOX_SIZE,
+ HERMON_MBOX_ALIGN );
if ( ! hermon->mailbox_out )
goto err_mailbox_out;
return hermon;
- free_dma ( hermon->mailbox_out, HERMON_MBOX_SIZE );
+ free_phys ( hermon->mailbox_out, HERMON_MBOX_SIZE );
err_mailbox_out:
- free_dma ( hermon->mailbox_in, HERMON_MBOX_SIZE );
+ free_phys ( hermon->mailbox_in, HERMON_MBOX_SIZE );
err_mailbox_in:
free ( hermon );
err_hermon:
ufree ( hermon->icm );
ufree ( hermon->firmware_area );
- free_dma ( hermon->mailbox_out, HERMON_MBOX_SIZE );
- free_dma ( hermon->mailbox_in, HERMON_MBOX_SIZE );
+ free_phys ( hermon->mailbox_out, HERMON_MBOX_SIZE );
+ free_phys ( hermon->mailbox_in, HERMON_MBOX_SIZE );
free ( hermon );
}
linda->send_buf[i] = i;
/* Allocate space for the SendBufAvail array */
- linda->sendbufavail = malloc_dma ( sizeof ( *linda->sendbufavail ),
- LINDA_SENDBUFAVAIL_ALIGN );
+ linda->sendbufavail = malloc_phys ( sizeof ( *linda->sendbufavail ),
+ LINDA_SENDBUFAVAIL_ALIGN );
if ( ! linda->sendbufavail ) {
rc = -ENOMEM;
goto err_alloc_sendbufavail;
return 0;
- free_dma ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) );
+ free_phys ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) );
err_alloc_sendbufavail:
return rc;
}
/* Ensure hardware has seen this disable */
linda_readq ( linda, &sendctrl, QIB_7220_SendCtrl_offset );
- free_dma ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) );
+ free_phys ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) );
}
/***************************************************************************
linda_wq->eager_cons = 0;
/* Allocate receive header buffer */
- linda_wq->header = malloc_dma ( LINDA_RECV_HEADERS_SIZE,
- LINDA_RECV_HEADERS_ALIGN );
+ linda_wq->header = malloc_phys ( LINDA_RECV_HEADERS_SIZE,
+ LINDA_RECV_HEADERS_ALIGN );
if ( ! linda_wq->header ) {
rc = -ENOMEM;
goto err_alloc_header;
virt_to_bus ( &linda_wq->header_prod ) );
return 0;
- free_dma ( linda_wq->header, LINDA_RECV_HEADERS_SIZE );
+ free_phys ( linda_wq->header, LINDA_RECV_HEADERS_SIZE );
err_alloc_header:
return rc;
}
mb();
/* Free headers ring */
- free_dma ( linda_wq->header, LINDA_RECV_HEADERS_SIZE );
+ free_phys ( linda_wq->header, LINDA_RECV_HEADERS_SIZE );
/* Free context */
linda_free_ctx ( linda, ctx );
)
{
mlx_status status = MLX_SUCCESS;
- *ptr = malloc_dma(size, align);
+ *ptr = malloc_phys(size, align);
if (*ptr == NULL) {
status = MLX_OUT_OF_RESOURCES;
} else {
)
{
mlx_status status = MLX_SUCCESS;
- free_dma(ptr, size);
+ free_phys(ptr, size);
return status;
}
mlx_status
}
/* Allocate space for the SendBufAvail array */
- qib7322->sendbufavail = malloc_dma ( sizeof ( *qib7322->sendbufavail ),
- QIB7322_SENDBUFAVAIL_ALIGN );
+ qib7322->sendbufavail = malloc_phys ( sizeof ( *qib7322->sendbufavail ),
+ QIB7322_SENDBUFAVAIL_ALIGN );
if ( ! qib7322->sendbufavail ) {
rc = -ENOMEM;
goto err_alloc_sendbufavail;
return 0;
- free_dma ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) );
+ free_phys ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) );
err_alloc_sendbufavail:
qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port1 );
err_create_send_bufs_vl15_port1:
/* Ensure hardware has seen this disable */
qib7322_readq ( qib7322, &sendctrl, QIB_7322_SendCtrl_offset );
- free_dma ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) );
+ free_phys ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) );
qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port1 );
qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port0 );
qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_small );
qib7322_wq->eager_cons = 0;
/* Allocate receive header buffer */
- qib7322_wq->header = malloc_dma ( QIB7322_RECV_HEADERS_SIZE,
- QIB7322_RECV_HEADERS_ALIGN );
+ qib7322_wq->header = malloc_phys ( QIB7322_RECV_HEADERS_SIZE,
+ QIB7322_RECV_HEADERS_ALIGN );
if ( ! qib7322_wq->header ) {
rc = -ENOMEM;
goto err_alloc_header;
virt_to_bus ( &qib7322_wq->header_prod ) );
return 0;
- free_dma ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE );
+ free_phys ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE );
err_alloc_header:
return rc;
}
mb();
/* Free headers ring */
- free_dma ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE );
+ free_phys ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE );
}
/**
{
DBGP("a3c90x_setup_tx_ring\n");
p->tx_ring =
- malloc_dma(TX_RING_SIZE * sizeof(struct TXD), TX_RING_ALIGN);
+ malloc_phys(TX_RING_SIZE * sizeof(struct TXD), TX_RING_ALIGN);
if (!p->tx_ring) {
DBG("Could not allocate TX-ring\n");
{
DBGP("a3c90x_free_tx_ring\n");
- free_dma(p->tx_ring, TX_RING_SIZE * sizeof(struct TXD));
+ free_phys(p->tx_ring, TX_RING_SIZE * sizeof(struct TXD));
p->tx_ring = NULL;
/* io_buffers are free()ed by netdev_tx_complete[,_err]() */
}
DBGP("a3c90x_setup_rx_ring\n");
p->rx_ring =
- malloc_dma(RX_RING_SIZE * sizeof(struct RXD), RX_RING_ALIGN);
+ malloc_phys(RX_RING_SIZE * sizeof(struct RXD), RX_RING_ALIGN);
if (!p->rx_ring) {
DBG("Could not allocate RX-ring\n");
{
DBGP("a3c90x_free_rx_ring\n");
- free_dma(p->rx_ring, RX_RING_SIZE * sizeof(struct RXD));
+ free_phys(p->rx_ring, RX_RING_SIZE * sizeof(struct RXD));
p->rx_ring = NULL;
}
/* allocate descriptors */
sc->desc_len = sizeof(struct ath5k_desc) * (ATH_TXBUF + ATH_RXBUF + 1);
- sc->desc = malloc_dma(sc->desc_len, ATH5K_DESC_ALIGN);
+ sc->desc = malloc_phys(sc->desc_len, ATH5K_DESC_ALIGN);
if (sc->desc == NULL) {
DBG("ath5k: can't allocate descriptors\n");
ret = -ENOMEM;
return 0;
err_free:
- free_dma(sc->desc, sc->desc_len);
+ free_phys(sc->desc, sc->desc_len);
err:
sc->desc = NULL;
return ret;
ath5k_rxbuf_free(sc, bf);
/* Free memory associated with all descriptors */
- free_dma(sc->desc, sc->desc_len);
+ free_phys(sc->desc, sc->desc_len);
free(sc->bufptr);
sc->bufptr = NULL;
}
/* allocate descriptors */
- dd->dd_desc = malloc_dma(dd->dd_desc_len, 16);
+ dd->dd_desc = malloc_phys(dd->dd_desc_len, 16);
if (dd->dd_desc == NULL) {
error = -ENOMEM;
goto fail;
}
return 0;
fail2:
- free_dma(dd->dd_desc, dd->dd_desc_len);
+ free_phys(dd->dd_desc, dd->dd_desc_len);
fail:
memset(dd, 0, sizeof(*dd));
return error;
struct ath_descdma *dd,
struct list_head *head)
{
- free_dma(dd->dd_desc, dd->dd_desc_len);
+ free_phys(dd->dd_desc, dd->dd_desc_len);
INIT_LIST_HEAD(head);
free(dd->dd_bufptr);
atl1e_clean_rx_ring(adapter);
if (adapter->ring_vir_addr) {
- free_dma(adapter->ring_vir_addr, adapter->ring_size);
+ free_phys(adapter->ring_vir_addr, adapter->ring_size);
adapter->ring_vir_addr = NULL;
adapter->ring_dma = 0;
}
/* real ring DMA buffer */
size = adapter->ring_size;
- adapter->ring_vir_addr = malloc_dma(adapter->ring_size, 32);
+ adapter->ring_vir_addr = malloc_phys(adapter->ring_size, 32);
if (adapter->ring_vir_addr == NULL) {
DBG("atl1e: out of memory allocating %d bytes for %s ring\n",
free_iob(bp->rx_iobuf[i]);
bp->rx_iobuf[i] = NULL;
}
- free_dma(bp->rx, B44_RX_RING_LEN_BYTES);
+ free_phys(bp->rx, B44_RX_RING_LEN_BYTES);
bp->rx = NULL;
}
}
{
b44_free_rx_ring(bp);
- bp->rx = malloc_dma(B44_RX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
+ bp->rx = malloc_phys(B44_RX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
if (!bp->rx)
return -ENOMEM;
if (!b44_address_ok(bp->rx)) {
- free_dma(bp->rx, B44_RX_RING_LEN_BYTES);
+ free_phys(bp->rx, B44_RX_RING_LEN_BYTES);
return -ENOTSUP;
}
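b44_address_ok() exists because these controllers can only address a limited DMA window, so a ring that was allocated successfully may still be unusable and is handed straight back to free_phys(). A rough sketch of that kind of range check, assuming a 30-bit (1GB) window; the mask value and function name are illustrative, not the driver's actual helper:

#include <stdint.h>

#define EXAMPLE_DMA_MASK 0x3fffffffULL  /* assumed 30-bit DMA window */

/* Return non-zero if the buffer [bus, bus + len) fits inside the window */
static int example_address_ok ( uint64_t bus, uint64_t len ) {
        return ( ( bus + len - 1 ) <= EXAMPLE_DMA_MASK );
}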
static void b44_free_tx_ring(struct b44_private *bp)
{
if (bp->tx) {
- free_dma(bp->tx, B44_TX_RING_LEN_BYTES);
+ free_phys(bp->tx, B44_TX_RING_LEN_BYTES);
bp->tx = NULL;
}
}
{
b44_free_tx_ring(bp);
- bp->tx = malloc_dma(B44_TX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
+ bp->tx = malloc_phys(B44_TX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
if (!bp->tx)
return -ENOMEM;
if (!b44_address_ok(bp->tx)) {
- free_dma(bp->tx, B44_TX_RING_LEN_BYTES);
+ free_phys(bp->tx, B44_TX_RING_LEN_BYTES);
return -ENOTSUP;
}
{
DBGP ( "%s\n", __func__ );
if ( bp->nq.bd_virt ) {
- free_dma ( bp->nq.bd_virt, NQ_RING_BUFFER_SIZE );
+ free_phys ( bp->nq.bd_virt, NQ_RING_BUFFER_SIZE );
bp->nq.bd_virt = NULL;
}
if ( bp->cq.bd_virt ) {
- free_dma ( bp->cq.bd_virt, CQ_RING_BUFFER_SIZE );
+ free_phys ( bp->cq.bd_virt, CQ_RING_BUFFER_SIZE );
bp->cq.bd_virt = NULL;
}
if ( bp->rx.bd_virt ) {
- free_dma ( bp->rx.bd_virt, RX_RING_BUFFER_SIZE );
+ free_phys ( bp->rx.bd_virt, RX_RING_BUFFER_SIZE );
bp->rx.bd_virt = NULL;
}
if ( bp->tx.bd_virt ) {
- free_dma ( bp->tx.bd_virt, TX_RING_BUFFER_SIZE );
+ free_phys ( bp->tx.bd_virt, TX_RING_BUFFER_SIZE );
bp->tx.bd_virt = NULL;
}
if ( bp->hwrm_addr_dma ) {
- free_dma ( bp->hwrm_addr_dma, DMA_BUFFER_SIZE );
+ free_phys ( bp->hwrm_addr_dma, DMA_BUFFER_SIZE );
bp->dma_addr_mapping = 0;
bp->hwrm_addr_dma = NULL;
}
if ( bp->hwrm_addr_resp ) {
- free_dma ( bp->hwrm_addr_resp, RESP_BUFFER_SIZE );
+ free_phys ( bp->hwrm_addr_resp, RESP_BUFFER_SIZE );
bp->resp_addr_mapping = 0;
bp->hwrm_addr_resp = NULL;
}
if ( bp->hwrm_addr_req ) {
- free_dma ( bp->hwrm_addr_req, REQ_BUFFER_SIZE );
+ free_phys ( bp->hwrm_addr_req, REQ_BUFFER_SIZE );
bp->req_addr_mapping = 0;
bp->hwrm_addr_req = NULL;
}
int bnxt_alloc_mem ( struct bnxt *bp )
{
DBGP ( "%s\n", __func__ );
- bp->hwrm_addr_req = malloc_dma ( REQ_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
- bp->hwrm_addr_resp = malloc_dma ( RESP_BUFFER_SIZE,
- BNXT_DMA_ALIGNMENT );
- bp->hwrm_addr_dma = malloc_dma ( DMA_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
- bp->tx.bd_virt = malloc_dma ( TX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
- bp->rx.bd_virt = malloc_dma ( RX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
- bp->cq.bd_virt = malloc_dma ( CQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
- bp->nq.bd_virt = malloc_dma ( NQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
+ bp->hwrm_addr_req = malloc_phys ( REQ_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
+ bp->hwrm_addr_resp = malloc_phys ( RESP_BUFFER_SIZE,
+ BNXT_DMA_ALIGNMENT );
+ bp->hwrm_addr_dma = malloc_phys ( DMA_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
+ bp->tx.bd_virt = malloc_phys ( TX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
+ bp->rx.bd_virt = malloc_phys ( RX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
+ bp->cq.bd_virt = malloc_phys ( CQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
+ bp->nq.bd_virt = malloc_phys ( NQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
test_if ( bp->hwrm_addr_req &&
bp->hwrm_addr_resp &&
bp->hwrm_addr_dma &&
/*
* Debugging levels:
- * - DBG() is for any errors, i.e. failed alloc_iob(), malloc_dma(),
+ * - DBG() is for any errors, i.e. failed alloc_iob(), malloc_phys(),
* TX overflow, corrupted packets, ...
* - DBG2() is for successful events, like packet received,
* packet transmitted, and other general notifications.
ifec_mdio_setup ( netdev, options );
/* Prepare MAC address w/ Individual Address Setup (ias) command.*/
- ias = malloc_dma ( sizeof ( *ias ), CB_ALIGN );
+ ias = malloc_phys ( sizeof ( *ias ), CB_ALIGN );
if ( !ias ) {
rc = -ENOMEM;
goto error;
memcpy ( ias->ia, netdev->ll_addr, ETH_ALEN );
/* Prepare operating parameters w/ a configure command. */
- cfg = malloc_dma ( sizeof ( *cfg ), CB_ALIGN );
+ cfg = malloc_phys ( sizeof ( *cfg ), CB_ALIGN );
if ( !cfg ) {
rc = -ENOMEM;
goto error;
DBG ( "Failed to initiate!\n" );
goto error;
}
- free_dma ( ias, sizeof ( *ias ) );
- free_dma ( cfg, sizeof ( *cfg ) );
+ free_phys ( ias, sizeof ( *ias ) );
+ free_phys ( cfg, sizeof ( *cfg ) );
DBG2 ( "cfg " );
/* Enable rx by sending ring address to card */
return 0;
error:
- free_dma ( cfg, sizeof ( *cfg ) );
- free_dma ( ias, sizeof ( *ias ) );
+ free_phys ( cfg, sizeof ( *cfg ) );
+ free_phys ( ias, sizeof ( *ias ) );
ifec_free ( netdev );
ifec_reset ( netdev );
return rc;
}
/* free TX ring buffer */
- free_dma ( priv->tcbs, TX_RING_BYTES );
+ free_phys ( priv->tcbs, TX_RING_BYTES );
priv->tcbs = NULL;
}
DBGP ( "ifec_tx_setup\n" );
/* allocate tx ring */
- priv->tcbs = malloc_dma ( TX_RING_BYTES, CB_ALIGN );
+ priv->tcbs = malloc_phys ( TX_RING_BYTES, CB_ALIGN );
if ( !priv->tcbs ) {
DBG ( "TX-ring allocation failed\n" );
return -ENOMEM;
int rc;
/* Allocate admin completion queue */
- ena->acq.rsp = malloc_dma ( acq_len, acq_len );
+ ena->acq.rsp = malloc_phys ( acq_len, acq_len );
if ( ! ena->acq.rsp ) {
rc = -ENOMEM;
goto err_alloc_acq;
memset ( ena->acq.rsp, 0, acq_len );
/* Allocate admin queue */
- ena->aq.req = malloc_dma ( aq_len, aq_len );
+ ena->aq.req = malloc_phys ( aq_len, aq_len );
if ( ! ena->aq.req ) {
rc = -ENOMEM;
goto err_alloc_aq;
ena_clear_caps ( ena, ENA_AQ_CAPS );
ena_clear_caps ( ena, ENA_ACQ_CAPS );
- free_dma ( ena->aq.req, aq_len );
+ free_phys ( ena->aq.req, aq_len );
err_alloc_aq:
- free_dma ( ena->acq.rsp, acq_len );
+ free_phys ( ena->acq.rsp, acq_len );
err_alloc_acq:
return rc;
}
wmb();
/* Free queues */
- free_dma ( ena->aq.req, aq_len );
- free_dma ( ena->acq.rsp, acq_len );
+ free_phys ( ena->aq.req, aq_len );
+ free_phys ( ena->acq.rsp, acq_len );
DBGC ( ena, "ENA %p AQ and ACQ destroyed\n", ena );
}
int rc;
/* Allocate submission queue entries */
- sq->sqe.raw = malloc_dma ( sq->len, ENA_ALIGN );
+ sq->sqe.raw = malloc_phys ( sq->len, ENA_ALIGN );
if ( ! sq->sqe.raw ) {
rc = -ENOMEM;
goto err_alloc;
return 0;
err_admin:
- free_dma ( sq->sqe.raw, sq->len );
+ free_phys ( sq->sqe.raw, sq->len );
err_alloc:
return rc;
}
return rc;
/* Free submission queue entries */
- free_dma ( sq->sqe.raw, sq->len );
+ free_phys ( sq->sqe.raw, sq->len );
DBGC ( ena, "ENA %p %s SQ%d destroyed\n",
ena, ena_direction ( sq->direction ), sq->id );
int rc;
/* Allocate completion queue entries */
- cq->cqe.raw = malloc_dma ( cq->len, ENA_ALIGN );
+ cq->cqe.raw = malloc_phys ( cq->len, ENA_ALIGN );
if ( ! cq->cqe.raw ) {
rc = -ENOMEM;
goto err_alloc;
return 0;
err_admin:
- free_dma ( cq->cqe.raw, cq->len );
+ free_phys ( cq->cqe.raw, cq->len );
err_alloc:
return rc;
}
return rc;
/* Free completion queue entries */
- free_dma ( cq->cqe.raw, cq->len );
+ free_phys ( cq->cqe.raw, cq->len );
DBGC ( ena, "ENA %p CQ%d destroyed\n", ena, cq->id );
return 0;
{
/* We don't bother cleaning up the buffer table entries -
* we're hardly limited */
- free_dma ( p, EFAB_BUF_ALIGN );
+ free_phys ( p, EFAB_BUF_ALIGN );
}
static void*
unsigned long dma_addr;
/* Allocate the buffer, aligned on a buffer address boundary */
- buffer = malloc_dma ( bytes, EFAB_BUF_ALIGN );
+ buffer = malloc_phys ( bytes, EFAB_BUF_ALIGN );
if ( ! buffer )
return NULL;
}
/* Allocate transmit feedback region (shared between all ports) */
- exanic->txf = malloc_dma ( EXANIC_TXF_LEN, EXANIC_ALIGN );
+ exanic->txf = malloc_phys ( EXANIC_TXF_LEN, EXANIC_ALIGN );
if ( ! exanic->txf ) {
rc = -ENOMEM;
goto err_alloc_txf;
for ( i-- ; i >= 0 ; i-- )
exanic_remove_port ( exanic, i );
exanic_reset ( exanic );
- free_dma ( exanic->txf, EXANIC_TXF_LEN );
+ free_phys ( exanic->txf, EXANIC_TXF_LEN );
err_alloc_txf:
iounmap ( exanic->tx );
err_ioremap_tx:
exanic_reset ( exanic );
/* Free transmit feedback region */
- free_dma ( exanic->txf, EXANIC_TXF_LEN );
+ free_phys ( exanic->txf, EXANIC_TXF_LEN );
/* Unmap transmit region */
iounmap ( exanic->tx );
/* Allocate ring for both TX and RX */
priv->rx_ring =
- malloc_dma ( sizeof(struct ring_desc) * RXTX_RING_SIZE, 32 );
+ malloc_phys ( sizeof(struct ring_desc) * RXTX_RING_SIZE, 32 );
if ( ! priv->rx_ring )
goto err_malloc;
priv->tx_ring = &priv->rx_ring[RX_RING_SIZE];
DBGP ( "nv_free_rxtx_resources\n" );
- free_dma ( priv->rx_ring, sizeof(struct ring_desc) * RXTX_RING_SIZE );
+ free_phys ( priv->rx_ring, sizeof(struct ring_desc) * RXTX_RING_SIZE );
for ( i = 0; i < RX_RING_SIZE; i++ ) {
free_iob ( priv->rx_iobuf[i] );
struct icplus_descriptor *next;
/* Allocate descriptor ring */
- ring->entry = malloc_dma ( len, ICP_ALIGN );
+ ring->entry = malloc_phys ( len, ICP_ALIGN );
if ( ! ring->entry ) {
rc = -ENOMEM;
goto err_alloc;
( virt_to_bus ( ring->entry ) + len ) );
return 0;
- free_dma ( ring->entry, len );
+ free_phys ( ring->entry, len );
ring->entry = NULL;
err_alloc:
return rc;
size_t len = ( sizeof ( ring->entry[0] ) * ICP_NUM_DESC );
/* Free descriptor ring */
- free_dma ( ring->entry, len );
+ free_phys ( ring->entry, len );
ring->entry = NULL;
}
/* Allocate transmit descriptor ring memory.
It must not cross a 64K boundary because of hardware errata #23
- so we use malloc_dma() requesting a 128 byte block that is
+ so we use malloc_phys() requesting a 128 byte block that is
128 byte aligned. This should guarantee that the memory
allocated will not cross a 64K boundary, because 65536 is an
even multiple of 128 ( 65536 / 128 == 512 ), so all possible
128 byte blocks on 128 byte boundaries stay within a single
64K region.
*/
adapter->tx_base =
- malloc_dma ( adapter->tx_ring_size, adapter->tx_ring_size );
+ malloc_phys ( adapter->tx_ring_size, adapter->tx_ring_size );
if ( ! adapter->tx_base ) {
return -ENOMEM;
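The boundary guarantee described in the comment above is just the size-alignment argument: a block of S bytes starting on an S-byte boundary, with S a power of two no larger than 65536, occupies [k*S, (k+1)*S) and therefore cannot straddle a 64K-aligned address. A small self-contained check of that property (illustrative only; the real call passes adapter->tx_ring_size for both the length and the alignment):

#include <assert.h>
#include <stdint.h>

/* Does a size-aligned block of "size" bytes at "start" stay inside one
 * 64K-aligned window?  ("size" must be a power of two <= 65536.)
 */
static int stays_within_64k ( uint64_t start, uint64_t size ) {
        assert ( ( start & ( size - 1 ) ) == 0 );       /* size-aligned */
        return ( ( start >> 16 ) == ( ( start + size - 1 ) >> 16 ) );
}

int main ( void ) {
        uint64_t size = 4096;           /* example descriptor ring size */
        uint64_t start;

        /* Every size-aligned start address keeps the ring inside 64K */
        for ( start = 0 ; start < 0x100000 ; start += size )
                assert ( stays_within_64k ( start, size ) );
        return 0;
}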
{
DBG ( "igbvf_free_tx_resources\n" );
- free_dma ( adapter->tx_base, adapter->tx_ring_size );
+ free_phys ( adapter->tx_base, adapter->tx_ring_size );
}
/**
DBG ( "igbvf_free_rx_resources\n" );
- free_dma ( adapter->rx_base, adapter->rx_ring_size );
+ free_phys ( adapter->rx_base, adapter->rx_ring_size );
for ( i = 0; i < NUM_RX_DESC; i++ ) {
free_iob ( adapter->rx_iobuf[i] );
*/
adapter->rx_base =
- malloc_dma ( adapter->rx_ring_size, adapter->rx_ring_size );
+ malloc_phys ( adapter->rx_ring_size, adapter->rx_ring_size );
if ( ! adapter->rx_base ) {
return -ENOMEM;
* prevent any possible page-crossing errors due to hardware
* errata.
*/
- ring->desc = malloc_dma ( ring->len, ring->len );
+ ring->desc = malloc_phys ( ring->len, ring->len );
if ( ! ring->desc )
return -ENOMEM;
intel_reset_ring ( intel, ring->reg );
/* Free descriptor ring */
- free_dma ( ring->desc, ring->len );
+ free_phys ( ring->desc, ring->len );
ring->desc = NULL;
ring->prod = 0;
ring->cons = 0;
size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
/* Allocate admin queue */
- admin->buf = malloc_dma ( ( buf_len + len ), INTELXL_ALIGN );
+ admin->buf = malloc_phys ( ( buf_len + len ), INTELXL_ALIGN );
if ( ! admin->buf )
return -ENOMEM;
admin->desc = ( ( ( void * ) admin->buf ) + buf_len );
size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
/* Free queue */
- free_dma ( admin->buf, ( buf_len + len ) );
+ free_phys ( admin->buf, ( buf_len + len ) );
}
/**
int rc;
/* Allocate descriptor ring */
- ring->desc.raw = malloc_dma ( ring->len, INTELXL_ALIGN );
+ ring->desc.raw = malloc_phys ( ring->len, INTELXL_ALIGN );
if ( ! ring->desc.raw ) {
rc = -ENOMEM;
goto err_alloc;
return 0;
- free_dma ( ring->desc.raw, ring->len );
+ free_phys ( ring->desc.raw, ring->len );
err_alloc:
return rc;
}
struct intelxl_ring *ring ) {
/* Free descriptor ring */
- free_dma ( ring->desc.raw, ring->len );
+ free_phys ( ring->desc.raw, ring->len );
ring->desc.raw = NULL;
}
sizeof(struct io_buffer *) * jme->tx_ring_size);
free(txring->bufinf);
}
- free_dma(txring->desc, jme->tx_ring_size * TX_DESC_SIZE);
+ free_phys(txring->desc, jme->tx_ring_size * TX_DESC_SIZE);
txring->desc = NULL;
txring->dma = 0;
txring->bufinf = NULL;
{
struct jme_ring *txring = &jme->txring;
- txring->desc = malloc_dma(jme->tx_ring_size * TX_DESC_SIZE,
+ txring->desc = malloc_phys(jme->tx_ring_size * TX_DESC_SIZE,
RING_DESC_ALIGN);
if (!txring->desc) {
DBG("Can not allocate transmit ring descriptors.\n");
free(rxring->bufinf);
}
- free_dma(rxring->desc, jme->rx_ring_size * RX_DESC_SIZE);
+ free_phys(rxring->desc, jme->rx_ring_size * RX_DESC_SIZE);
rxring->desc = NULL;
rxring->dma = 0;
rxring->bufinf = NULL;
struct jme_ring *rxring = &jme->rxring;
struct io_buffer **bufinf;
- rxring->desc = malloc_dma(jme->rx_ring_size * RX_DESC_SIZE,
+ rxring->desc = malloc_phys(jme->rx_ring_size * RX_DESC_SIZE,
RING_DESC_ALIGN);
if (!rxring->desc) {
DBG("Can not allocate receive ring descriptors.\n");
/*
* Debugging levels:
- * - DBG() is for any errors, i.e. failed alloc_iob(), malloc_dma(),
+ * - DBG() is for any errors, i.e. failed alloc_iob(), malloc_phys(),
* TX overflow, corrupted packets, ...
* - DBG2() is for successful events, like packet received,
* packet transmitted, and other general notifications.
/* Release DMAable memory. */
- free_dma ( priv->dma, sizeof ( *priv->dma ) );
+ free_phys ( priv->dma, sizeof ( *priv->dma ) );
/* Erase all state from the open. */
/* Allocate cleared DMAable buffers. */
- priv->dma = malloc_dma ( sizeof ( *priv->dma ) , 128 );
+ priv->dma = malloc_phys ( sizeof ( *priv->dma ) , 128 );
if ( !priv->dma ) {
rc = -ENOMEM;
dbg = "DMA";
free_iob ( priv->receive_iob[priv->receives_posted] );
abort_with_dma:
/* Because the link is not up, we don't have to reset the NIC here. */
- free_dma ( priv->dma, sizeof ( *priv->dma ) );
+ free_phys ( priv->dma, sizeof ( *priv->dma ) );
abort_with_nothing:
/* Erase all signs of the failed open. */
memset ( priv, 0, sizeof ( *priv ) );
int rc;
/* Allocate descriptor ring */
- ring->desc = malloc_dma ( len, MYSON_RING_ALIGN );
+ ring->desc = malloc_phys ( len, MYSON_RING_ALIGN );
if ( ! ring->desc ) {
rc = -ENOMEM;
goto err_alloc;
return 0;
err_64bit:
- free_dma ( ring->desc, len );
+ free_phys ( ring->desc, len );
ring->desc = NULL;
err_alloc:
return rc;
writel ( 0, myson->regs + ring->reg );
/* Free descriptor ring */
- free_dma ( ring->desc, len );
+ free_phys ( ring->desc, len );
ring->desc = NULL;
ring->prod = 0;
ring->cons = 0;
* ensure that it can't possibly cross the boundary of 32-bit
* address space.
*/
- ring->desc = malloc_dma ( len, len );
+ ring->desc = malloc_phys ( len, len );
if ( ! ring->desc ) {
rc = -ENOMEM;
goto err_alloc;
return 0;
err_64bit:
- free_dma ( ring->desc, len );
+ free_phys ( ring->desc, len );
ring->desc = NULL;
err_alloc:
return rc;
writel ( 0, natsemi->regs + ring->reg + 4 );
/* Free descriptor ring */
- free_dma ( ring->desc, len );
+ free_phys ( ring->desc, len );
ring->desc = NULL;
ring->prod = 0;
ring->cons = 0;
ring->id_cons = 0;
/* Allocate and initialise shared ring */
- ring->sring.raw = malloc_dma ( PAGE_SIZE, PAGE_SIZE );
+ ring->sring.raw = malloc_phys ( PAGE_SIZE, PAGE_SIZE );
if ( ! ring->sring.raw ) {
rc = -ENOMEM;
goto err_alloc;
err_write_num:
xengrant_invalidate ( xen, ring->ref );
err_permit_access:
- free_dma ( ring->sring.raw, PAGE_SIZE );
+ free_phys ( ring->sring.raw, PAGE_SIZE );
err_alloc:
return rc;
}
xengrant_invalidate ( xen, ring->ref );
/* Free page */
- free_dma ( ring->sring.raw, PAGE_SIZE );
+ free_phys ( ring->sring.raw, PAGE_SIZE );
ring->sring.raw = NULL;
}
{
DBGP ( "pcnet32_setup_rx_resources\n" );
- priv->rx_base = malloc_dma ( RX_RING_BYTES, RX_RING_ALIGN );
+ priv->rx_base = malloc_phys ( RX_RING_BYTES, RX_RING_ALIGN );
DBG ( "priv->rx_base = %#08lx\n", virt_to_bus ( priv->rx_base ) );
DBGP ( "pcnet32_free_rx_resources\n" );
- free_dma ( priv->rx_base, RX_RING_BYTES );
+ free_phys ( priv->rx_base, RX_RING_BYTES );
for ( i = 0; i < RX_RING_SIZE; i++ ) {
free_iob ( priv->rx_iobuf[i] );
{
DBGP ( "pcnet32_setup_tx_resources\n" );
- priv->tx_base = malloc_dma ( TX_RING_BYTES, TX_RING_ALIGN );
+ priv->tx_base = malloc_phys ( TX_RING_BYTES, TX_RING_ALIGN );
if ( ! priv->tx_base ) {
return -ENOMEM;
{
DBGP ( "pcnet32_free_tx_resources\n" );
- free_dma ( priv->tx_base, TX_RING_BYTES );
+ free_phys ( priv->tx_base, TX_RING_BYTES );
}
static int
int rc;
/* Allocate context creation buffer */
- buf = malloc_dma ( sizeof ( *buf ), UNM_DMA_BUFFER_ALIGN );
+ buf = malloc_phys ( sizeof ( *buf ), UNM_DMA_BUFFER_ALIGN );
if ( ! buf ) {
rc = -ENOMEM;
goto out;
phantom, phantom->sds_irq_mask_crb );
out:
- free_dma ( buf, sizeof ( *buf ) );
+ free_phys ( buf, sizeof ( *buf ) );
return rc;
}
int rc;
/* Allocate context creation buffer */
- buf = malloc_dma ( sizeof ( *buf ), UNM_DMA_BUFFER_ALIGN );
+ buf = malloc_phys ( sizeof ( *buf ), UNM_DMA_BUFFER_ALIGN );
if ( ! buf ) {
rc = -ENOMEM;
goto out;
phantom, phantom->cds_producer_crb );
out:
- free_dma ( buf, sizeof ( *buf ) );
+ free_phys ( buf, sizeof ( *buf ) );
return rc;
}
int rc;
/* Allocate and zero descriptor rings */
- phantom->desc = malloc_dma ( sizeof ( *(phantom->desc) ),
- UNM_DMA_BUFFER_ALIGN );
+ phantom->desc = malloc_phys ( sizeof ( *(phantom->desc) ),
+ UNM_DMA_BUFFER_ALIGN );
if ( ! phantom->desc ) {
rc = -ENOMEM;
goto err_alloc_desc;
err_create_tx_ctx:
phantom_destroy_rx_ctx ( phantom );
err_create_rx_ctx:
- free_dma ( phantom->desc, sizeof ( *(phantom->desc) ) );
+ free_phys ( phantom->desc, sizeof ( *(phantom->desc) ) );
phantom->desc = NULL;
err_alloc_desc:
return rc;
phantom_del_macaddr ( phantom, netdev->ll_broadcast );
phantom_destroy_tx_ctx ( phantom );
phantom_destroy_rx_ctx ( phantom );
- free_dma ( phantom->desc, sizeof ( *(phantom->desc) ) );
+ free_phys ( phantom->desc, sizeof ( *(phantom->desc) ) );
phantom->desc = NULL;
/* Flush any uncompleted descriptors */
return 0;
/* Allocate buffer */
- rtl->rx_buffer = malloc_dma ( len, RTL_RXBUF_ALIGN );
+ rtl->rx_buffer = malloc_phys ( len, RTL_RXBUF_ALIGN );
if ( ! rtl->rx_buffer ) {
rc = -ENOMEM;
goto err_alloc;
return 0;
err_64bit:
- free_dma ( rtl->rx_buffer, len );
+ free_phys ( rtl->rx_buffer, len );
rtl->rx_buffer = NULL;
err_alloc:
return rc;
writel ( 0, rtl->regs + RTL_RBSTART );
/* Free buffer */
- free_dma ( rtl->rx_buffer, len );
+ free_phys ( rtl->rx_buffer, len );
rtl->rx_buffer = NULL;
rtl->rx_offset = 0;
}
return 0;
/* Allocate descriptor ring */
- ring->desc = malloc_dma ( ring->len, RTL_RING_ALIGN );
+ ring->desc = malloc_phys ( ring->len, RTL_RING_ALIGN );
if ( ! ring->desc )
return -ENOMEM;
writel ( 0, rtl->regs + ring->reg + 4 );
/* Free descriptor ring */
- free_dma ( ring->desc, ring->len );
+ free_phys ( ring->desc, ring->len );
ring->desc = NULL;
}
unsigned int i;
/* Allocate descriptors */
- ring->desc = malloc_dma ( len, RHINE_RING_ALIGN );
+ ring->desc = malloc_phys ( len, RHINE_RING_ALIGN );
if ( ! ring->desc )
return -ENOMEM;
writel ( 0, rhn->regs + ring->reg );
/* Free descriptor ring */
- free_dma ( ring->desc, len );
+ free_phys ( ring->desc, len );
ring->desc = NULL;
ring->prod = 0;
ring->cons = 0;
struct rtl818x_rx_desc *entry;
int i;
- priv->rx_ring = malloc_dma(sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE,
- RTL818X_RING_ALIGN);
+ priv->rx_ring = malloc_phys(sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE,
+ RTL818X_RING_ALIGN);
priv->rx_ring_dma = virt_to_bus(priv->rx_ring);
if (!priv->rx_ring) {
DBG("rtl818x %s: cannot allocate RX ring\n", dev->netdev->name);
priv->rx_buf[i] = NULL;
}
- free_dma(priv->rx_ring, sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE);
+ free_phys(priv->rx_ring, sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE);
priv->rx_ring = NULL;
}
struct rtl818x_priv *priv = dev->priv;
int i;
- priv->tx_ring = malloc_dma(sizeof(*priv->tx_ring) * RTL818X_TX_RING_SIZE,
- RTL818X_RING_ALIGN);
+ priv->tx_ring = malloc_phys(sizeof(*priv->tx_ring) * RTL818X_TX_RING_SIZE,
+ RTL818X_RING_ALIGN);
priv->tx_ring_dma = virt_to_bus(priv->tx_ring);
if (!priv->tx_ring) {
DBG("rtl818x %s: cannot allocate TX ring\n", dev->netdev->name);
priv->tx_buf[i] = NULL;
}
- free_dma(priv->tx_ring, sizeof(*priv->tx_ring) * RTL818X_TX_RING_SIZE);
+ free_phys(priv->tx_ring, sizeof(*priv->tx_ring) * RTL818X_TX_RING_SIZE);
priv->tx_ring = NULL;
}
void efx_hunt_free_special_buffer(void *buf, int bytes)
{
- free_dma(buf, bytes);
+ free_phys(buf, bytes);
}
static void *efx_hunt_alloc_special_buffer(int bytes,
* buffer will be passed into an MC_CMD_INIT_*Q command to setup the
* appropriate type of queue via MCDI.
*/
- buffer = malloc_dma(bytes, EFX_BUF_ALIGN);
+ buffer = malloc_phys(bytes, EFX_BUF_ALIGN);
if (!buffer)
return NULL;
int rc;
/* Allocate TX ring */
- tp->TxDescRing = malloc_dma(TX_RING_BYTES, RING_ALIGNMENT);
+ tp->TxDescRing = malloc_phys(TX_RING_BYTES, RING_ALIGNMENT);
if (!tp->TxDescRing) {
DBG("sis190: TX ring allocation failed\n");
rc = -ENOMEM;
tp->tx_dma = cpu_to_le32(virt_to_bus(tp->TxDescRing));
/* Allocate RX ring */
- tp->RxDescRing = malloc_dma(RX_RING_BYTES, RING_ALIGNMENT);
+ tp->RxDescRing = malloc_phys(RX_RING_BYTES, RING_ALIGNMENT);
if (!tp->RxDescRing) {
DBG("sis190: RX ring allocation failed\n");
rc = -ENOMEM;
struct sis190_private *tp = netdev_priv(dev);
int i;
- free_dma(tp->TxDescRing, TX_RING_BYTES);
- free_dma(tp->RxDescRing, RX_RING_BYTES);
+ free_phys(tp->TxDescRing, TX_RING_BYTES);
+ free_phys(tp->RxDescRing, RX_RING_BYTES);
tp->TxDescRing = NULL;
tp->RxDescRing = NULL;
free(skge->tx_ring.start);
skge->tx_ring.start = NULL;
- free_dma(skge->mem, RING_SIZE);
+ free_phys(skge->mem, RING_SIZE);
skge->mem = NULL;
skge->dma = 0;
}
DBG2(PFX "%s: enabling interface\n", dev->name);
- skge->mem = malloc_dma(RING_SIZE, SKGE_RING_ALIGN);
+ skge->mem = malloc_phys(RING_SIZE, SKGE_RING_ALIGN);
skge->dma = virt_to_bus(skge->mem);
if (!skge->mem)
return -ENOMEM;
/* Free the le and ring buffers */
static void sky2_free_rings(struct sky2_port *sky2)
{
- free_dma(sky2->rx_le, RX_LE_BYTES);
+ free_phys(sky2->rx_le, RX_LE_BYTES);
free(sky2->rx_ring);
- free_dma(sky2->tx_le, TX_RING_SIZE * sizeof(struct sky2_tx_le));
+ free_phys(sky2->tx_le, TX_RING_SIZE * sizeof(struct sky2_tx_le));
free(sky2->tx_ring);
sky2->tx_le = NULL;
netdev_link_down(dev);
/* must be power of 2 */
- sky2->tx_le = malloc_dma(TX_RING_SIZE * sizeof(struct sky2_tx_le), TX_RING_ALIGN);
+ sky2->tx_le = malloc_phys(TX_RING_SIZE * sizeof(struct sky2_tx_le), TX_RING_ALIGN);
sky2->tx_le_map = virt_to_bus(sky2->tx_le);
if (!sky2->tx_le)
goto err_out;
tx_init(sky2);
- sky2->rx_le = malloc_dma(RX_LE_BYTES, RX_RING_ALIGN);
+ sky2->rx_le = malloc_phys(RX_LE_BYTES, RX_RING_ALIGN);
sky2->rx_le_map = virt_to_bus(sky2->rx_le);
if (!sky2->rx_le)
goto err_out;
}
/* ring for status responses */
- hw->st_le = malloc_dma(STATUS_LE_BYTES, STATUS_RING_ALIGN);
+ hw->st_le = malloc_phys(STATUS_LE_BYTES, STATUS_RING_ALIGN);
if (!hw->st_le)
goto err_out_iounmap;
hw->st_dma = virt_to_bus(hw->st_le);
netdev_put(dev);
err_out_free_pci:
sky2_write8(hw, B0_CTST, CS_RST_SET);
- free_dma(hw->st_le, STATUS_LE_BYTES);
+ free_phys(hw->st_le, STATUS_LE_BYTES);
err_out_iounmap:
iounmap((void *)hw->regs);
err_out_free_hw:
sky2_write8(hw, B0_CTST, CS_RST_SET);
sky2_read8(hw, B0_CTST);
- free_dma(hw->st_le, STATUS_LE_BYTES);
+ free_phys(hw->st_le, STATUS_LE_BYTES);
for (i = hw->ports-1; i >= 0; --i) {
netdev_nullify(hw->dev[i]);
{ DBGP("%s\n", __func__);
if (tpr->rx_std) {
- free_dma(tpr->rx_std, TG3_RX_STD_RING_BYTES(tp));
+ free_phys(tpr->rx_std, TG3_RX_STD_RING_BYTES(tp));
tpr->rx_std = NULL;
}
}
{ DBGP("%s\n", __func__);
if (tp->tx_ring) {
- free_dma(tp->tx_ring, TG3_TX_RING_BYTES);
+ free_phys(tp->tx_ring, TG3_TX_RING_BYTES);
tp->tx_ring = NULL;
}
tp->tx_buffers = NULL;
if (tp->rx_rcb) {
- free_dma(tp->rx_rcb, TG3_RX_RCB_RING_BYTES(tp));
+ free_phys(tp->rx_rcb, TG3_RX_RCB_RING_BYTES(tp));
tp->rx_rcb_mapping = 0;
tp->rx_rcb = NULL;
}
tg3_rx_prodring_fini(&tp->prodring);
if (tp->hw_status) {
- free_dma(tp->hw_status, TG3_HW_STATUS_SIZE);
+ free_phys(tp->hw_status, TG3_HW_STATUS_SIZE);
tp->status_mapping = 0;
tp->hw_status = NULL;
}
struct tg3_hw_status *sblk;
struct tg3_rx_prodring_set *tpr = &tp->prodring;
- tp->hw_status = malloc_dma(TG3_HW_STATUS_SIZE, TG3_DMA_ALIGNMENT);
+ tp->hw_status = malloc_phys(TG3_HW_STATUS_SIZE, TG3_DMA_ALIGNMENT);
if (!tp->hw_status) {
DBGC(tp->dev, "hw_status alloc failed\n");
goto err_out;
memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
sblk = tp->hw_status;
- tpr->rx_std = malloc_dma(TG3_RX_STD_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
+ tpr->rx_std = malloc_phys(TG3_RX_STD_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
if (!tpr->rx_std) {
DBGC(tp->dev, "rx prodring alloc failed\n");
goto err_out;
if (!tp->tx_buffers)
goto err_out;
- tp->tx_ring = malloc_dma(TG3_TX_RING_BYTES, TG3_DMA_ALIGNMENT);
+ tp->tx_ring = malloc_phys(TG3_TX_RING_BYTES, TG3_DMA_ALIGNMENT);
if (!tp->tx_ring)
goto err_out;
tp->tx_desc_mapping = virt_to_bus(tp->tx_ring);
tp->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
- tp->rx_rcb = malloc_dma(TG3_RX_RCB_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
+ tp->rx_rcb = malloc_phys(TG3_RX_RCB_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
if (!tp->rx_rcb)
goto err_out;
tp->rx_rcb_mapping = virt_to_bus(tp->rx_rcb);
u32 *buf;
int ret = 0;
- buf = malloc_dma(TEST_BUFFER_SIZE, TG3_DMA_ALIGNMENT);
+ buf = malloc_phys(TEST_BUFFER_SIZE, TG3_DMA_ALIGNMENT);
if (!buf) {
ret = -ENOMEM;
goto out_nofree;
}
out:
- free_dma(buf, TEST_BUFFER_SIZE);
+ free_phys(buf, TEST_BUFFER_SIZE);
out_nofree:
return ret;
}
vlc->rx_prod = 0;
vlc->rx_cons = 0;
vlc->rx_commit = 0;
- vlc->rx_ring = malloc_dma ( VELOCITY_RXDESC_SIZE, VELOCITY_RING_ALIGN );
+ vlc->rx_ring = malloc_phys ( VELOCITY_RXDESC_SIZE,
+ VELOCITY_RING_ALIGN );
if ( ! vlc->rx_ring )
return -ENOMEM;
/* Allocate TX descriptor ring */
vlc->tx_prod = 0;
vlc->tx_cons = 0;
- vlc->tx_ring = malloc_dma ( VELOCITY_TXDESC_SIZE, VELOCITY_RING_ALIGN );
+ vlc->tx_ring = malloc_phys ( VELOCITY_TXDESC_SIZE,
+ VELOCITY_RING_ALIGN );
if ( ! vlc->tx_ring ) {
rc = -ENOMEM;
goto err_tx_alloc;
return 0;
err_tx_alloc:
- free_dma ( vlc->rx_ring, VELOCITY_RXDESC_SIZE );
+ free_phys ( vlc->rx_ring, VELOCITY_RXDESC_SIZE );
return rc;
}
writew ( 0, vlc->regs + VELOCITY_RXDESCNUM );
/* Destroy RX ring */
- free_dma ( vlc->rx_ring, VELOCITY_RXDESC_SIZE );
+ free_phys ( vlc->rx_ring, VELOCITY_RXDESC_SIZE );
vlc->rx_ring = NULL;
vlc->rx_prod = 0;
vlc->rx_cons = 0;
writew ( 0, vlc->regs + VELOCITY_TXDESCNUM );
/* Destroy TX ring */
- free_dma ( vlc->tx_ring, VELOCITY_TXDESC_SIZE );
+ free_phys ( vlc->tx_ring, VELOCITY_TXDESC_SIZE );
vlc->tx_ring = NULL;
vlc->tx_prod = 0;
vlc->tx_cons = 0;
int rc;
/* Allocate DMA areas */
- vmxnet->dma = malloc_dma ( sizeof ( *vmxnet->dma ), VMXNET3_DMA_ALIGN );
+ vmxnet->dma = malloc_phys ( sizeof ( *vmxnet->dma ),
+ VMXNET3_DMA_ALIGN );
if ( ! vmxnet->dma ) {
DBGC ( vmxnet, "VMXNET3 %p could not allocate DMA area\n",
vmxnet );
err_activate:
vmxnet3_flush_tx ( netdev );
vmxnet3_flush_rx ( netdev );
- free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
+ free_phys ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
err_alloc_dma:
return rc;
}
vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV );
vmxnet3_flush_tx ( netdev );
vmxnet3_flush_rx ( netdev );
- free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
+ free_phys ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
}
/** vmxnet3 net device operations */
hldev = vpath->hldev;
vp_id = vpath->vp_id;
- ring->rxdl = malloc_dma(sizeof(struct __vxge_hw_ring_block),
+ ring->rxdl = malloc_phys(sizeof(struct __vxge_hw_ring_block),
sizeof(struct __vxge_hw_ring_block));
if (!ring->rxdl) {
- vxge_debug(VXGE_ERR, "%s:%d malloc_dma error\n",
+ vxge_debug(VXGE_ERR, "%s:%d malloc_phys error\n",
__func__, __LINE__);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
if (ring->rxdl) {
- free_dma(ring->rxdl, sizeof(struct __vxge_hw_ring_block));
+ free_phys(ring->rxdl, sizeof(struct __vxge_hw_ring_block));
ring->rxdl = NULL;
}
ring->rxd_offset = 0;
fifo->tx_intr_num = (vpath->vp_id * VXGE_HW_MAX_INTR_PER_VP)
+ VXGE_HW_VPATH_INTR_TX;
- fifo->txdl = malloc_dma(sizeof(struct vxge_hw_fifo_txd)
+ fifo->txdl = malloc_phys(sizeof(struct vxge_hw_fifo_txd)
* fifo->depth, fifo->depth);
if (!fifo->txdl) {
- vxge_debug(VXGE_ERR, "%s:%d malloc_dma error\n",
+ vxge_debug(VXGE_ERR, "%s:%d malloc_phys error\n",
__func__, __LINE__);
return VXGE_HW_ERR_OUT_OF_MEMORY;
}
vxge_trace();
if (fifo->txdl)
- free_dma(fifo->txdl,
+ free_phys(fifo->txdl,
sizeof(struct vxge_hw_fifo_txd) * fifo->depth);
fifo->txdl = NULL;
}
/* Allocate queue head */
- ring->head = malloc_dma ( sizeof ( *ring->head ),
- ehci_align ( sizeof ( *ring->head ) ) );
+ ring->head = malloc_phys ( sizeof ( *ring->head ),
+ ehci_align ( sizeof ( *ring->head ) ) );
if ( ! ring->head ) {
rc = -ENOMEM;
goto err_alloc_queue;
/* Allocate transfer descriptors */
len = ( EHCI_RING_COUNT * sizeof ( ring->desc[0] ) );
- ring->desc = malloc_dma ( len, sizeof ( ring->desc[0] ) );
+ ring->desc = malloc_phys ( len, sizeof ( ring->desc[0] ) );
if ( ! ring->desc ) {
rc = -ENOMEM;
goto err_alloc_desc;
return 0;
err_unreachable_desc:
- free_dma ( ring->desc, len );
+ free_phys ( ring->desc, len );
err_alloc_desc:
err_unreachable_queue:
- free_dma ( ring->head, sizeof ( *ring->head ) );
+ free_phys ( ring->head, sizeof ( *ring->head ) );
err_alloc_queue:
free ( ring->iobuf );
err_alloc_iobuf:
assert ( ring->iobuf[i] == NULL );
/* Free transfer descriptors */
- free_dma ( ring->desc, ( EHCI_RING_COUNT * sizeof ( ring->desc[0] ) ) );
+ free_phys ( ring->desc, ( EHCI_RING_COUNT *
+ sizeof ( ring->desc[0] ) ) );
/* Free queue head */
- free_dma ( ring->head, sizeof ( *ring->head ) );
+ free_phys ( ring->head, sizeof ( *ring->head ) );
/* Free I/O buffers */
free ( ring->iobuf );
assert ( list_empty ( &ehci->periodic ) );
/* Allocate and initialise asynchronous queue head */
- ehci->head = malloc_dma ( sizeof ( *ehci->head ),
- ehci_align ( sizeof ( *ehci->head ) ) );
+ ehci->head = malloc_phys ( sizeof ( *ehci->head ),
+ ehci_align ( sizeof ( *ehci->head ) ) );
if ( ! ehci->head ) {
rc = -ENOMEM;
goto err_alloc_head;
/* Allocate periodic frame list */
frames = EHCI_PERIODIC_FRAMES ( ehci->flsize );
len = ( frames * sizeof ( ehci->frame[0] ) );
- ehci->frame = malloc_dma ( len, EHCI_PAGE_ALIGN );
+ ehci->frame = malloc_phys ( len, EHCI_PAGE_ALIGN );
if ( ! ehci->frame ) {
rc = -ENOMEM;
goto err_alloc_frame;
ehci_stop ( ehci );
err_unreachable_frame:
- free_dma ( ehci->frame, len );
+ free_phys ( ehci->frame, len );
err_alloc_frame:
err_ctrldssegment:
- free_dma ( ehci->head, sizeof ( *ehci->head ) );
+ free_phys ( ehci->head, sizeof ( *ehci->head ) );
err_alloc_head:
return rc;
}
ehci_stop ( ehci );
/* Free periodic frame list */
- free_dma ( ehci->frame, ( frames * sizeof ( ehci->frame[0] ) ) );
+ free_phys ( ehci->frame, ( frames * sizeof ( ehci->frame[0] ) ) );
/* Free asynchronous schedule */
- free_dma ( ehci->head, sizeof ( *ehci->head ) );
+ free_phys ( ehci->head, sizeof ( *ehci->head ) );
}
/**
memset ( ring, 0, sizeof ( *ring ) );
/* Allocate queue head */
- ring->head = malloc_dma ( sizeof ( *ring->head ), UHCI_ALIGN );
+ ring->head = malloc_phys ( sizeof ( *ring->head ), UHCI_ALIGN );
if ( ! ring->head ) {
rc = -ENOMEM;
goto err_alloc;
return 0;
err_unreachable:
- free_dma ( ring->head, sizeof ( *ring->head ) );
+ free_phys ( ring->head, sizeof ( *ring->head ) );
err_alloc:
return rc;
}
assert ( ring->xfer[i] == NULL );
/* Free queue head */
- free_dma ( ring->head, sizeof ( *ring->head ) );
+ free_phys ( ring->head, sizeof ( *ring->head ) );
}
/**
/* Allocate transfer descriptors */
len = ( count * sizeof ( xfer->desc[0] ) );
- xfer->desc = malloc_dma ( len, UHCI_ALIGN );
+ xfer->desc = malloc_phys ( len, UHCI_ALIGN );
if ( ! xfer->desc ) {
rc = -ENOMEM;
goto err_alloc_desc;
return 0;
err_unreachable_desc:
- free_dma ( xfer->desc, len );
+ free_phys ( xfer->desc, len );
err_alloc_desc:
free ( xfer );
err_alloc_xfer:
/* Free transfer descriptors */
len = ( xfer->prod * sizeof ( xfer->desc[0] ) );
- free_dma ( xfer->desc, len );
+ free_phys ( xfer->desc, len );
/* Free transfer */
free ( xfer );
assert ( list_empty ( &uhci->periodic ) );
/* Allocate and initialise asynchronous queue head */
- uhci->head = malloc_dma ( sizeof ( *uhci->head ), UHCI_ALIGN );
+ uhci->head = malloc_phys ( sizeof ( *uhci->head ), UHCI_ALIGN );
if ( ! uhci->head ) {
rc = -ENOMEM;
goto err_alloc_head;
uhci_async_schedule ( uhci );
/* Allocate periodic frame list */
- uhci->frame = malloc_dma ( sizeof ( *uhci->frame ),
- sizeof ( *uhci->frame ) );
+ uhci->frame = malloc_phys ( sizeof ( *uhci->frame ),
+ sizeof ( *uhci->frame ) );
if ( ! uhci->frame ) {
rc = -ENOMEM;
goto err_alloc_frame;
uhci_stop ( uhci );
err_unreachable_frame:
- free_dma ( uhci->frame, sizeof ( *uhci->frame ) );
+ free_phys ( uhci->frame, sizeof ( *uhci->frame ) );
err_alloc_frame:
err_unreachable_head:
- free_dma ( uhci->head, sizeof ( *uhci->head ) );
+ free_phys ( uhci->head, sizeof ( *uhci->head ) );
err_alloc_head:
return rc;
}
uhci_stop ( uhci );
/* Free periodic frame list */
- free_dma ( uhci->frame, sizeof ( *uhci->frame ) );
+ free_phys ( uhci->frame, sizeof ( *uhci->frame ) );
/* Free asynchronous schedule */
- free_dma ( uhci->head, sizeof ( *uhci->head ) );
+ free_phys ( uhci->head, sizeof ( *uhci->head ) );
}
/**
* with a minimum of 64 bytes).
*/
len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
- xhci->dcbaa = malloc_dma ( len, xhci_align ( len ) );
+ xhci->dcbaa = malloc_phys ( len, xhci_align ( len ) );
if ( ! xhci->dcbaa ) {
DBGC ( xhci, "XHCI %s could not allocate DCBAA\n", xhci->name );
rc = -ENOMEM;
return 0;
err_writeq:
- free_dma ( xhci->dcbaa, len );
+ free_phys ( xhci->dcbaa, len );
err_alloc:
return rc;
}
/* Free DCBAA */
len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
- free_dma ( xhci->dcbaa, len );
+ free_phys ( xhci->dcbaa, len );
}
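The alignment used for the DCBAA (and for the other xHCI structures below) comes from xhci_align(), whose definition lies outside this hunk. As a rough sketch only, based on the constraint quoted above (align on own size, rounded up to a power of two, with a minimum of 64 bytes), such a helper could look like the following; the name example_xhci_align() and the 64-byte constant are assumptions, not taken from this patch:

/* Sketch of an xhci_align()-style helper: round the requested length
 * up to the next power of two, with an assumed 64-byte minimum, to use
 * as the physical alignment passed to malloc_phys().
 */
static inline size_t example_xhci_align ( size_t len ) {
	size_t align = 64;	/* assumed minimum alignment */

	while ( align < len )
		align <<= 1;
	return align;
}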
/******************************************************************************
/* Allocate scratchpad array */
array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
xhci->scratchpad_array =
- malloc_dma ( array_len, xhci_align ( array_len ) );
+ malloc_phys ( array_len, xhci_align ( array_len ) );
if ( ! xhci->scratchpad_array ) {
DBGC ( xhci, "XHCI %s could not allocate scratchpad buffer "
"array\n", xhci->name );
( virt_to_phys ( xhci->scratchpad_array ) + array_len ) );
return 0;
- free_dma ( xhci->scratchpad_array, array_len );
+ free_phys ( xhci->scratchpad_array, array_len );
err_alloc_array:
ufree ( xhci->scratchpad );
err_alloc:
/* Free scratchpad array */
array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
- free_dma ( xhci->scratchpad_array, array_len );
+ free_phys ( xhci->scratchpad_array, array_len );
/* Free scratchpads */
ufree ( xhci->scratchpad );
}
/* Allocate TRBs */
- ring->trb = malloc_dma ( ring->len, xhci_align ( ring->len ) );
+ ring->trb = malloc_phys ( ring->len, xhci_align ( ring->len ) );
if ( ! ring->trb ) {
rc = -ENOMEM;
goto err_alloc_trb;
return 0;
- free_dma ( ring->trb, ring->len );
+ free_phys ( ring->trb, ring->len );
err_alloc_trb:
free ( ring->iobuf );
err_alloc_iobuf:
assert ( ring->iobuf[i] == NULL );
/* Free TRBs */
- free_dma ( ring->trb, ring->len );
+ free_phys ( ring->trb, ring->len );
/* Free I/O buffers */
free ( ring->iobuf );
/* Allocate event ring */
count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
len = ( count * sizeof ( event->trb[0] ) );
- event->trb = malloc_dma ( len, xhci_align ( len ) );
+ event->trb = malloc_phys ( len, xhci_align ( len ) );
if ( ! event->trb ) {
rc = -ENOMEM;
goto err_alloc_trb;
memset ( event->trb, 0, len );
/* Allocate event ring segment table */
- event->segment = malloc_dma ( sizeof ( event->segment[0] ),
- xhci_align ( sizeof (event->segment[0])));
+ event->segment = malloc_phys ( sizeof ( event->segment[0] ),
+ xhci_align ( sizeof(event->segment[0])));
if ( ! event->segment ) {
rc = -ENOMEM;
goto err_alloc_segment;
err_writeq_erstba:
xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
err_writeq_erdp:
- free_dma ( event->segment, sizeof ( event->segment[0] ) );
+ free_phys ( event->segment, sizeof ( event->segment[0] ) );
err_alloc_segment:
- free_dma ( event->trb, len );
+ free_phys ( event->trb, len );
err_alloc_trb:
return rc;
}
xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
/* Free event ring segment table */
- free_dma ( event->segment, sizeof ( event->segment[0] ) );
+ free_phys ( event->segment, sizeof ( event->segment[0] ) );
/* Free event ring */
count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
len = ( count * sizeof ( event->trb[0] ) );
- free_dma ( event->trb, len );
+ free_phys ( event->trb, len );
}
/**
/* Allocate an input context */
len = xhci_input_context_offset ( xhci, XHCI_CTX_END );
- input = malloc_dma ( len, xhci_align ( len ) );
+ input = malloc_phys ( len, xhci_align ( len ) );
if ( ! input ) {
rc = -ENOMEM;
goto err_alloc;
goto err_command;
err_command:
- free_dma ( input, len );
+ free_phys ( input, len );
err_alloc:
return rc;
}
/* Allocate a device context */
len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
- slot->context = malloc_dma ( len, xhci_align ( len ) );
+ slot->context = malloc_phys ( len, xhci_align ( len ) );
if ( ! slot->context ) {
rc = -ENOMEM;
goto err_alloc_context;
return 0;
xhci->dcbaa[id] = 0;
- free_dma ( slot->context, len );
+ free_phys ( slot->context, len );
err_alloc_context:
xhci->slot[id] = NULL;
free ( slot );
/* Free slot */
if ( slot->context ) {
- free_dma ( slot->context, len );
+ free_phys ( slot->context, len );
xhci->dcbaa[id] = 0;
}
xhci->slot[id] = NULL;
/*
* Prototypes for the standard functions (malloc() et al) are in
* stdlib.h. Include <ipxe/malloc.h> only if you need the
- * non-standard functions, such as malloc_dma().
+ * non-standard functions, such as malloc_phys().
*
*/
#include <stdlib.h>
extern void mdumpfree ( void );
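As a minimal illustration of this include convention (the file name is hypothetical):

/* hypothetical_driver.c: malloc() and free() need only stdlib.h, but
 * the non-standard malloc_phys(), malloc_phys_offset() and free_phys()
 * additionally require <ipxe/malloc.h>.
 */
#include <stdlib.h>
#include <ipxe/malloc.h>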
/**
- * Allocate memory for DMA
+ * Allocate memory with specified physical alignment and offset
*
* @v size Requested size
* @v align Physical alignment
* @v offset Offset from physical alignment
* @ret ptr Memory, or NULL
*
- * Allocates physically-aligned memory for DMA.
- *
* @c align must be a power of two. @c size may not be zero.
*/
-static inline void * __malloc malloc_dma_offset ( size_t size,
- size_t phys_align,
- size_t offset ) {
+static inline void * __malloc malloc_phys_offset ( size_t size,
+ size_t phys_align,
+ size_t offset ) {
void * ptr = alloc_memblock ( size, phys_align, offset );
if ( ptr && size )
VALGRIND_MALLOCLIKE_BLOCK ( ptr, size, 0, 0 );
return ptr;
}
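A usage sketch for the offset variant follows; the sizes and the function name are illustrative assumptions, not taken from this patch:

/* Request a 512-byte block whose physical address lies 64 bytes past a
 * 4096-byte boundary, i.e. ( virt_to_phys ( data ) % 4096 ) == 64.
 * The block is eventually released with free_phys ( data, 512 ).
 */
static void * example_alloc_at_offset ( void ) {
	void *data;

	data = malloc_phys_offset ( 512, 4096, 64 );
	if ( ! data )
		return NULL;
	return data;
}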
/**
- * Allocate memory for DMA
+ * Allocate memory with specified physical alignment
*
* @v size Requested size
* @v align Physical alignment
* @ret ptr Memory, or NULL
*
- * Allocates physically-aligned memory for DMA.
- *
* @c align must be a power of two. @c size may not be zero.
*/
-static inline void * __malloc malloc_dma ( size_t size, size_t phys_align ) {
- return malloc_dma_offset ( size, phys_align, 0 );
+static inline void * __malloc malloc_phys ( size_t size, size_t phys_align ) {
+ return malloc_phys_offset ( size, phys_align, 0 );
}
/**
- * Free memory allocated with malloc_dma()
+ * Free memory allocated with malloc_phys()
*
- * @v ptr Memory allocated by malloc_dma(), or NULL
- * @v size Size of memory, as passed to malloc_dma()
+ * @v ptr Memory allocated by malloc_phys(), or NULL
+ * @v size Size of memory, as passed to malloc_phys()
*
- * Memory allocated with malloc_dma() can only be freed with
- * free_dma(); it cannot be freed with the standard free().
+ * Memory allocated with malloc_phys() can only be freed with
+ * free_phys(); it cannot be freed with the standard free().
*
* If @c ptr is NULL, no action is taken.
*/
-static inline void free_dma ( void *ptr, size_t size ) {
+static inline void free_phys ( void *ptr, size_t size ) {
VALGRIND_FREELIKE_BLOCK ( ptr, 0 );
free_memblock ( ptr, size );
}
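To round off the renamed API, here is a self-contained usage sketch of the malloc_phys()/free_phys() pair; the structure and function names are hypothetical, and count must be a power of two so that the ring length is a valid alignment:

#include <stdint.h>
#include <string.h>
#include <ipxe/malloc.h>

/* Hypothetical hardware descriptor */
struct example_desc {
	uint64_t addr;
	uint32_t len;
	uint32_t flags;
};

/* Allocate a zeroed descriptor ring, physically aligned on its own length */
static struct example_desc * example_ring_alloc ( unsigned int count ) {
	size_t len = ( count * sizeof ( struct example_desc ) );
	struct example_desc *desc;

	desc = malloc_phys ( len, len );
	if ( ! desc )
		return NULL;
	memset ( desc, 0, len );
	return desc;
}

/* Free the ring: must use free_phys() with the original length, never free() */
static void example_ring_free ( struct example_desc *desc,
				unsigned int count ) {
	free_phys ( desc, ( count * sizeof ( *desc ) ) );
}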
len = ( sizeof ( *vmdev->out ) + out_len +
sizeof ( *vmdev->in ) + in_len );
assert ( ( len % PAGE_SIZE ) == 0 );
- ring = malloc_dma ( len, PAGE_SIZE );
+ ring = malloc_phys ( len, PAGE_SIZE );
if ( ! ring ) {
rc = -ENOMEM;
goto err_alloc_ring;
err_post_message:
vmbus_gpadl_teardown ( vmdev, vmdev->gpadl );
err_establish:
- free_dma ( ring, len );
+ free_phys ( ring, len );
err_alloc_ring:
free ( packet );
err_alloc_packet:
/* Free ring buffer */
len = ( sizeof ( *vmdev->out ) + vmdev->out_len +
sizeof ( *vmdev->in ) + vmdev->in_len );
- free_dma ( vmdev->out, len );
+ free_phys ( vmdev->out, len );
vmdev->out = NULL;
vmdev->in = NULL;