#include <byteswap.h>
#include <ipxe/pci.h>
#include <ipxe/iobuf.h>
+#include <ipxe/dma.h>
#include <ipxe/timer.h>
#include <ipxe/malloc.h>
#include <ipxe/if_ether.h>
return ( avail - use );
}
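+/* Write a TX BD pointing at a DMA-mapped buffer of the given length */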
-void bnxt_set_txq ( struct bnxt *bp, int entry, dma_addr_t mapping, int len )
+void bnxt_set_txq ( struct bnxt *bp, int entry, physaddr_t mapping, int len )
{
struct tx_bd_short *prod_bd;
else
prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_GTE2K;
prod_bd->flags_type |= TX_BD_FLAGS;
- prod_bd->dma.addr = mapping;
+ prod_bd->dma = mapping;
prod_bd->len = len;
prod_bd->opaque = ( u32 )entry;
}
for ( i = 0; i < bp->rx.buf_cnt; i++ ) {
if ( bp->rx.iob[i] ) {
- free_iob ( bp->rx.iob[i] );
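+ /* free_rx_iob ( ) also unmaps the buffer from the DMA device */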
+ free_rx_iob ( bp->rx.iob[i] );
bp->rx.iob[i] = NULL;
}
}
desc->flags_type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
desc->len = MAX_ETHERNET_PACKET_BUFFER_SIZE;
desc->opaque = idx;
- desc->dma.addr = virt_to_bus ( iob->data );
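+ /* Program the device-visible DMA address, not virt_to_bus ( ) */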
+ desc->dma = iob_dma ( iob );
}
static int bnxt_alloc_rx_iob ( struct bnxt *bp, u16 cons_id, u16 iob_idx )
{
struct io_buffer *iob;
- iob = alloc_iob ( BNXT_RX_STD_DMA_SZ );
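+ /* alloc_rx_iob ( ) returns a buffer already mapped for receive DMA */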
+ iob = alloc_rx_iob ( BNXT_RX_STD_DMA_SZ, bp->dma );
if ( !iob ) {
DBGP ( "- %s ( ): alloc_iob Failed\n", __func__ );
return -ENOMEM;
u8 cmpl_bit = bp->cq.completion_bit;
if ( bp->cq.cons_id == ( bp->cq.ring_cnt - 1 ) ) {
rx_cmp_hi = ( struct rx_pkt_cmpl_hi * )bp->cq.bd_virt;
cmpl_bit ^= 0x1; /* Ring has wrapped. */
} else
rx_cmp_hi = ( struct rx_pkt_cmpl_hi * ) ( rx_cmp+1 );
memset ( bp->hwrm_addr_req, 0, REQ_BUFFER_SIZE );
memset ( bp->hwrm_addr_resp, 0, RESP_BUFFER_SIZE );
memset ( bp->hwrm_addr_dma, 0, DMA_BUFFER_SIZE );
- bp->req_addr_mapping = virt_to_bus ( bp->hwrm_addr_req );
- bp->resp_addr_mapping = virt_to_bus ( bp->hwrm_addr_resp );
- bp->dma_addr_mapping = virt_to_bus ( bp->hwrm_addr_dma );
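+ /* dma_alloc ( ) does not zero memory, so clear the rings explicitly */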
+ memset ( bp->tx.bd_virt, 0, TX_RING_BUFFER_SIZE );
+ memset ( bp->rx.bd_virt, 0, RX_RING_BUFFER_SIZE );
+ memset ( bp->cq.bd_virt, 0, CQ_RING_BUFFER_SIZE );
+ memset ( bp->nq.bd_virt, 0, NQ_RING_BUFFER_SIZE );
+
bp->link_status = STATUS_LINK_DOWN;
bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT;
bp->mtu = MAX_ETHERNET_PACKET_BUFFER_SIZE;
{
DBGP ( "%s\n", __func__ );
if ( bp->nq.bd_virt ) {
- free_phys ( bp->nq.bd_virt, NQ_RING_BUFFER_SIZE );
+ dma_free ( &bp->nq_mapping, bp->nq.bd_virt, NQ_RING_BUFFER_SIZE );
bp->nq.bd_virt = NULL;
}
if ( bp->cq.bd_virt ) {
- free_phys ( bp->cq.bd_virt, CQ_RING_BUFFER_SIZE );
+ dma_free ( &bp->cq_mapping, bp->cq.bd_virt, CQ_RING_BUFFER_SIZE );
bp->cq.bd_virt = NULL;
}
if ( bp->rx.bd_virt ) {
- free_phys ( bp->rx.bd_virt, RX_RING_BUFFER_SIZE );
+ dma_free ( &bp->rx_mapping, bp->rx.bd_virt, RX_RING_BUFFER_SIZE );
bp->rx.bd_virt = NULL;
}
if ( bp->tx.bd_virt ) {
- free_phys ( bp->tx.bd_virt, TX_RING_BUFFER_SIZE );
+ dma_free ( &bp->tx_mapping, bp->tx.bd_virt, TX_RING_BUFFER_SIZE );
bp->tx.bd_virt = NULL;
}
if ( bp->hwrm_addr_dma ) {
- free_phys ( bp->hwrm_addr_dma, DMA_BUFFER_SIZE );
- bp->dma_addr_mapping = 0;
+ dma_free ( &bp->dma_mapped, bp->hwrm_addr_dma, DMA_BUFFER_SIZE );
bp->hwrm_addr_dma = NULL;
}
if ( bp->hwrm_addr_resp ) {
- free_phys ( bp->hwrm_addr_resp, RESP_BUFFER_SIZE );
- bp->resp_addr_mapping = 0;
+ dma_free ( &bp->resp_mapping, bp->hwrm_addr_resp, RESP_BUFFER_SIZE );
bp->hwrm_addr_resp = NULL;
}
if ( bp->hwrm_addr_req ) {
- free_phys ( bp->hwrm_addr_req, REQ_BUFFER_SIZE );
- bp->req_addr_mapping = 0;
+ dma_free ( &bp->req_mapping, bp->hwrm_addr_req, REQ_BUFFER_SIZE );
bp->hwrm_addr_req = NULL;
}
DBGP ( "- %s ( ): - Done\n", __func__ );
int bnxt_alloc_mem ( struct bnxt *bp )
{
DBGP ( "%s\n", __func__ );
- bp->hwrm_addr_req = malloc_phys ( REQ_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
- bp->hwrm_addr_resp = malloc_phys ( RESP_BUFFER_SIZE,
- BNXT_DMA_ALIGNMENT );
- bp->hwrm_addr_dma = malloc_phys ( DMA_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
- bp->tx.bd_virt = malloc_phys ( TX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
- bp->rx.bd_virt = malloc_phys ( RX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
- bp->cq.bd_virt = malloc_phys ( CQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
- bp->nq.bd_virt = malloc_phys ( NQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
+ bp->hwrm_addr_req = dma_alloc ( bp->dma, &bp->req_mapping,
+ REQ_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
+ bp->hwrm_addr_resp = dma_alloc ( bp->dma, &bp->resp_mapping,
+ RESP_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
+ bp->hwrm_addr_dma = dma_alloc ( bp->dma, &bp->dma_mapped,
+ DMA_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
+ bp->tx.bd_virt = dma_alloc ( bp->dma, &bp->tx_mapping,
+ TX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
+ bp->rx.bd_virt = dma_alloc ( bp->dma, &bp->rx_mapping,
+ RX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
+ bp->cq.bd_virt = dma_alloc ( bp->dma, &bp->cq_mapping,
+ CQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
+ bp->nq.bd_virt = dma_alloc ( bp->dma, &bp->nq_mapping,
+ NQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
test_if ( bp->hwrm_addr_req &&
bp->hwrm_addr_resp &&
bp->hwrm_addr_dma &&
req->req_type = cmd;
req->cmpl_ring = ( u16 )HWRM_NA_SIGNATURE;
req->target_id = ( u16 )HWRM_NA_SIGNATURE;
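+ /* The device writes its response via DMA, so pass the mapped address */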
- req->resp_addr = bp->resp_addr_mapping;
+ req->resp_addr = RESP_DMA_ADDR ( bp );
req->seq_id = bp->seq_id++;
}
struct hwrm_short_input sreq;
memset ( &sreq, 0, sizeof ( struct hwrm_short_input ) );
sreq.req_type = ( u16 ) ( ( struct input * )bp->hwrm_addr_req )->req_type;
sreq.signature = SHORT_REQ_SIGNATURE_SHORT_CMD;
sreq.size = len;
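+ /* The firmware fetches the full request from this DMA address */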
- sreq.req_addr = bp->req_addr_mapping;
+ sreq.req_addr = REQ_DMA_ADDR ( bp );
mdelay ( 100 );
dbg_short_cmd ( ( u8 * )&sreq, __func__,
sizeof ( struct hwrm_short_input ) );
static int wait_resp ( struct bnxt *bp, u32 tmo, u16 len, const char *func )
{
struct input *req = ( struct input * )bp->hwrm_addr_req;
struct output *resp = ( struct output * )bp->hwrm_addr_resp;
u8 *ptr = ( u8 * )resp;
u32 idx;
u32 wait_cnt = HWRM_CMD_DEFAULT_MULTIPLAYER ( ( u32 )tmo );
int rc;
DBGP ( "%s\n", __func__ );
req = ( struct hwrm_ver_get_input * )bp->hwrm_addr_req;
resp = ( struct hwrm_ver_get_output * )bp->hwrm_addr_resp;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VER_GET, cmd_len );
req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
req->hwrm_intf_min = HWRM_VERSION_MINOR;
int rc;
DBGP ( "%s\n", __func__ );
req = ( struct hwrm_func_resource_qcaps_input * )bp->hwrm_addr_req;
resp = ( struct hwrm_func_resource_qcaps_output * )bp->hwrm_addr_resp;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_RESOURCE_QCAPS,
cmd_len );
req->fid = ( u16 )HWRM_NA_SIGNATURE;
if ( FLAG_TEST ( bp->flags, BNXT_FLAG_RESOURCE_QCAPS_SUPPORT ) )
enables = bnxt_set_ring_info ( bp );
req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req;
req->num_cmpl_rings = bp->num_cmpl_rings;
req->num_tx_rings = bp->num_tx_rings;
req->num_rx_rings = bp->num_rx_rings;
if ( bp->vf )
return STATUS_SUCCESS;
req = ( struct hwrm_func_qcaps_input * )bp->hwrm_addr_req;
resp = ( struct hwrm_func_qcaps_output * )bp->hwrm_addr_resp;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_QCAPS, cmd_len );
req->fid = ( u16 )HWRM_NA_SIGNATURE;
rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
int rc;
DBGP ( "%s\n", __func__ );
req = ( struct hwrm_func_qcfg_input * )bp->hwrm_addr_req;
resp = ( struct hwrm_func_qcfg_output * )bp->hwrm_addr_resp;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_QCFG, cmd_len );
req->fid = ( u16 )HWRM_NA_SIGNATURE;
rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
DBGP ( "%s\n", __func__ );
req = ( struct hwrm_port_phy_qcaps_input * )bp->hwrm_addr_req;
resp = ( struct hwrm_port_phy_qcaps_output * )bp->hwrm_addr_resp;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_PHY_QCAPS, cmd_len );
rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
if ( rc ) {
struct hwrm_func_reset_input *req;
DBGP ( "%s\n", __func__ );
req = ( struct hwrm_func_reset_input * )bp->hwrm_addr_req;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_RESET, cmd_len );
if ( !bp->vf )
req->func_reset_level = FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME;
if ( bp->vf )
return STATUS_SUCCESS;
req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_CFG, cmd_len );
req->fid = ( u16 )HWRM_NA_SIGNATURE;
bnxt_hwrm_assign_resources ( bp );
int rc;
DBGP ( "%s\n", __func__ );
req = ( struct hwrm_func_drv_rgtr_input * )bp->hwrm_addr_req;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_DRV_RGTR, cmd_len );
/* Register with HWRM */
if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_DRIVER_REG ) ) )
return STATUS_SUCCESS;
req = ( struct hwrm_func_drv_unrgtr_input * )bp->hwrm_addr_req;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_DRV_UNRGTR, cmd_len );
req->flags = FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN;
rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_vf_cfg_input );
struct hwrm_func_vf_cfg_input *req;
req = ( struct hwrm_func_vf_cfg_input * )bp->hwrm_addr_req;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_VF_CFG,
cmd_len );
req->enables = VF_CFG_ENABLE_FLAGS;
u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_cfg_input );
struct hwrm_func_cfg_input *req;
req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_CFG, cmd_len );
req->fid = ( u16 )HWRM_NA_SIGNATURE;
req->enables = FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR;
u32 enables;
DBGP ( "%s\n", __func__ );
req = ( struct hwrm_cfa_l2_filter_alloc_input * )bp->hwrm_addr_req;
resp = ( struct hwrm_cfa_l2_filter_alloc_output * )bp->hwrm_addr_resp;
if ( bp->vf )
flags |= CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST;
enables = CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_L2_FILTER ) ) )
return STATUS_SUCCESS;
req = ( struct hwrm_cfa_l2_filter_free_input * )bp->hwrm_addr_req;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_FILTER_FREE,
cmd_len );
req->l2_filter_id = bp->l2_filter_id;
struct hwrm_cfa_l2_set_rx_mask_input *req;
u32 mask = set_rx_mask ( rx_mask );
req = ( struct hwrm_cfa_l2_set_rx_mask_input * )bp->hwrm_addr_req;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_SET_RX_MASK,
cmd_len );
req->vnic_id = bp->vnic_id;
int rc;
DBGP ( "%s\n", __func__ );
req = ( struct hwrm_port_phy_qcfg_input * )bp->hwrm_addr_req;
resp = ( struct hwrm_port_phy_qcfg_output * )bp->hwrm_addr_resp;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_PHY_QCFG, cmd_len );
rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
if ( rc ) {
struct hwrm_nvm_get_variable_input *req;
DBGP ( "%s\n", __func__ );
req = ( struct hwrm_nvm_get_variable_input * )bp->hwrm_addr_req;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_NVM_GET_VARIABLE, cmd_len );
- req->dest_data_addr = bp->dma_addr_mapping;
+ req->dest_data_addr = DMA_DMA_ADDR ( bp );
req->data_len = data_len;
req->option_num = option_num;
req->dimensions = dimensions;
static int bnxt_get_link_speed ( struct bnxt *bp )
{
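+ /* The CPU reads the DMA buffer through its virtual address; the device-side address ( DMA_DMA_ADDR ) is only for the firmware */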
u32 *ptr32 = ( u32 * )bp->hwrm_addr_dma;
DBGP ( "%s\n", __func__ );
if ( ! ( FLAG_TEST (bp->flags, BNXT_FLAG_IS_CHIP_P7 ) ) ) {
static int bnxt_get_vlan ( struct bnxt *bp )
{
u32 *ptr32 = ( u32 * )bp->hwrm_addr_dma;
/* If VF is set to TRUE, Do not issue this command */
if ( bp->vf )
if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
return STATUS_SUCCESS;
req = ( struct hwrm_func_backing_store_qcfg_input * )bp->hwrm_addr_req;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_BACKING_STORE_QCFG,
cmd_len );
return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
return STATUS_SUCCESS;
req = ( struct hwrm_func_backing_store_cfg_input * )bp->hwrm_addr_req;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_BACKING_STORE_CFG,
cmd_len );
req->flags = FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE;
if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
return STATUS_SUCCESS;
req = ( struct hwrm_queue_qportcfg_input * )bp->hwrm_addr_req;
resp = ( struct hwrm_queue_qportcfg_output * )bp->hwrm_addr_resp;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_QUEUE_QPORTCFG, cmd_len );
req->flags = 0;
req->port_id = 0;
if ( bp->vf )
return STATUS_SUCCESS;
req = ( struct hwrm_port_mac_cfg_input * )bp->hwrm_addr_req;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_MAC_CFG, cmd_len );
req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
u8 auto_duplex = 0;
DBGP ( "%s\n", __func__ );
req = ( struct hwrm_port_phy_cfg_input * )bp->hwrm_addr_req;
flags = PORT_PHY_CFG_REQ_FLAGS_FORCE |
PORT_PHY_CFG_REQ_FLAGS_RESET_PHY;
int rc;
DBGP ( "%s\n", __func__ );
req = ( struct hwrm_stat_ctx_alloc_input * )bp->hwrm_addr_req;
resp = ( struct hwrm_stat_ctx_alloc_output * )bp->hwrm_addr_resp;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_STAT_CTX_ALLOC, cmd_len );
rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
if ( rc ) {
if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_STAT_CTX ) ) )
return STATUS_SUCCESS;
req = ( struct hwrm_stat_ctx_free_input * )bp->hwrm_addr_req;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_STAT_CTX_FREE, cmd_len );
req->stat_ctx_id = ( u32 )bp->stat_ctx_id;
rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_GRP ) ) )
return STATUS_SUCCESS;
req = ( struct hwrm_ring_grp_free_input * )bp->hwrm_addr_req;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_GRP_FREE, cmd_len );
req->ring_group_id = ( u32 )bp->ring_grp_id;
rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) )
return STATUS_SUCCESS;
req = ( struct hwrm_ring_grp_alloc_input * )bp->hwrm_addr_req;
resp = ( struct hwrm_ring_grp_alloc_output * )bp->hwrm_addr_resp;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_GRP_ALLOC, cmd_len );
req->cr = bp->cq_ring_id;
req->rr = bp->rx_ring_id;
struct hwrm_ring_free_input *req;
DBGP ( "%s\n", __func__ );
req = ( struct hwrm_ring_free_input * )bp->hwrm_addr_req;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_FREE, cmd_len );
req->ring_type = ring_type;
req->ring_id = ring_id;
int rc;
DBGP ( "%s\n", __func__ );
req = ( struct hwrm_ring_alloc_input * )bp->hwrm_addr_req;
resp = ( struct hwrm_ring_alloc_output * )bp->hwrm_addr_resp;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_ALLOC, cmd_len );
req->ring_type = type;
switch ( type ) {
req->int_mode = BNXT_CQ_INTR_MODE ( ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) || bp->vf ) );
req->length = ( u32 )bp->nq.ring_cnt;
req->logical_id = 0xFFFF; /* Required value for Thor FW? */
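+ /* Ring page table addresses handed to the device must be DMA addresses */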
- req->page_tbl_addr = virt_to_bus ( bp->nq.bd_virt );
+ req->page_tbl_addr = NQ_DMA_ADDR ( bp );
break;
case RING_ALLOC_REQ_RING_TYPE_L2_CMPL:
req->page_size = LM_PAGE_BITS ( 8 );
req->int_mode = BNXT_CQ_INTR_MODE ( bp->vf );
req->length = ( u32 )bp->cq.ring_cnt;
- req->page_tbl_addr = virt_to_bus ( bp->cq.bd_virt );
+ req->page_tbl_addr = CQ_DMA_ADDR ( bp );
if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
break;
req->enables = RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
req->queue_id = ( u16 )bp->queue_id;
req->stat_ctx_id = ( u32 )bp->stat_ctx_id;
req->cmpl_ring_id = bp->cq_ring_id;
- req->page_tbl_addr = virt_to_bus ( bp->tx.bd_virt );
+ req->page_tbl_addr = TX_DMA_ADDR ( bp );
break;
case RING_ALLOC_REQ_RING_TYPE_RX:
req->page_size = LM_PAGE_BITS ( 8 );
req->length = ( u32 )bp->rx.ring_cnt;
req->stat_ctx_id = ( u32 )STAT_CTX_ID;
req->cmpl_ring_id = bp->cq_ring_id;
- req->page_tbl_addr = virt_to_bus ( bp->rx.bd_virt );
+ req->page_tbl_addr = RX_DMA_ADDR ( bp );
if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
break;
req->queue_id = ( u16 )RX_RING_QID;
int rc;
DBGP ( "%s\n", __func__ );
req = ( struct hwrm_vnic_alloc_input * )bp->hwrm_addr_req;
resp = ( struct hwrm_vnic_alloc_output * )bp->hwrm_addr_resp;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_ALLOC, cmd_len );
req->flags = VNIC_ALLOC_REQ_FLAGS_DEFAULT;
rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_VNIC_ID ) ) )
return STATUS_SUCCESS;
req = ( struct hwrm_vnic_free_input * )bp->hwrm_addr_req;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_FREE, cmd_len );
req->vnic_id = bp->vnic_id;
rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
struct hwrm_vnic_cfg_input *req;
DBGP ( "%s\n", __func__ );
req = ( struct hwrm_vnic_cfg_input * )bp->hwrm_addr_req;
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_CFG, cmd_len );
req->enables = VNIC_CFG_REQ_ENABLES_MRU;
req->mru = bp->mtu;
int ret;
for ( ptr = cmds; *ptr; ++ptr ) {
memset ( bp->hwrm_addr_req, 0, REQ_BUFFER_SIZE );
memset ( bp->hwrm_addr_resp, 0, RESP_BUFFER_SIZE );
ret = ( *ptr ) ( bp );
if ( ret ) {
DBGP ( "- %s ( ): Failed\n", __func__ );
{
struct bnxt *bp = dev->priv;
u16 len, entry;
- dma_addr_t mapping;
+ physaddr_t mapping;
if ( bnxt_tx_avail ( bp ) < 1 ) {
DBGP ( "- %s ( ): Failed no bd's available\n", __func__ );
return -ENOBUFS;
}
bnxt_tx_adjust_pkt ( bp, iob );
entry = bp->tx.prod_id;
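+ /* Map after bnxt_tx_adjust_pkt ( ), which may modify iob->data */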
- mapping = virt_to_bus ( iob->data );
+ mapping = iob_dma ( iob );
len = iob_len ( iob );
bp->tx.iob[entry] = iob;
bnxt_set_txq ( bp, entry, mapping, len );
u32 cq_type;
while ( done == SERVICE_NEXT_CQ_BD ) {
cmp = ( struct cmpl_base * )BD_NOW ( bp->cq.bd_virt,
bp->cq.cons_id,
sizeof ( struct cmpl_base ) );
return;
while ( done == SERVICE_NEXT_NQ_BD ) {
nqp = ( struct nq_base * )BD_NOW ( bp->nq.bd_virt,
bp->nq.cons_id, sizeof ( struct nq_base ) );
if ( ( nqp->v & NQ_CN_V ) ^ bp->nq.completion_bit )
break;
nq_type = ( nqp->type & NQ_CN_TYPE_MASK );
bp->dev = netdev;
netdev->dev = &pci->dev;
+ /* Configure DMA; the NIC's descriptors carry 64-bit addresses */
+ bp->dma = &pci->dma;
+ dma_set_mask_64bit ( bp->dma );
+ netdev->dma = bp->dma;
+
/* Enable PCI device */
adjust_pci_device ( pci );