// INS_FLD(virt_to_bus(dev_buffers_p), buf, arbelprm_mpt_st,
// start_address_l);
// INS_FLD(memreg_size, buf, arbelprm_mpt_st, reg_wnd_len_l);
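+	/* Cover the entire address space: start address 0, window length 0xffffffff:ffffffff */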
+ INS_FLD(0, buf, arbelprm_mpt_st, start_address_l);
+ INS_FLD(0, buf, arbelprm_mpt_st, start_address_h);
INS_FLD(0xffffffffUL, buf, arbelprm_mpt_st, reg_wnd_len_l);
INS_FLD(0xffffffffUL, buf, arbelprm_mpt_st, reg_wnd_len_h);
}
__u8 nds;
void *ptr;
+ DBG ( "*** Creating MADS queue pair ***\n" );
+
qp = &dev_ib_data.mads_qp;
/* set the pointer to the receive WQEs buffer */
*rcv_cq_pp = &qp->rcv_cq;
}
+ DBG ( "*** Created MADS queue pair ***\n" );
+
return rc;
}
__u8 nds;
void *ptr;
+ DBG ( "*** Creating IPoIB queue pair ***\n" );
+
qp = &dev_ib_data.ipoib_qp;
/* set the pointer to the receive WQEs buffer */
*rcv_cq_pp = &qp->rcv_cq;
}
+ DBG ( "*** Created IPoIB queue pair ***\n" );
+
return rc;
}
qp->snd_cq.ci_db_ctx_pointer =
dev_ib_data.uar_context_base + 8 * qp->snd_cq.ci_db_ctx_idx;
+ DBG ( "* Creating send CQ *\n" );
+
/* create send CQ */
init_cq_buf(qp->snd_cq.cq_buf, qp->snd_cq.num_cqes);
qp->snd_cq.cons_counter = 0;
goto exit;
}
+ DBG ( "* Creating receive CQ *\n" );
+
/* create receive CQ */
init_cq_buf(qp->rcv_cq.cq_buf, qp->rcv_cq.num_cqes);
qp->rcv_cq.cons_counter = 0;
goto undo_snd_cq;
}
+ DBG ( "* Creating QP *\n" );
+
prep_rst2init_qpee_buf(inprm,
qp->snd_cq.cqn,
qp->rcv_cq.cqn,
/* to get the interface to the body of the program */
#include "nic.h"
+#define CREATE_OWN 1
+
#include "mt25218_imp.c"
#include "arbel.h"
#define MLX_RX_MAX_FILL NUM_IPOIB_RCV_WQES
struct mlx_nic {
+#if ! CREATE_OWN
/** Queue pair handle */
udqp_t ipoib_qph;
- /** Broadcast Address Vector */
- ud_av_t bcast_av;
/** Send completion queue */
cq_t snd_cqh;
/** Receive completion queue */
cq_t rcv_cqh;
+#endif
+ /** Broadcast Address Vector */
+ ud_av_t bcast_av;
/** RX fill level */
unsigned int rx_fill;
+
+#if CREATE_OWN
+	/** Send completion queue created with ib_create_cq() */
+	struct ib_completion_queue *own_send_cq;
+	/** Receive completion queue created with ib_create_cq() */
+	struct ib_completion_queue *own_recv_cq;
+	/** Queue pair created with ib_create_qp() */
+	struct ib_queue_pair *own_qp;
+#endif
};
static struct arbel static_arbel;
+#if ! CREATE_OWN
+
static struct arbel_completion_queue static_arbel_ipoib_send_cq = {
.ci_doorbell_idx = IPOIB_SND_CQ_CI_DB_IDX,
};
.dev_priv = &static_arbel_ipoib_qp,
};
+#endif
+
static struct ib_device static_ibdev = {
.dev_priv = &static_arbel,
};
memcpy ( &av.gid, ( ( void * ) bav ) + 16, 16 );
- rc = arbel_post_send ( &static_ibdev, &static_ipoib_qp, &av, iobuf );
+ rc = arbel_post_send ( &static_ibdev,
+#if CREATE_OWN
+ mlx->own_qp,
+#else
+ &static_ipoib_qp,
+#endif
+ &av, iobuf );
return rc;
}
break;
DBG ( "Posting RX buffer %p:\n", iobuf );
if ( ( rc = arbel_post_recv ( &static_ibdev,
+#if CREATE_OWN
+ mlx->own_qp,
+#else
&static_ipoib_qp,
+#endif
iobuf ) ) != 0 ) {
free_iob ( iobuf );
break;
}
/* Poll completion queues */
- arbel_poll_cq ( &static_ibdev, &static_ipoib_send_cq,
+ arbel_poll_cq ( &static_ibdev,
+#if CREATE_OWN
+ mlx->own_send_cq,
+#else
+ &static_ipoib_send_cq,
+#endif
temp_complete_send, temp_complete_recv );
- arbel_poll_cq ( &static_ibdev, &static_ipoib_recv_cq,
+ arbel_poll_cq ( &static_ibdev,
+#if CREATE_OWN
+ mlx->own_recv_cq,
+#else
+ &static_ipoib_recv_cq,
+#endif
temp_complete_send, temp_complete_recv );
mlx_refill_rx ( netdev );
opcode_modifier, op_mod,
go, 1 );
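+	/* Dump the HCR and up to 256 bytes of the command input for debugging */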
+ DBG_HD ( &hcr, sizeof ( hcr ) );
+ if ( in_len ) {
+ size_t dump_len = in_len;
+ if ( dump_len > 256 )
+ dump_len = 256;
+ DBG ( "Input:\n" );
+ DBG_HD ( in, dump_len );
+ }
+
/* Issue command */
for ( i = 0 ; i < ( sizeof ( hcr ) / sizeof ( hcr.u.dwords[0] ) ) ;
i++ ) {
hcr.u.dwords[4] = readl ( arbel->config + ARBEL_HCR_REG ( 4 ) );
memcpy ( out, out_buffer, out_len );
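+	/* Dump up to 256 bytes of the command output for debugging */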
+ if ( out_len ) {
+ size_t dump_len = out_len;
+ if ( dump_len > 256 )
+ dump_len = 256;
+ DBG ( "Output:\n" );
+ DBG_HD ( out, dump_len );
+ }
+
return 0;
}
struct arbelprm_recv_wqe *wqe;
struct arbelprm_recv_wqe *next_wqe;
unsigned int wqe_idx_mask;
+ size_t nds;
unsigned int i;
+ unsigned int j;
/* Allocate work queue */
arbel_recv_wq->wqe_size = ( num_wqes *
/* Link work queue entries */
wqe_idx_mask = ( num_wqes - 1 );
+ nds = ( ( offsetof ( typeof ( *wqe ), data ) +
+ sizeof ( wqe->data[0] ) ) >> 4 );
for ( i = 0 ; i < num_wqes ; i++ ) {
wqe = &arbel_recv_wq->wqe[i].recv;
next_wqe = &arbel_recv_wq->wqe[( i + 1 ) & wqe_idx_mask].recv;
MLX_FILL_1 ( &wqe->next, 0, nda_31_6,
( virt_to_bus ( next_wqe ) >> 6 ) );
+ MLX_FILL_1 ( &wqe->next, 1, nds, ( sizeof ( *wqe ) / 16 ) );
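+		/* Initialise all scatter entries with the invalid lkey */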
+ for ( j = 0 ; ( ( ( void * ) &wqe->data[j] ) <
+ ( ( void * ) ( wqe + 1 ) ) ) ; j++ ) {
+ MLX_FILL_1 ( &wqe->data[j], 1,
+ l_key, ARBEL_INVALID_LKEY );
+ }
}
return 0;
qpc_eec_data.msg_max, 11 /* 2^11 = 2048 */,
qpc_eec_data.log_rq_size, fls ( qp->recv.num_wqes - 1 ),
qpc_eec_data.log_rq_stride,
- ( fls ( sizeof ( arbel_qp->send.wqe[0] ) - 1 ) - 4 ),
+ ( fls ( sizeof ( arbel_qp->recv.wqe[0] ) - 1 ) - 4 ),
qpc_eec_data.log_sq_size, fls ( qp->send.num_wqes - 1 ),
qpc_eec_data.log_sq_stride,
- ( fls ( sizeof ( arbel_qp->recv.wqe[0] ) - 1 ) - 4 ) );
+ ( fls ( sizeof ( arbel_qp->send.wqe[0] ) - 1 ) - 4 ) );
MLX_FILL_1 ( &qpctx, 5,
qpc_eec_data.usr_page, arbel->limits.reserved_uars );
MLX_FILL_1 ( &qpctx, 10, qpc_eec_data.primary_address_path.port_number,
MLX_FILL_1 ( &wqe->ud, 8, destination_qp, av->dest_qp );
MLX_FILL_1 ( &wqe->ud, 9, q_key, av->qkey );
MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_len ( iobuf ) );
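+	/* Reference the send buffer via the device's reserved lkey */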
+ MLX_FILL_1 ( &wqe->data[0], 1, l_key, arbel->reserved_lkey );
MLX_FILL_1 ( &wqe->data[0], 3,
local_address_l, virt_to_bus ( iobuf->data ) );
/* Initialise hardware */
if ( ( rc = ib_driver_init ( pci, &qph ) ) != 0 )
goto err_ipoib_init;
+#if ! CREATE_OWN
mlx->ipoib_qph = qph;
mlx->bcast_av = ib_data.bcast_av;
mlx->snd_cqh = ib_data.ipoib_snd_cq;
mac = ( ( struct ib_mac * ) netdev->ll_addr );
mac->qpn = htonl ( ib_get_qpn ( mlx->ipoib_qph ) );
memcpy ( &mac->gid, ib_data.port_gid.raw, sizeof ( mac->gid ) );
+#endif
/* Hack up IB structures */
arbel->config = memfree_pci_dev.cr_space;
arbel->db_rec = dev_ib_data.uar_context_base;
arbel->reserved_lkey = dev_ib_data.mkey;
arbel->eqn = dev_ib_data.eq.eqn;
+#if ! CREATE_OWN
static_arbel_ipoib_qp.send.wqe =
( ( struct udqp_st * ) qph )->snd_wq;
static_arbel_ipoib_qp.recv.wqe =
&static_ipoib_send_cq.work_queues );
list_add ( &static_ipoib_qp.recv.list,
&static_ipoib_recv_cq.work_queues );
+#endif
static_ibdev.op = &arbel_ib_operations;
/* Get device limits */
arbel->limits.reserved_qps =
( 1 << MLX_GET ( &dev_lim, log2_rsvd_qps ) );
+#if CREATE_OWN
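+	/* Create our own CQs and QP in place of the legacy static queues.
+	 * NB: the error paths below return without destroying any CQ
+	 * already created here.
+	 */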
+ struct ib_device *ibdev = &static_ibdev;
+ mlx->own_send_cq = ib_create_cq ( ibdev, 32 );
+ if ( ! mlx->own_send_cq ) {
+ DBG ( "Could not create send CQ\n" );
+ return -EIO;
+ }
+ mlx->own_recv_cq = ib_create_cq ( ibdev, 32 );
+ if ( ! mlx->own_recv_cq ) {
+		DBG ( "Could not create receive CQ\n" );
+ return -EIO;
+ }
+ mlx->own_qp = ib_create_qp ( ibdev, NUM_IPOIB_SND_WQES,
+ mlx->own_send_cq, NUM_IPOIB_RCV_WQES,
+ mlx->own_recv_cq, ipoib_qkey );
+ if ( ! mlx->own_qp ) {
+ DBG ( "Could not create QP\n" );
+ return -EIO;
+ }
+ mlx->own_qp->owner_priv = netdev;
+
+ mac = ( ( struct ib_mac * ) netdev->ll_addr );
+ mac->qpn = htonl ( mlx->own_qp->qpn );
+ memcpy ( &mac->gid, ib_data.port_gid.raw, sizeof ( mac->gid ) );
+#endif
+
+#if 0
DBG ( "MADS SND CQN = %#lx\n", dev_ib_data.mads_qp.snd_cq.cqn );
struct ib_completion_queue *test_cq;
test_cq = ib_create_cq ( &static_ibdev, 32 );
if ( test_cq ) {
DBG ( "Woot: create_cq() passed!\n" );
}
+#endif
/* Register network device */
if ( ( rc = register_netdev ( netdev ) ) != 0 )