* @v ibdev Infiniband device
* @v cq Completion queue
* @v cqe Hardware completion queue entry
- * @v complete_send Send completion handler
- * @v complete_recv Receive completion handler
* @ret rc Return status code
*/
static int arbel_complete ( struct ib_device *ibdev,
struct ib_completion_queue *cq,
- union arbelprm_completion_entry *cqe,
- ib_completer_t complete_send,
- ib_completer_t complete_recv ) {
+ union arbelprm_completion_entry *cqe ) {
struct arbel *arbel = ib_get_drvdata ( ibdev );
struct ib_completion completion;
struct ib_work_queue *wq;
struct arbel_recv_work_queue *arbel_recv_wq;
struct arbelprm_recv_wqe *recv_wqe;
struct io_buffer *iobuf;
- ib_completer_t complete;
unsigned int opcode;
unsigned long qpn;
int is_send;
}
/* Pass off to caller's completion handler */
- complete = ( is_send ? complete_send : complete_recv );
- complete ( ibdev, qp, &completion, iobuf );
+ if ( is_send ) {
+ ib_complete_send ( ibdev, qp, &completion, iobuf );
+ } else {
+ ib_complete_recv ( ibdev, qp, &completion, iobuf );
+ }
return rc;
}
*
* @v ibdev Infiniband device
* @v cq Completion queue
- * @v complete_send Send completion handler
- * @v complete_recv Receive completion handler
*/
static void arbel_poll_cq ( struct ib_device *ibdev,
- struct ib_completion_queue *cq,
- ib_completer_t complete_send,
- ib_completer_t complete_recv ) {
+ struct ib_completion_queue *cq ) {
struct arbel *arbel = ib_get_drvdata ( ibdev );
struct arbel_completion_queue *arbel_cq = ib_cq_get_drvdata ( cq );
struct arbelprm_cq_ci_db_record *ci_db_rec;
}
/* Handle completion */
- if ( ( rc = arbel_complete ( ibdev, cq, cqe, complete_send,
- complete_recv ) ) != 0 ) {
+ if ( ( rc = arbel_complete ( ibdev, cq, cqe ) ) != 0 ) {
DBGC ( arbel, "Arbel %p failed to complete: %s\n",
arbel, strerror ( rc ) );
DBGC_HD ( arbel, cqe, sizeof ( *cqe ) );
* @v ibdev Infiniband device
* @v cq Completion queue
* @v cqe Hardware completion queue entry
- * @v complete_send Send completion handler
- * @v complete_recv Receive completion handler
* @ret rc Return status code
*/
static int hermon_complete ( struct ib_device *ibdev,
struct ib_completion_queue *cq,
- union hermonprm_completion_entry *cqe,
- ib_completer_t complete_send,
- ib_completer_t complete_recv ) {
+ union hermonprm_completion_entry *cqe ) {
struct hermon *hermon = ib_get_drvdata ( ibdev );
struct ib_completion completion;
struct ib_work_queue *wq;
struct ib_queue_pair *qp;
struct hermon_queue_pair *hermon_qp;
struct io_buffer *iobuf;
- ib_completer_t complete;
unsigned int opcode;
unsigned long qpn;
int is_send;
}
/* Pass off to caller's completion handler */
- complete = ( is_send ? complete_send : complete_recv );
- complete ( ibdev, qp, &completion, iobuf );
+ if ( is_send ) {
+ ib_complete_send ( ibdev, qp, &completion, iobuf );
+ } else {
+ ib_complete_recv ( ibdev, qp, &completion, iobuf );
+ }
return rc;
}
*
* @v ibdev Infiniband device
* @v cq Completion queue
- * @v complete_send Send completion handler
- * @v complete_recv Receive completion handler
*/
static void hermon_poll_cq ( struct ib_device *ibdev,
- struct ib_completion_queue *cq,
- ib_completer_t complete_send,
- ib_completer_t complete_recv ) {
+ struct ib_completion_queue *cq ) {
struct hermon *hermon = ib_get_drvdata ( ibdev );
struct hermon_completion_queue *hermon_cq = ib_cq_get_drvdata ( cq );
union hermonprm_completion_entry *cqe;
DBGCP_HD ( hermon, cqe, sizeof ( *cqe ) );
/* Handle completion */
- if ( ( rc = hermon_complete ( ibdev, cq, cqe, complete_send,
- complete_recv ) ) != 0 ) {
+ if ( ( rc = hermon_complete ( ibdev, cq, cqe ) ) != 0 ) {
DBGC ( hermon, "Hermon %p failed to complete: %s\n",
hermon, strerror ( rc ) );
DBGC_HD ( hermon, cqe, sizeof ( *cqe ) );
*
* @v ipoib IPoIB device
* @v qset Queue set
+ * @v num_cqes Number of completion queue entries
+ * @v num_send_wqes Number of send work queue entries
+ * @v complete_send Send completion handler
+ * @v num_recv_wqes Number of receive work queue entries
+ * @v complete_recv Receive completion handler
+ * @v qkey Queue key
* @ret rc Return status code
*/
static int ipoib_create_qset ( struct ipoib_device *ipoib,
struct ipoib_queue_set *qset,
unsigned int num_cqes,
unsigned int num_send_wqes,
+ ib_completer_t complete_send,
unsigned int num_recv_wqes,
+ ib_completer_t complete_recv,
unsigned long qkey ) {
struct ib_device *ibdev = ipoib->ibdev;
int rc;
qset->recv_max_fill = num_recv_wqes;
/* Allocate completion queue */
- qset->cq = ib_create_cq ( ibdev, num_cqes );
+ qset->cq = ib_create_cq ( ibdev, num_cqes, complete_send,
+ complete_recv );
if ( ! qset->cq ) {
DBGC ( ipoib, "IPoIB %p could not allocate completion queue\n",
ipoib );
struct ipoib_device *ipoib = netdev->priv;
struct ib_device *ibdev = ipoib->ibdev;
- ib_poll_cq ( ibdev, ipoib->meta.cq, ipoib_meta_complete_send,
- ipoib_meta_complete_recv );
- ib_poll_cq ( ibdev, ipoib->data.cq, ipoib_data_complete_send,
- ipoib_data_complete_recv );
+ ib_poll_cq ( ibdev, ipoib->meta.cq );
+ ib_poll_cq ( ibdev, ipoib->data.cq );
ipoib_refill_recv ( ipoib, &ipoib->meta );
ipoib_refill_recv ( ipoib, &ipoib->data );
}
if ( ( rc = ipoib_create_qset ( ipoib, &ipoib->meta,
IPOIB_META_NUM_CQES,
IPOIB_META_NUM_SEND_WQES,
+ ipoib_meta_complete_send,
IPOIB_META_NUM_RECV_WQES,
+ ipoib_meta_complete_recv,
IB_GLOBAL_QKEY ) ) != 0 ) {
DBGC ( ipoib, "IPoIB %p could not allocate metadata QP: %s\n",
ipoib, strerror ( rc ) );
if ( ( rc = ipoib_create_qset ( ipoib, &ipoib->data,
IPOIB_DATA_NUM_CQES,
IPOIB_DATA_NUM_SEND_WQES,
+ ipoib_data_complete_send,
IPOIB_DATA_NUM_RECV_WQES,
+ ipoib_data_complete_recv,
IB_GLOBAL_QKEY ) ) != 0 ) {
DBGC ( ipoib, "IPoIB %p could not allocate data QP: %s\n",
ipoib, strerror ( rc ) );
IB_MODIFY_QKEY = 0x0001,
};
-/** An Infiniband Completion Queue */
-struct ib_completion_queue {
- /** Completion queue number */
- unsigned long cqn;
- /** Number of completion queue entries */
- unsigned int num_cqes;
- /** Next completion queue entry index
- *
- * This is the index of the next entry to be filled (i.e. the
- * first empty entry). This value is not bounded by num_wqes;
- * users must logical-AND with (num_wqes-1) to generate an
- * array index.
- */
- unsigned long next_idx;
- /** List of work queues completing to this queue */
- struct list_head work_queues;
- /** Driver private data */
- void *drv_priv;
-};
-
/** An Infiniband completion */
struct ib_completion {
/** Syndrome
size_t len;
};
+/** Infiniband completion syndromes */
+enum ib_syndrome {
+ IB_SYN_NONE = 0,
+ IB_SYN_LOCAL_LENGTH = 1,
+ IB_SYN_LOCAL_QP = 2,
+ IB_SYN_LOCAL_PROT = 4,
+};
+
/** An Infiniband completion handler
*
* @v ibdev Infiniband device
struct ib_completion *completion,
struct io_buffer *iobuf );
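
For reference, handlers matching this typedef might look roughly like the sketch below. This is not part of the patch; the example_* names are invented, and the free-on-error policy is just one plausible choice. It uses only calls and fields visible in this interface (free_iob(), iob_put(), completion->syndrome, completion->len).

	/* Hypothetical completion handlers matching ib_completer_t; the
	 * handler owns the I/O buffer once it is invoked.
	 */
	static void example_complete_send ( struct ib_device *ibdev __unused,
					    struct ib_queue_pair *qp __unused,
					    struct ib_completion *completion __unused,
					    struct io_buffer *iobuf ) {
		/* Transmit buffer is finished with, whatever the outcome */
		free_iob ( iobuf );
	}

	static void example_complete_recv ( struct ib_device *ibdev __unused,
					    struct ib_queue_pair *qp __unused,
					    struct ib_completion *completion,
					    struct io_buffer *iobuf ) {
		if ( completion->syndrome != IB_SYN_NONE ) {
			/* Errored or flushed completion (e.g.
			 * IB_SYN_LOCAL_QP on QP destruction): discard.
			 */
			free_iob ( iobuf );
			return;
		}
		/* completion->len bytes of data were received */
		iob_put ( iobuf, completion->len );
		/* ... hand iobuf to the upper layer, which takes ownership ... */
	}
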
+/** An Infiniband Completion Queue */
+struct ib_completion_queue {
+ /** Completion queue number */
+ unsigned long cqn;
+ /** Number of completion queue entries */
+ unsigned int num_cqes;
+ /** Next completion queue entry index
+ *
+ * This is the index of the next entry to be filled (i.e. the
+	 * first empty entry).  This value is not bounded by num_cqes;
+	 * users must logical-AND with (num_cqes-1) to generate an
+ * array index.
+ */
+ unsigned long next_idx;
+ /** List of work queues completing to this queue */
+ struct list_head work_queues;
+ /** Send completion handler */
+ ib_completer_t complete_send;
+ /** Receive completion handler */
+ ib_completer_t complete_recv;
+ /** Driver private data */
+ void *drv_priv;
+};
+
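
As the next_idx comment above says, the counter is free-running; a consumer masks it with (num_cqes - 1) to obtain an array slot. A minimal illustration, assuming num_cqes is a power of two (the helper name is hypothetical):

	/* Minimal sketch: map the free-running next_idx onto a CQE slot.
	 * Assumes num_cqes is a power of two.
	 */
	static unsigned int example_cqe_index ( struct ib_completion_queue *cq ) {
		return ( cq->next_idx & ( cq->num_cqes - 1 ) );
	}
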
/** An Infiniband Address Vector */
struct ib_address_vector {
/** Destination Queue Pair */
*
* @v ibdev Infiniband device
* @v cq Completion queue
- * @v complete_send Send completion handler
- * @v complete_recv Receive completion handler
*
- * The completion handler takes ownership of the I/O buffer.
+ * The relevant completion handler (specified at completion
+ * queue creation time) takes ownership of the I/O buffer.
*/
void ( * poll_cq ) ( struct ib_device *ibdev,
- struct ib_completion_queue *cq,
- ib_completer_t complete_send,
- ib_completer_t complete_recv );
+ struct ib_completion_queue *cq );
/**
* Poll event queue
*
void *owner_priv;
};
-extern struct ib_completion_queue * ib_create_cq ( struct ib_device *ibdev,
- unsigned int num_cqes );
+extern struct ib_completion_queue *
+ib_create_cq ( struct ib_device *ibdev, unsigned int num_cqes,
+ ib_completer_t complete_send, ib_completer_t complete_recv );
extern void ib_destroy_cq ( struct ib_device *ibdev,
struct ib_completion_queue *cq );
extern struct ib_queue_pair *
return ibdev->op->post_recv ( ibdev, qp, iobuf );
}
+/**
+ * Complete send work queue entry
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @v completion Completion
+ * @v iobuf I/O buffer
+ */
+static inline __attribute__ (( always_inline )) void
+ib_complete_send ( struct ib_device *ibdev, struct ib_queue_pair *qp,
+ struct ib_completion *completion,
+ struct io_buffer *iobuf ) {
+ return qp->send.cq->complete_send ( ibdev, qp, completion, iobuf );
+}
+
+/**
+ * Complete receive work queue entry
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @v completion Completion
+ * @v iobuf I/O buffer
+ */
+static inline __attribute__ (( always_inline )) void
+ib_complete_recv ( struct ib_device *ibdev, struct ib_queue_pair *qp,
+ struct ib_completion *completion,
+ struct io_buffer *iobuf ) {
+ return qp->recv.cq->complete_recv ( ibdev, qp, completion, iobuf );
+}
+
/**
* Poll completion queue
*
* @v ibdev Infiniband device
* @v cq Completion queue
- * @v complete_send Send completion handler
- * @v complete_recv Receive completion handler
*/
static inline __attribute__ (( always_inline )) void
-ib_poll_cq ( struct ib_device *ibdev, struct ib_completion_queue *cq,
- ib_completer_t complete_send, ib_completer_t complete_recv ) {
- ibdev->op->poll_cq ( ibdev, cq, complete_send, complete_recv );
+ib_poll_cq ( struct ib_device *ibdev, struct ib_completion_queue *cq ) {
+ ibdev->op->poll_cq ( ibdev, cq );
}
/**
*
* @v ibdev Infiniband device
* @v num_cqes Number of completion queue entries
+ * @v complete_send Send completion handler
+ * @v complete_recv Receive completion handler
* @ret cq New completion queue
*/
-struct ib_completion_queue * ib_create_cq ( struct ib_device *ibdev,
- unsigned int num_cqes ) {
+struct ib_completion_queue *
+ib_create_cq ( struct ib_device *ibdev, unsigned int num_cqes,
+ ib_completer_t complete_send, ib_completer_t complete_recv ) {
struct ib_completion_queue *cq;
int rc;
return NULL;
cq->num_cqes = num_cqes;
INIT_LIST_HEAD ( &cq->work_queues );
+ cq->complete_send = complete_send;
+ cq->complete_recv = complete_recv;
/* Perform device-specific initialisation and get CQN */
if ( ( rc = ibdev->op->create_cq ( ibdev, cq ) ) != 0 ) {
* @v qp Queue pair
*/
void ib_destroy_qp ( struct ib_device *ibdev, struct ib_queue_pair *qp ) {
+ struct ib_completion completion = {
+ .syndrome = IB_SYN_LOCAL_QP,
+ };
+ struct io_buffer *iobuf;
+ unsigned int i;
+
DBGC ( ibdev, "IBDEV %p destroying QPN %#lx\n",
ibdev, qp->qpn );
+
+ /* Perform device-specific destruction */
ibdev->op->destroy_qp ( ibdev, qp );
+
+ /* Complete any remaining I/O buffers with errors */
+ for ( i = 0 ; i < qp->send.num_wqes ; i++ ) {
+ if ( ( iobuf = qp->send.iobufs[i] ) != NULL )
+ ib_complete_send ( ibdev, qp, &completion, iobuf );
+ }
+ for ( i = 0 ; i < qp->recv.num_wqes ; i++ ) {
+ if ( ( iobuf = qp->recv.iobufs[i] ) != NULL )
+ ib_complete_recv ( ibdev, qp, &completion, iobuf );
+ }
+
+ /* Remove work queues from completion queue */
list_del ( &qp->send.list );
list_del ( &qp->recv.list );
+
+ /* Free QP */
free ( qp );
}
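
Seen from a consumer, teardown after this change looks roughly like the sketch below; example_close is hypothetical and only the ib_* calls shown in this patch are used. The point is that no separate draining loop is needed any more.

	/* Hypothetical teardown path; qp was created against cq elsewhere */
	static void example_close ( struct ib_device *ibdev,
				    struct ib_queue_pair *qp,
				    struct ib_completion_queue *cq ) {
		/* Outstanding send/receive buffers are flushed through the
		 * CQ's complete_send/complete_recv handlers with syndrome
		 * IB_SYN_LOCAL_QP.
		 */
		ib_destroy_qp ( ibdev, qp );
		ib_destroy_cq ( ibdev, cq );
	}

One consequence of this design is that completion handlers must tolerate being invoked from ib_destroy_qp() itself, not only from within ib_poll_cq().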