[infiniband] Allow queue pairs to have a custom allocator for receive iobufs
author    Michael Brown <mcb30@ipxe.org>
          Wed, 29 Aug 2012 21:11:58 +0000 (22:11 +0100)
committer Michael Brown <mcb30@ipxe.org>
          Fri, 31 Aug 2012 20:22:57 +0000 (21:22 +0100)
Signed-off-by: Michael Brown <mcb30@ipxe.org>
src/drivers/infiniband/hermon.c
src/drivers/net/ipoib.c
src/include/ipxe/infiniband.h
src/net/infiniband.c
src/net/infiniband/ib_cmrc.c
src/net/infiniband/ib_mi.c

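This commit adds a per-queue-pair ib_queue_pair_operations structure whose alloc_iob method is used by ib_refill_recv() to allocate receive I/O buffers; all existing consumers (hermon Ethernet, IPoIB, CMRC and the management interface) simply point the hook at the plain alloc_iob(). As a minimal sketch of how the hook could be used, and assuming hypothetical names (my_alloc_recv_iob, my_qp_op and MY_HEADROOM are illustrative, not part of this commit), a consumer could now supply an allocator with its own buffer layout:

#include <ipxe/iobuf.h>
#include <ipxe/infiniband.h>

/* Illustrative headroom to reserve in each receive buffer (assumption) */
#define MY_HEADROOM 16

/* Hypothetical custom receive buffer allocator: over-allocate and
 * reserve headroom so that a header can later be prepended without
 * reallocating the buffer.
 */
static struct io_buffer * my_alloc_recv_iob ( size_t len ) {
	struct io_buffer *iobuf;

	iobuf = alloc_iob ( len + MY_HEADROOM );
	if ( iobuf )
		iob_reserve ( iobuf, MY_HEADROOM );
	return iobuf;
}

/* Hypothetical queue pair operations using the custom allocator */
static struct ib_queue_pair_operations my_qp_op = {
	.alloc_iob = my_alloc_recv_iob,
};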
diff --git a/src/drivers/infiniband/hermon.c b/src/drivers/infiniband/hermon.c
index e7db0359c501a8716a053bb1fd2c2daa0e012123..ff95e95f44470d62593bdfbcb57474a39d33a3e4 100644
@@ -3128,6 +3128,11 @@ static int hermon_eth_transmit ( struct net_device *netdev,
        return 0;
 }
 
+/** Hermon Ethernet queue pair operations */
+static struct ib_queue_pair_operations hermon_eth_qp_op = {
+       .alloc_iob = alloc_iob,
+};
+
 /**
  * Handle Hermon Ethernet device send completion
  *
@@ -3225,7 +3230,8 @@ static int hermon_eth_open ( struct net_device *netdev ) {
        /* Allocate queue pair */
        port->eth_qp = ib_create_qp ( ibdev, IB_QPT_ETH,
                                      HERMON_ETH_NUM_SEND_WQES, port->eth_cq,
-                                     HERMON_ETH_NUM_RECV_WQES, port->eth_cq );
+                                     HERMON_ETH_NUM_RECV_WQES, port->eth_cq,
+                                     &hermon_eth_qp_op );
        if ( ! port->eth_qp ) {
                DBGC ( hermon, "Hermon %p port %d could not create queue "
                       "pair\n", hermon, ibdev->port );
diff --git a/src/drivers/net/ipoib.c b/src/drivers/net/ipoib.c
index 44afa2b61d7d216fd6e3c64ed9ff7cf52f092062..8d4cc49ebc164db63ef6433dd7b3bf3f948302b3 100644
@@ -534,6 +534,11 @@ static struct ib_completion_queue_operations ipoib_cq_op = {
        .complete_recv = ipoib_complete_recv,
 };
 
+/** IPoIB queue pair operations */
+static struct ib_queue_pair_operations ipoib_qp_op = {
+       .alloc_iob = alloc_iob,
+};
+
 /**
  * Poll IPoIB network device
  *
@@ -667,9 +672,9 @@ static int ipoib_open ( struct net_device *netdev ) {
        }
 
        /* Allocate queue pair */
-       ipoib->qp = ib_create_qp ( ibdev, IB_QPT_UD,
-                                  IPOIB_NUM_SEND_WQES, ipoib->cq,
-                                  IPOIB_NUM_RECV_WQES, ipoib->cq );
+       ipoib->qp = ib_create_qp ( ibdev, IB_QPT_UD, IPOIB_NUM_SEND_WQES,
+                                  ipoib->cq, IPOIB_NUM_RECV_WQES, ipoib->cq,
+                                  &ipoib_qp_op );
        if ( ! ipoib->qp ) {
                DBGC ( ipoib, "IPoIB %p could not allocate queue pair\n",
                       ipoib );
diff --git a/src/include/ipxe/infiniband.h b/src/include/ipxe/infiniband.h
index f97a5d4fe2a47d7fcc8b620a4a016480d0898603..1a64eef6c895792c60878b0d6dc2a85d93a2a18b 100644
@@ -142,6 +142,16 @@ enum ib_queue_pair_type {
        IB_QPT_ETH,
 };
 
+/** Infiniband queue pair operations */
+struct ib_queue_pair_operations {
+       /** Allocate receive I/O buffer
+        *
+        * @v len               Maximum receive length
+        * @ret iobuf           I/O buffer (or NULL if out of memory)
+        */
+       struct io_buffer * ( * alloc_iob ) ( size_t len );
+};
+
 /** An Infiniband Queue Pair */
 struct ib_queue_pair {
        /** Containing Infiniband device */
@@ -169,6 +179,8 @@ struct ib_queue_pair {
        struct list_head mgids;
        /** Address vector */
        struct ib_address_vector av;
+       /** Queue pair operations */
+       struct ib_queue_pair_operations *op;
        /** Driver private data */
        void *drv_priv;
        /** Queue owner private data */
@@ -478,8 +490,8 @@ extern void ib_poll_cq ( struct ib_device *ibdev,
 extern struct ib_queue_pair *
 ib_create_qp ( struct ib_device *ibdev, enum ib_queue_pair_type type,
               unsigned int num_send_wqes, struct ib_completion_queue *send_cq,
-              unsigned int num_recv_wqes,
-              struct ib_completion_queue *recv_cq );
+              unsigned int num_recv_wqes, struct ib_completion_queue *recv_cq,
+              struct ib_queue_pair_operations *op );
 extern int ib_modify_qp ( struct ib_device *ibdev, struct ib_queue_pair *qp );
 extern void ib_destroy_qp ( struct ib_device *ibdev,
                            struct ib_queue_pair *qp );
diff --git a/src/net/infiniband.c b/src/net/infiniband.c
index c17b6181d7686e475f4ba8a99a92caf3e26533dd..a50b7a035335663d9983bfe5c98dc0808c17f6d1 100644
@@ -168,6 +168,7 @@ void ib_poll_cq ( struct ib_device *ibdev,
  * @v send_cq          Send completion queue
  * @v num_recv_wqes    Number of receive work queue entries
  * @v recv_cq          Receive completion queue
+ * @v op               Queue pair operations
  * @ret qp             Queue pair
  *
  * The queue pair will be left in the INIT state; you must call
@@ -178,7 +179,8 @@ struct ib_queue_pair * ib_create_qp ( struct ib_device *ibdev,
                                      unsigned int num_send_wqes,
                                      struct ib_completion_queue *send_cq,
                                      unsigned int num_recv_wqes,
-                                     struct ib_completion_queue *recv_cq ) {
+                                     struct ib_completion_queue *recv_cq,
+                                     struct ib_queue_pair_operations *op ) {
        struct ib_queue_pair *qp;
        size_t total_size;
        int rc;
@@ -210,6 +212,7 @@ struct ib_queue_pair * ib_create_qp ( struct ib_device *ibdev,
        qp->recv.iobufs = ( ( ( void * ) qp ) + sizeof ( *qp ) +
                            ( num_send_wqes * sizeof ( qp->send.iobufs[0] ) ));
        INIT_LIST_HEAD ( &qp->mgids );
+       qp->op = op;
 
        /* Perform device-specific initialisation and get QPN */
        if ( ( rc = ibdev->op->create_qp ( ibdev, qp ) ) != 0 ) {
@@ -514,7 +517,7 @@ void ib_refill_recv ( struct ib_device *ibdev, struct ib_queue_pair *qp ) {
        while ( qp->recv.fill < qp->recv.num_wqes ) {
 
                /* Allocate I/O buffer */
-               iobuf = alloc_iob ( IB_MAX_PAYLOAD_SIZE );
+               iobuf = qp->op->alloc_iob ( IB_MAX_PAYLOAD_SIZE );
                if ( ! iobuf ) {
                        /* Non-fatal; we will refill on next attempt */
                        return;
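The hunk above is where the new hook takes effect: ib_refill_recv() now delegates receive buffer allocation to the queue pair's operations structure rather than calling alloc_iob() directly, while keeping the existing behaviour of treating an allocation failure as non-fatal and retrying on the next refill pass.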
diff --git a/src/net/infiniband/ib_cmrc.c b/src/net/infiniband/ib_cmrc.c
index 369e2e906e5092855697a3a05ef698b4341fd20a..dd623ddbf4cfe5105595e0ed5a536284f941597a 100644
@@ -257,6 +257,11 @@ static struct ib_completion_queue_operations ib_cmrc_completion_ops = {
        .complete_recv = ib_cmrc_complete_recv,
 };
 
+/** Infiniband CMRC queue pair operations */
+static struct ib_queue_pair_operations ib_cmrc_queue_pair_ops = {
+       .alloc_iob = alloc_iob,
+};
+
 /**
  * Send data via CMRC
  *
@@ -410,7 +415,8 @@ int ib_cmrc_open ( struct interface *xfer, struct ib_device *ibdev,
 
        /* Create queue pair */
        cmrc->qp = ib_create_qp ( ibdev, IB_QPT_RC, IB_CMRC_NUM_SEND_WQES,
-                                 cmrc->cq, IB_CMRC_NUM_RECV_WQES, cmrc->cq );
+                                 cmrc->cq, IB_CMRC_NUM_RECV_WQES, cmrc->cq,
+                                 &ib_cmrc_queue_pair_ops );
        if ( ! cmrc->qp ) {
                DBGC ( cmrc, "CMRC %p could not create queue pair\n", cmrc );
                rc = -ENOMEM;
diff --git a/src/net/infiniband/ib_mi.c b/src/net/infiniband/ib_mi.c
index ced2eea17b706371e2ed19aa68f7ee7a3ba242f1..31fe71a4838700a5902743cbbbb436f48122a48d 100644
@@ -164,6 +164,11 @@ static struct ib_completion_queue_operations ib_mi_completion_ops = {
        .complete_recv = ib_mi_complete_recv,
 };
 
+/** Management interface queue pair operations */
+static struct ib_queue_pair_operations ib_mi_queue_pair_ops = {
+       .alloc_iob = alloc_iob,
+};
+
 /**
  * Transmit MAD
  *
@@ -353,7 +358,8 @@ struct ib_mad_interface * ib_create_mi ( struct ib_device *ibdev,
 
        /* Create queue pair */
        mi->qp = ib_create_qp ( ibdev, type, IB_MI_NUM_SEND_WQES, mi->cq,
-                               IB_MI_NUM_RECV_WQES, mi->cq );
+                               IB_MI_NUM_RECV_WQES, mi->cq,
+                               &ib_mi_queue_pair_ops );
        if ( ! mi->qp ) {
                DBGC ( mi, "MI %p could not allocate queue pair\n", mi );
                goto err_create_qp;