return (void *)response->packet;
}
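+/*
+ * Allocate a send_io message from the socket's mempool and initialize it
+ * (socket back pointer, empty sibling_list, no SGEs mapped yet) so that it
+ * can later be released with smbd_free_send_io().
+ */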
+static struct smbdirect_send_io *smbd_alloc_send_io(struct smbdirect_socket *sc)
+{
+ struct smbdirect_send_io *msg;
+
+ msg = mempool_alloc(sc->send_io.mem.pool, GFP_KERNEL);
+ if (!msg)
+ return ERR_PTR(-ENOMEM);
+ msg->socket = sc;
+ INIT_LIST_HEAD(&msg->sibling_list);
+ msg->num_sge = 0;
+
+ return msg;
+}
+
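+/*
+ * Unmap any SGEs that were DMA-mapped for this message and return it to
+ * the socket's mempool.
+ */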
+static void smbd_free_send_io(struct smbdirect_send_io *msg)
+{
+ struct smbdirect_socket *sc = msg->socket;
+ size_t i;
+
+ /*
+ * The sibling_list must already be empty;
+ * the caller is expected to take care of that.
+ */
+ WARN_ON_ONCE(!list_empty(&msg->sibling_list));
+
+ /*
+ * Note that we call ib_dma_unmap_page() even if some SGEs were mapped
+ * using ib_dma_map_single().
+ *
+ * The difference between _single() and _page() only matters for the
+ * ib_dma_map_*() case.
+ *
+ * For the ib_dma_unmap_*() case it does not matter as both take the
+ * dma_addr_t and dma_unmap_single_attrs() is just an alias to
+ * dma_unmap_page_attrs().
+ */
+ for (i = 0; i < msg->num_sge; i++)
+ ib_dma_unmap_page(sc->ib.dev,
+ msg->sge[i].addr,
+ msg->sge[i].length,
+ DMA_TO_DEVICE);
+
+ mempool_free(msg, sc->send_io.mem.pool);
+}
+
/* Called when a RDMA send is done */
static void send_done(struct ib_cq *cq, struct ib_wc *wc)
{
- int i;
struct smbdirect_send_io *request =
container_of(wc->wr_cqe, struct smbdirect_send_io, cqe);
struct smbdirect_socket *sc = request->socket;
log_rdma_send(INFO, "smbdirect_send_io 0x%p completed wc->status=%s\n",
request, ib_wc_status_msg(wc->status));
- for (i = 0; i < request->num_sge; i++)
- ib_dma_unmap_single(sc->ib.dev,
- request->sge[i].addr,
- request->sge[i].length,
- DMA_TO_DEVICE);
- mempool_free(request, sc->send_io.mem.pool);
+ /* Note this frees wc->wr_cqe, but not wc */
+ smbd_free_send_io(request);
lcredits += 1;
if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
{
struct smbdirect_socket_parameters *sp = &sc->parameters;
struct ib_send_wr send_wr;
- int rc = -ENOMEM;
+ int rc;
struct smbdirect_send_io *request;
struct smbdirect_negotiate_req *packet;
- request = mempool_alloc(sc->send_io.mem.pool, GFP_KERNEL);
- if (!request)
- return rc;
-
- request->socket = sc;
+ request = smbd_alloc_send_io(sc);
+ if (IS_ERR(request))
+ return PTR_ERR(request);
packet = smbdirect_send_io_payload(request);
packet->min_version = cpu_to_le16(SMBDIRECT_V1);
packet->max_fragmented_size =
cpu_to_le32(sp->max_fragmented_recv_size);
- request->num_sge = 1;
request->sge[0].addr = ib_dma_map_single(
sc->ib.dev, (void *)packet,
sizeof(*packet), DMA_TO_DEVICE);
rc = -EIO;
goto dma_mapping_failed;
}
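+ /*
+ * Only count the SGE once the mapping has succeeded, so that
+ * smbd_free_send_io() never tries to unmap an unmapped address.
+ */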
+ request->num_sge = 1;
request->sge[0].length = sizeof(*packet);
request->sge[0].lkey = sc->ib.pd->local_dma_lkey;
/* if we reach here, post send failed */
log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
atomic_dec(&sc->send_io.pending.count);
- ib_dma_unmap_single(sc->ib.dev, request->sge[0].addr,
- request->sge[0].length, DMA_TO_DEVICE);
smbd_disconnect_rdma_connection(sc);
dma_mapping_failed:
- mempool_free(request, sc->send_io.mem.pool);
+ smbd_free_send_io(request);
return rc;
}
int *_remaining_data_length)
{
struct smbdirect_socket_parameters *sp = &sc->parameters;
- int i, rc;
+ int rc;
int header_length;
int data_length;
struct smbdirect_send_io *request;
goto err_wait_credit;
}
- request = mempool_alloc(sc->send_io.mem.pool, GFP_KERNEL);
- if (!request) {
- rc = -ENOMEM;
+ request = smbd_alloc_send_io(sc);
+ if (IS_ERR(request)) {
+ rc = PTR_ERR(request);
goto err_alloc;
}
- request->socket = sc;
memset(request->sge, 0, sizeof(request->sge));
/* Map the packet to DMA */
return 0;
err_dma:
- for (i = 0; i < request->num_sge; i++)
- if (request->sge[i].addr)
- ib_dma_unmap_single(sc->ib.dev,
- request->sge[i].addr,
- request->sge[i].length,
- DMA_TO_DEVICE);
- mempool_free(request, sc->send_io.mem.pool);
+ smbd_free_send_io(request);
err_alloc:
atomic_inc(&sc->send_io.credits.count);