/* SMB_DIRECT negotiation timeout in seconds */
#define SMB_DIRECT_NEGOTIATE_TIMEOUT 120
-#define SMB_DIRECT_MAX_SEND_SGES 6
-
/*
* Default maximum number of RDMA read/write outstanding on this connection
* This value is possibly decreased during QP creation on hardware limit
unsigned int remote_key;
};
-struct smb_direct_sendmsg {
- struct smb_direct_transport *transport;
- struct ib_send_wr wr;
- struct list_head list;
- int num_sge;
- struct ib_sge sge[SMB_DIRECT_MAX_SEND_SGES];
- struct ib_cqe cqe;
- u8 packet[];
-};
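The removed structure moves to the shared smbdirect layer as struct smbdirect_send_io. Its definition is not part of these hunks; judging only from how the rest of the patch uses it (msg->socket, msg->sibling_list, msg->num_sge, msg->sge[], msg->cqe, msg->wr and the trailing packet[] area), it presumably looks roughly like the sketch below. Field order and any additional members are assumptions.

/* Sketch of the assumed shared definition; exact layout is not shown by this diff. */
struct smbdirect_send_io {
	struct smbdirect_socket *socket;	/* replaces the old transport back-pointer */
	struct ib_cqe cqe;

	/* work request; siblings in one batch are chained via wr.next */
	struct ib_send_wr wr;
	/* links messages that belong to one smb_direct_send_ctx batch */
	struct list_head sibling_list;

	int num_sge;
	struct ib_sge sge[SMBDIRECT_SEND_IO_MAX_SGE];

	/* in-place SMB Direct packet (negotiate response or data-transfer header) */
	u8 packet[];
};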
-
struct smb_direct_rdma_rw_msg {
struct smb_direct_transport *t;
struct ib_cqe cqe;
ksmbd_conn_free(KSMBD_TRANS(t)->conn);
}
-static struct smb_direct_sendmsg
+static struct smbdirect_send_io
*smb_direct_alloc_sendmsg(struct smb_direct_transport *t)
{
- struct smb_direct_sendmsg *msg;
+ struct smbdirect_socket *sc = &t->socket;
+ struct smbdirect_send_io *msg;
msg = mempool_alloc(t->sendmsg_mempool, KSMBD_DEFAULT_GFP);
if (!msg)
return ERR_PTR(-ENOMEM);
- msg->transport = t;
- INIT_LIST_HEAD(&msg->list);
+ msg->socket = sc;
+ INIT_LIST_HEAD(&msg->sibling_list);
msg->num_sge = 0;
return msg;
}
static void smb_direct_free_sendmsg(struct smb_direct_transport *t,
- struct smb_direct_sendmsg *msg)
+ struct smbdirect_send_io *msg)
{
struct smbdirect_socket *sc = &t->socket;
int i;
static void send_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct smb_direct_sendmsg *sendmsg, *sibling;
+ struct smbdirect_send_io *sendmsg, *sibling;
struct smb_direct_transport *t;
+ struct smbdirect_socket *sc;
struct list_head *pos, *prev, *end;
- sendmsg = container_of(wc->wr_cqe, struct smb_direct_sendmsg, cqe);
- t = sendmsg->transport;
+ sendmsg = container_of(wc->wr_cqe, struct smbdirect_send_io, cqe);
+ sc = sendmsg->socket;
+ t = container_of(sc, struct smb_direct_transport, socket);
ksmbd_debug(RDMA, "Send completed. status='%s (%d)', opcode=%d\n",
ib_wc_status_msg(wc->status), wc->status,
/* iterate and free the list of messages in reverse. the list's head
* is invalid.
*/
- for (pos = &sendmsg->list, prev = pos->prev, end = sendmsg->list.next;
+ for (pos = &sendmsg->sibling_list, prev = pos->prev, end = sendmsg->sibling_list.next;
prev != end; pos = prev, prev = prev->prev) {
- sibling = container_of(pos, struct smb_direct_sendmsg, list);
+ sibling = container_of(pos, struct smbdirect_send_io, sibling_list);
smb_direct_free_sendmsg(t, sibling);
}
- sibling = container_of(pos, struct smb_direct_sendmsg, list);
+ sibling = container_of(pos, struct smbdirect_send_io, sibling_list);
smb_direct_free_sendmsg(t, sibling);
}
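For context on the loop above: only the last message of a batch is posted IB_SEND_SIGNALED, so send_done() runs once for that message and must also free its unsignaled siblings. The list head they were linked to lives in the caller's smb_direct_send_ctx (an on-stack structure) and may already be gone by completion time, hence the backwards walk that stops at the stale head instead of dereferencing it. A rough picture of what the handler sees, with purely illustrative names A/B/C:

/*
 * Sketch of the chain for a batch A -> B -> C posted from one
 * smb_direct_send_ctx:
 *
 *   ctx.msg_list (caller's stack, already invalid)
 *       <-> A.sibling_list <-> B.sibling_list <-> C.sibling_list <-> (circular)
 *
 * C is the only signaled message, so wc->wr_cqe resolves to C. The walk
 * therefore starts at C, frees C and B via ->prev, stops once ->prev
 * reaches the stale head (found as C.sibling_list.next in the circular
 * list, never dereferenced as a send_io), and the final statement after
 * the loop frees A.
 */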
struct smb_direct_send_ctx *send_ctx,
bool is_last)
{
- struct smb_direct_sendmsg *first, *last;
+ struct smbdirect_send_io *first, *last;
int ret;
if (list_empty(&send_ctx->msg_list))
return 0;
first = list_first_entry(&send_ctx->msg_list,
- struct smb_direct_sendmsg,
- list);
+ struct smbdirect_send_io,
+ sibling_list);
last = list_last_entry(&send_ctx->msg_list,
- struct smb_direct_sendmsg,
- list);
+ struct smbdirect_send_io,
+ sibling_list);
last->wr.send_flags = IB_SEND_SIGNALED;
last->wr.wr_cqe = &last->cqe;
atomic_add(send_ctx->wr_cnt, &t->send_credits);
wake_up(&t->wait_send_credits);
list_for_each_entry_safe(first, last, &send_ctx->msg_list,
- list) {
+ sibling_list) {
smb_direct_free_sendmsg(t, first);
}
}
static int smb_direct_create_header(struct smb_direct_transport *t,
int size, int remaining_data_length,
- struct smb_direct_sendmsg **sendmsg_out)
+ struct smbdirect_send_io **sendmsg_out)
{
struct smbdirect_socket *sc = &t->socket;
struct smbdirect_socket_parameters *sp = &sc->parameters;
- struct smb_direct_sendmsg *sendmsg;
+ struct smbdirect_send_io *sendmsg;
struct smbdirect_data_transfer *packet;
int header_length;
int ret;
static int post_sendmsg(struct smb_direct_transport *t,
struct smb_direct_send_ctx *send_ctx,
- struct smb_direct_sendmsg *msg)
+ struct smbdirect_send_io *msg)
{
struct smbdirect_socket *sc = &t->socket;
int i;
msg->wr.wr_cqe = NULL;
msg->wr.send_flags = 0;
if (!list_empty(&send_ctx->msg_list)) {
- struct smb_direct_sendmsg *last;
+ struct smbdirect_send_io *last;
last = list_last_entry(&send_ctx->msg_list,
- struct smb_direct_sendmsg,
- list);
+ struct smbdirect_send_io,
+ sibling_list);
last->wr.next = &msg->wr;
}
- list_add_tail(&msg->list, &send_ctx->msg_list);
+ list_add_tail(&msg->sibling_list, &send_ctx->msg_list);
send_ctx->wr_cnt++;
return 0;
}
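To make the renamed fields easier to follow, here is a hedged sketch of how a caller batches messages with the helpers touched above. The flush helper's name is not visible in these hunks; the sketch assumes it is smb_direct_flush_send_list() as in mainline ksmbd, and it omits send-credit accounting and the unwinding of already-queued siblings on error.

/* Sketch only: batch two sends so that a single completion frees both. */
static int example_post_two(struct smb_direct_transport *t)
{
	struct smb_direct_send_ctx ctx;
	struct smbdirect_send_io *msg;
	int ret, i;

	smb_direct_send_ctx_init(t, &ctx, false, 0);

	for (i = 0; i < 2; i++) {
		msg = smb_direct_alloc_sendmsg(t);
		if (IS_ERR(msg))
			return PTR_ERR(msg);
		/* ... fill msg->sge[], msg->num_sge and msg->packet ... */
		ret = post_sendmsg(t, &ctx, msg);	/* chains via wr.next + sibling_list */
		if (ret) {
			smb_direct_free_sendmsg(t, msg);
			return ret;
		}
	}

	/*
	 * Posts the chain; only the last wr is IB_SEND_SIGNALED, and its
	 * send_done() completion frees every sibling in the chain.
	 */
	return smb_direct_flush_send_list(t, &ctx, true);
}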
{
struct smbdirect_socket *sc = &t->socket;
int i, j, ret;
- struct smb_direct_sendmsg *msg;
+ struct smbdirect_send_io *msg;
int data_length;
- struct scatterlist sg[SMB_DIRECT_MAX_SEND_SGES - 1];
+ struct scatterlist sg[SMBDIRECT_SEND_IO_MAX_SGE - 1];
ret = wait_for_send_credits(t, send_ctx);
if (ret)
struct ib_sge *sge;
int sg_cnt;
- sg_init_table(sg, SMB_DIRECT_MAX_SEND_SGES - 1);
+ sg_init_table(sg, SMBDIRECT_SEND_IO_MAX_SGE - 1);
sg_cnt = get_mapped_sg_list(sc->ib.dev,
iov[i].iov_base, iov[i].iov_len,
- sg, SMB_DIRECT_MAX_SEND_SGES - 1,
+ sg, SMBDIRECT_SEND_IO_MAX_SGE - 1,
DMA_TO_DEVICE);
if (sg_cnt <= 0) {
pr_err("failed to map buffer\n");
ret = -ENOMEM;
goto err;
- } else if (sg_cnt + msg->num_sge > SMB_DIRECT_MAX_SEND_SGES) {
+ } else if (sg_cnt + msg->num_sge > SMBDIRECT_SEND_IO_MAX_SGE) {
pr_err("buffer not fitted into sges\n");
ret = -E2BIG;
ib_dma_unmap_sg(sc->ib.dev, sg, sg_cnt,
smb_direct_send_ctx_init(st, &send_ctx, need_invalidate, remote_key);
while (remaining_data_length) {
- struct kvec vecs[SMB_DIRECT_MAX_SEND_SGES - 1]; /* minus smbdirect hdr */
+ struct kvec vecs[SMBDIRECT_SEND_IO_MAX_SGE - 1]; /* minus smbdirect hdr */
size_t possible_bytes = max_iov_size;
size_t possible_vecs;
size_t bytes = 0;
{
struct smbdirect_socket *sc = &t->socket;
struct smbdirect_socket_parameters *sp = &sc->parameters;
- struct smb_direct_sendmsg *sendmsg;
+ struct smbdirect_send_io *sendmsg;
struct smbdirect_negotiate_resp *resp;
int ret;
*/
sp->max_send_size = smb_direct_max_send_size;
max_send_sges = DIV_ROUND_UP(sp->max_send_size, PAGE_SIZE) + 3;
- if (max_send_sges > SMB_DIRECT_MAX_SEND_SGES) {
+ if (max_send_sges > SMBDIRECT_SEND_IO_MAX_SGE) {
pr_err("max_send_size %d is too large\n", sp->max_send_size);
return -EINVAL;
}
return -EINVAL;
}
- if (device->attrs.max_send_sge < SMB_DIRECT_MAX_SEND_SGES) {
+ if (device->attrs.max_send_sge < SMBDIRECT_SEND_IO_MAX_SGE) {
pr_err("warning: device max_send_sge = %d too small\n",
device->attrs.max_send_sge);
return -EINVAL;
cap->max_send_wr = max_send_wrs;
cap->max_recv_wr = sp->recv_credit_max;
- cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES;
+ cap->max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE;
cap->max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE;
cap->max_inline_data = 0;
cap->max_rdma_ctxs = t->max_rw_credits;
int i;
struct smbdirect_recv_io *recvmsg;
- snprintf(name, sizeof(name), "smb_direct_rqst_pool_%p", t);
+ snprintf(name, sizeof(name), "smbdirect_send_io_pool_%p", t);
t->sendmsg_cache = kmem_cache_create(name,
- sizeof(struct smb_direct_sendmsg) +
+ sizeof(struct smbdirect_send_io) +
sizeof(struct smbdirect_negotiate_resp),
0, SLAB_HWCACHE_ALIGN, NULL);
if (!t->sendmsg_cache)
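The cache is sized as sizeof(struct smbdirect_send_io) plus sizeof(struct smbdirect_negotiate_resp), presumably because the trailing packet[] flexible array must hold the largest message built in place, and the negotiate response is larger than the smbdirect_data_transfer header used for normal sends. A sketch of how that area is reused, consistent with the negotiate-response hunk above; the helper name is hypothetical and not part of this patch:

/* Sketch: the packet[] area doubles as the in-place wire buffer, which is
 * why the cache reserves sizeof(struct smbdirect_negotiate_resp) behind
 * the structure itself.
 */
static struct smbdirect_negotiate_resp *
example_negotiate_resp(struct smbdirect_send_io *sendmsg)
{
	return (struct smbdirect_negotiate_resp *)sendmsg->packet;
}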