bool full_packet_received;
wait_queue_head_t wait_status;
- int max_send_size;
- int max_recv_size;
- int max_fragmented_send_size;
- int max_fragmented_recv_size;
- int max_rdma_rw_size;
-
spinlock_t reassembly_queue_lock;
struct list_head reassembly_queue;
int reassembly_data_length;
spinlock_t receive_credit_lock;
int recv_credits;
int count_avail_recvmsg;
- int recv_credit_max;
int recv_credit_target;
spinlock_t recvmsg_queue_lock;
struct list_head recvmsg_queue;
- int send_credit_target;
atomic_t send_credits;
spinlock_t lock_new_recv_credits;
int new_recv_credits;
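For reference, the fields removed above migrate into the shared parameters
struct reached via sc->parameters in the hunks below, with the transport's
max_rdma_rw_size renamed to max_read_write_size. A minimal sketch, assuming
the layout of the common header (fs/smb/common/smbdirect/smbdirect.h): the
field names match the sp-> accesses in this patch, while the types and the
keepalive members are assumptions:

struct smbdirect_socket_parameters {
	__u16 recv_credit_max;
	__u16 send_credit_target;
	__u32 max_send_size;
	__u32 max_fragmented_send_size;
	__u32 max_recv_size;
	__u32 max_fragmented_recv_size;
	__u32 max_read_write_size;
	__u32 keepalive_interval_msec;	/* assumed; not used in these hunks */
	__u32 keepalive_timeout_msec;	/* assumed; not used in these hunks */
} __packed;

Note that recv_credit_target does not move: the init hunk below still sets
it through t->, so only the fixed negotiation-time parameters migrate.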
struct smb_direct_recvmsg *recvmsg;
struct smb_direct_transport *t;
struct smbdirect_socket *sc;
+ struct smbdirect_socket_parameters *sp;
recvmsg = container_of(wc->wr_cqe, struct smb_direct_recvmsg, cqe);
t = recvmsg->transport;
sc = &t->socket;
+ sp = &sc->parameters;
if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
put_recvmsg(t, recvmsg);
smb_direct_disconnect_rdma_connection(t);
return;
}
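/* Validate the peer-supplied lengths: each must fit the negotiated
* limit, and the u64 casts keep the 32-bit sum from wrapping before
* the comparison.
*/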
- if (remaining_data_length > t->max_fragmented_recv_size ||
- data_length > t->max_fragmented_recv_size ||
+ if (remaining_data_length > sp->max_fragmented_recv_size ||
+ data_length > sp->max_fragmented_recv_size ||
(u64)remaining_data_length + (u64)data_length >
- (u64)t->max_fragmented_recv_size) {
+ (u64)sp->max_fragmented_recv_size) {
put_recvmsg(t, recvmsg);
smb_direct_disconnect_rdma_connection(t);
return;
struct smb_direct_recvmsg *recvmsg)
{
struct smbdirect_socket *sc = &t->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct ib_recv_wr wr;
int ret;
recvmsg->sge.addr = ib_dma_map_single(sc->ib.dev,
- recvmsg->packet, t->max_recv_size,
+ recvmsg->packet,
+ sp->max_recv_size,
DMA_FROM_DEVICE);
ret = ib_dma_mapping_error(sc->ib.dev, recvmsg->sge.addr);
if (ret)
return ret;
- recvmsg->sge.length = t->max_recv_size;
+ recvmsg->sge.length = sp->max_recv_size;
recvmsg->sge.lkey = sc->ib.pd->local_dma_lkey;
recvmsg->cqe.done = recv_done;
struct smb_direct_sendmsg **sendmsg_out)
{
struct smbdirect_socket *sc = &t->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct smb_direct_sendmsg *sendmsg;
struct smbdirect_data_transfer *packet;
int header_length;
/* Fill in the packet header */
packet = (struct smbdirect_data_transfer *)sendmsg->packet;
- packet->credits_requested = cpu_to_le16(t->send_credit_target);
+ packet->credits_requested = cpu_to_le16(sp->send_credit_target);
packet->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));
packet->flags = 0;
{
struct smb_direct_transport *st = smb_trans_direct_transfort(t);
struct smbdirect_socket *sc = &st->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
size_t remaining_data_length;
size_t iov_idx;
size_t iov_ofs;
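/* Usable payload per send is the negotiated send size minus the
* smbdirect data-transfer header that precedes it.
*/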
- size_t max_iov_size = st->max_send_size -
+ size_t max_iov_size = sp->max_send_size -
sizeof(struct smbdirect_data_transfer);
int ret;
struct smb_direct_send_ctx send_ctx;
bool is_read)
{
struct smbdirect_socket *sc = &t->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct smb_direct_rdma_rw_msg *msg, *next_msg;
int i, ret;
DECLARE_COMPLETION_ONSTACK(completion);
if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
return -ENOTCONN;
- if (buf_len > t->max_rdma_rw_size)
+ if (buf_len > sp->max_read_write_size)
return -EINVAL;
/* calculate needed credits */
int failed)
{
struct smbdirect_socket *sc = &t->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct smb_direct_sendmsg *sendmsg;
struct smbdirect_negotiate_resp *resp;
int ret;
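/* Advertise the server's (now shared) parameters back to the peer
* in the negotiate response.
*/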
resp->negotiated_version = SMB_DIRECT_VERSION_LE;
resp->reserved = 0;
resp->credits_requested =
- cpu_to_le16(t->send_credit_target);
+ cpu_to_le16(sp->send_credit_target);
resp->credits_granted = cpu_to_le16(manage_credits_prior_sending(t));
- resp->max_readwrite_size = cpu_to_le32(t->max_rdma_rw_size);
- resp->preferred_send_size = cpu_to_le32(t->max_send_size);
- resp->max_receive_size = cpu_to_le32(t->max_recv_size);
+ resp->max_readwrite_size = cpu_to_le32(sp->max_read_write_size);
+ resp->preferred_send_size = cpu_to_le32(sp->max_send_size);
+ resp->max_receive_size = cpu_to_le32(sp->max_recv_size);
resp->max_fragmented_size =
- cpu_to_le32(t->max_fragmented_recv_size);
+ cpu_to_le32(sp->max_fragmented_recv_size);
}
sendmsg->sge[0].addr = ib_dma_map_single(sc->ib.dev,
struct ib_qp_cap *cap)
{
struct smbdirect_socket *sc = &t->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct ib_device *device = sc->ib.dev;
int max_send_sges, max_rw_wrs, max_send_wrs;
unsigned int max_sge_per_wr, wrs_per_credit;
/* Need 3 more SGEs, because an SMB_DIRECT header, an SMB2 header,
* and an SMB2 response could each be mapped.
*/
- t->max_send_size = smb_direct_max_send_size;
- max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 3;
+ sp->max_send_size = smb_direct_max_send_size;
+ max_send_sges = DIV_ROUND_UP(sp->max_send_size, PAGE_SIZE) + 3;
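/* Worked example with hypothetical defaults: max_send_size = 8192
* and 4 KiB pages give DIV_ROUND_UP(8192, 4096) + 3 = 5 SGEs.
*/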
if (max_send_sges > SMB_DIRECT_MAX_SEND_SGES) {
- pr_err("max_send_size %d is too large\n", t->max_send_size);
+ pr_err("max_send_size %d is too large\n", sp->max_send_size);
return -EINVAL;
}
* are needed for MR registration, RDMA R/W, local & remote
* MR invalidation.
*/
- t->max_rdma_rw_size = smb_direct_max_read_write_size;
+ sp->max_read_write_size = smb_direct_max_read_write_size;
t->pages_per_rw_credit = smb_direct_get_max_fr_pages(t);
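/* Size R/W credits by (pages_per_rw_credit - 1) full pages per
* credit: an unaligned buffer can spill into one extra page, so a
* page of slack is kept per credit (matching the +1 in pages_per_rw
* below).
*/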
- t->max_rw_credits = DIV_ROUND_UP(t->max_rdma_rw_size,
+ t->max_rw_credits = DIV_ROUND_UP(sp->max_read_write_size,
(t->pages_per_rw_credit - 1) *
PAGE_SIZE);
t->recv_credits = 0;
t->count_avail_recvmsg = 0;
- t->recv_credit_max = smb_direct_receive_credit_max;
+ sp->recv_credit_max = smb_direct_receive_credit_max;
t->recv_credit_target = 10;
t->new_recv_credits = 0;
- t->send_credit_target = smb_direct_send_credit_target;
+ sp->send_credit_target = smb_direct_send_credit_target;
atomic_set(&t->send_credits, 0);
atomic_set(&t->rw_credits, t->max_rw_credits);
- t->max_send_size = smb_direct_max_send_size;
- t->max_recv_size = smb_direct_max_receive_size;
- t->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size;
+ sp->max_send_size = smb_direct_max_send_size;
+ sp->max_recv_size = smb_direct_max_receive_size;
+ sp->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size;
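/* The QP must accept one receive WR per receive credit. */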
cap->max_send_wr = max_send_wrs;
- cap->max_recv_wr = t->recv_credit_max;
+ cap->max_recv_wr = sp->recv_credit_max;
cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES;
cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES;
cap->max_inline_data = 0;
static int smb_direct_create_pools(struct smb_direct_transport *t)
{
+ struct smbdirect_socket *sc = &t->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
char name[80];
int i;
struct smb_direct_recvmsg *recvmsg;
if (!t->sendmsg_cache)
return -ENOMEM;
- t->sendmsg_mempool = mempool_create(t->send_credit_target,
+ t->sendmsg_mempool = mempool_create(sp->send_credit_target,
mempool_alloc_slab, mempool_free_slab,
t->sendmsg_cache);
if (!t->sendmsg_mempool)
snprintf(name, sizeof(name), "smb_direct_resp_%p", t);
t->recvmsg_cache = kmem_cache_create(name,
sizeof(struct smb_direct_recvmsg) +
- t->max_recv_size,
+ sp->max_recv_size,
0, SLAB_HWCACHE_ALIGN, NULL);
if (!t->recvmsg_cache)
goto err;
t->recvmsg_mempool =
- mempool_create(t->recv_credit_max, mempool_alloc_slab,
+ mempool_create(sp->recv_credit_max, mempool_alloc_slab,
mempool_free_slab, t->recvmsg_cache);
if (!t->recvmsg_mempool)
goto err;
INIT_LIST_HEAD(&t->recvmsg_queue);
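/* Pre-fill the queue with one buffer per receive credit so the
* receive path can draw from it instead of allocating.
*/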
- for (i = 0; i < t->recv_credit_max; i++) {
+ for (i = 0; i < sp->recv_credit_max; i++) {
recvmsg = mempool_alloc(t->recvmsg_mempool, KSMBD_DEFAULT_GFP);
if (!recvmsg)
goto err;
recvmsg->sge.length = 0;
list_add(&recvmsg->list, &t->recvmsg_queue);
}
- t->count_avail_recvmsg = t->recv_credit_max;
+ t->count_avail_recvmsg = sp->recv_credit_max;
return 0;
err:
struct ib_qp_cap *cap)
{
struct smbdirect_socket *sc = &t->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
int ret;
struct ib_qp_init_attr qp_attr;
int pages_per_rw;
}
sc->ib.recv_cq = ib_alloc_cq(sc->ib.dev, t,
- t->recv_credit_max, 0, IB_POLL_WORKQUEUE);
+ sp->recv_credit_max, 0, IB_POLL_WORKQUEUE);
if (IS_ERR(sc->ib.recv_cq)) {
pr_err("Can't create RDMA recv CQ\n");
ret = PTR_ERR(sc->ib.recv_cq);
sc->ib.qp = sc->rdma.cm_id->qp;
sc->rdma.cm_id->event_handler = smb_direct_cm_handler;
- pages_per_rw = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
+ pages_per_rw = DIV_ROUND_UP(sp->max_read_write_size, PAGE_SIZE) + 1;
if (pages_per_rw > sc->ib.dev->attrs.max_sgl_rd) {
ret = ib_mr_pool_init(sc->ib.qp, &sc->ib.qp->rdma_mrs,
t->max_rw_credits, IB_MR_TYPE_MEM_REG,
{
struct smb_direct_transport *st = smb_trans_direct_transfort(t);
struct smbdirect_socket *sc = &st->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct smb_direct_recvmsg *recvmsg;
struct smbdirect_negotiate_req *req;
int ret;
goto out;
req = (struct smbdirect_negotiate_req *)recvmsg->packet;
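/* Clamp the server's sizes to what the peer advertised in its
* negotiate request.
*/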
- st->max_recv_size = min_t(int, st->max_recv_size,
+ sp->max_recv_size = min_t(int, sp->max_recv_size,
le32_to_cpu(req->preferred_send_size));
- st->max_send_size = min_t(int, st->max_send_size,
+ sp->max_send_size = min_t(int, sp->max_send_size,
le32_to_cpu(req->max_receive_size));
- st->max_fragmented_send_size =
+ sp->max_fragmented_send_size =
le32_to_cpu(req->max_fragmented_size);
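/* Advertise half of the total receive window (recv_credit_max *
* max_recv_size) as the reassembled-request limit.
*/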
- st->max_fragmented_recv_size =
- (st->recv_credit_max * st->max_recv_size) / 2;
+ sp->max_fragmented_recv_size =
+ (sp->recv_credit_max * sp->max_recv_size) / 2;
ret = smb_direct_send_negotiate_response(st, ret);
out: