int count_avail_recvmsg;
int recv_credit_target;
- spinlock_t recvmsg_queue_lock;
- struct list_head recvmsg_queue;
-
atomic_t send_credits;
spinlock_t lock_new_recv_credits;
int new_recv_credits;
+/*
+ * Pop one free receive I/O descriptor from the socket's free list.
+ *
+ * Returns the detached smbdirect_recv_io, or NULL when the free list
+ * is empty.  The list is protected by sc->recv_io.free.lock (taken
+ * with plain spin_lock here; NOTE(review): this assumes no IRQ-context
+ * callers — confirm against the completion handlers).
+ */
static struct
smbdirect_recv_io *get_free_recvmsg(struct smb_direct_transport *t)
{
+ struct smbdirect_socket *sc = &t->socket;
struct smbdirect_recv_io *recvmsg = NULL;
- spin_lock(&t->recvmsg_queue_lock);
- if (!list_empty(&t->recvmsg_queue)) {
- recvmsg = list_first_entry(&t->recvmsg_queue,
+ spin_lock(&sc->recv_io.free.lock);
+ if (!list_empty(&sc->recv_io.free.list)) {
+ recvmsg = list_first_entry(&sc->recv_io.free.list,
struct smbdirect_recv_io,
list);
+ /* Detach under the lock so no other context can see it on the list. */
list_del(&recvmsg->list);
}
- spin_unlock(&t->recvmsg_queue_lock);
+ spin_unlock(&sc->recv_io.free.lock);
return recvmsg;
}
recvmsg->sge.length = 0;
}
- spin_lock(&t->recvmsg_queue_lock);
- list_add(&recvmsg->list, &t->recvmsg_queue);
- spin_unlock(&t->recvmsg_queue_lock);
+ spin_lock(&sc->recv_io.free.lock);
+ list_add(&recvmsg->list, &sc->recv_io.free.list);
+ spin_unlock(&sc->recv_io.free.lock);
}
static void enqueue_reassembly(struct smb_direct_transport *t,
sc->ib.dev = sc->rdma.cm_id->device;
+ INIT_LIST_HEAD(&sc->recv_io.free.list);
+ spin_lock_init(&sc->recv_io.free.lock);
+
sc->status = SMBDIRECT_SOCKET_CREATED;
init_waitqueue_head(&t->wait_status);
init_waitqueue_head(&t->wait_rw_credits);
spin_lock_init(&t->receive_credit_lock);
- spin_lock_init(&t->recvmsg_queue_lock);
- INIT_LIST_HEAD(&t->recvmsg_queue);
init_waitqueue_head(&t->wait_send_pending);
atomic_set(&t->send_pending, 0);
if (!t->recvmsg_mempool)
goto err;
- INIT_LIST_HEAD(&t->recvmsg_queue);
-
for (i = 0; i < sp->recv_credit_max; i++) {
recvmsg = mempool_alloc(t->recvmsg_mempool, KSMBD_DEFAULT_GFP);
if (!recvmsg)
goto err;
recvmsg->socket = sc;
recvmsg->sge.length = 0;
- list_add(&recvmsg->list, &t->recvmsg_queue);
+ list_add(&recvmsg->list, &sc->recv_io.free.list);
}
t->count_avail_recvmsg = sp->recv_credit_max;