@@ ... @@ static bool process_negotiation_response(
struct smbdirect_recv_io *response, int packet_length)
{
struct smbdirect_socket *sc = response->socket;
- struct smbd_connection *info =
- container_of(sc, struct smbd_connection, socket);
struct smbdirect_socket_parameters *sp = &sc->parameters;
struct smbdirect_negotiate_resp *packet = smbdirect_recv_io_payload(response);
@@ ... @@
sp->max_read_write_size = min_t(u32,
le32_to_cpu(packet->max_readwrite_size),
- info->max_frmr_depth * PAGE_SIZE);
- info->max_frmr_depth = sp->max_read_write_size / PAGE_SIZE;
+ sp->max_frmr_depth * PAGE_SIZE);
+ sp->max_frmr_depth = sp->max_read_write_size / PAGE_SIZE;
sc->recv_io.expected = SMBDIRECT_EXPECT_DATA_TRANSFER;
return true;
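/*
 * The negotiate-response path above now reads and updates
 * max_frmr_depth through smbdirect_socket_parameters, so the
 * container_of() back-pointer to struct smbd_connection can go.
 * Below is a minimal user-space sketch of the same clamp; the
 * function name, the 4096-byte page size and the 2048 default depth
 * are illustrative assumptions, not the kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096u

static uint32_t clamp_read_write_size(uint32_t peer_max, uint32_t *frmr_depth)
{
	/* cap the peer's advertised size by what one MR can map ... */
	uint32_t local_cap = *frmr_depth * SKETCH_PAGE_SIZE;
	uint32_t size = peer_max < local_cap ? peer_max : local_cap;

	/* ... then shrink the depth to the pages actually needed */
	*frmr_depth = size / SKETCH_PAGE_SIZE;
	return size;
}

int main(void)
{
	uint32_t depth = 2048;	/* assumed smbd_max_frmr_depth default */
	uint32_t size = clamp_read_write_size(1048576, &depth);

	/* a 1 MiB peer limit wins over the 8 MiB local cap: depth drops to 256 */
	printf("max_read_write_size=%u max_frmr_depth=%u\n", size, depth);
	return 0;
}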
@@ ... @@ static int smbd_ia_open(struct smbd_connection *info,
struct sockaddr *dstaddr, int port)
{
struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
int rc;
WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_CREATED);
@@ ... @@
rc = -EPROTONOSUPPORT;
goto out2;
}
- info->max_frmr_depth = min_t(int,
- smbd_max_frmr_depth,
+ sp->max_frmr_depth = min_t(u32,
+ sp->max_frmr_depth,
sc->ib.dev->attrs.max_fast_reg_page_list_len);
info->mr_type = IB_MR_TYPE_MEM_REG;
if (sc->ib.dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
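/*
 * smbd_ia_open() clamps the configured sp->max_frmr_depth to the
 * device's max_fast_reg_page_list_len before any MR is allocated.
 * The MR type selection that follows (the if-branch body lies past
 * the end of this hunk) still lives in struct smbd_connection.
 */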
@@ ... @@ static struct smbd_connection *_smbd_get_connection(
sp->max_send_size = smbd_max_send_size;
sp->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
sp->max_recv_size = smbd_max_receive_size;
+ sp->max_frmr_depth = smbd_max_frmr_depth;
sp->keepalive_interval_msec = smbd_keep_alive_interval * 1000;
sp->keepalive_timeout_msec = KEEPALIVE_RECV_TIMEOUT * 1000;
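/*
 * Connection setup seeds sp->max_frmr_depth from the
 * smbd_max_frmr_depth module parameter alongside the other transport
 * defaults; smbd_ia_open() above then clamps it to the device limit.
 */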
@@ ... @@ static void smbd_mr_recovery_work(struct work_struct *work)
struct smbd_connection *info =
container_of(work, struct smbd_connection, mr_recovery_work);
struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct smbdirect_mr_io *smbdirect_mr;
int rc;
@@ ... @@
smbdirect_mr->mr = ib_alloc_mr(
sc->ib.pd, info->mr_type,
- info->max_frmr_depth);
+ sp->max_frmr_depth);
if (IS_ERR(smbdirect_mr->mr)) {
log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
info->mr_type,
- info->max_frmr_depth);
+ sp->max_frmr_depth);
smbd_disconnect_rdma_connection(info);
continue;
}
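/*
 * MR recovery re-allocates a failed MR with the same
 * sp->max_frmr_depth it was created with; if ib_alloc_mr() fails, the
 * RDMA connection is marked for disconnect and the loop moves on to
 * the next MR.
 */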
@@ ... @@ static int allocate_mr_list(struct smbd_connection *info)
struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
@@ ... @@
if (!smbdirect_mr)
goto cleanup_entries;
smbdirect_mr->mr = ib_alloc_mr(sc->ib.pd, info->mr_type,
- info->max_frmr_depth);
+ sp->max_frmr_depth);
if (IS_ERR(smbdirect_mr->mr)) {
log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
- info->mr_type, info->max_frmr_depth);
+ info->mr_type, sp->max_frmr_depth);
goto out;
}
- smbdirect_mr->sgt.sgl = kcalloc(info->max_frmr_depth,
+ smbdirect_mr->sgt.sgl = kcalloc(sp->max_frmr_depth,
sizeof(struct scatterlist),
GFP_KERNEL);
if (!smbdirect_mr->sgt.sgl) {
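/*
 * allocate_mr_list() sizes each MR's scatterlist to
 * sp->max_frmr_depth entries, matching the depth passed to
 * ib_alloc_mr(), so a later registration never needs more sg slots
 * than the MR can map.
 */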
@@ ... @@ struct smbdirect_mr_io *smbd_register_mr(
bool writing, bool need_invalidate)
{
struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct smbdirect_mr_io *smbdirect_mr;
int rc, num_pages;
enum dma_data_direction dir;
struct ib_reg_wr *reg_wr;
- num_pages = iov_iter_npages(iter, info->max_frmr_depth + 1);
- if (num_pages > info->max_frmr_depth) {
+ num_pages = iov_iter_npages(iter, sp->max_frmr_depth + 1);
+ if (num_pages > sp->max_frmr_depth) {
log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n",
- num_pages, info->max_frmr_depth);
+ num_pages, sp->max_frmr_depth);
WARN_ON_ONCE(1);
return NULL;
}
@@ ... @@
smbdirect_mr->sgt.orig_nents = 0;
log_rdma_mr(INFO, "num_pages=0x%x count=0x%zx depth=%u\n",
- num_pages, iov_iter_count(iter), info->max_frmr_depth);
- smbd_iter_to_mr(info, iter, &smbdirect_mr->sgt, info->max_frmr_depth);
+ num_pages, iov_iter_count(iter), sp->max_frmr_depth);
+ smbd_iter_to_mr(info, iter, &smbdirect_mr->sgt, sp->max_frmr_depth);
rc = ib_dma_map_sg(sc->ib.dev, smbdirect_mr->sgt.sgl,
smbdirect_mr->sgt.nents, dir);
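/*
 * smbd_register_mr() probes with iov_iter_npages(iter,
 * sp->max_frmr_depth + 1) so it can detect, WARN about, and reject an
 * iterator that needs more pages than one fast-registration MR can
 * map, before touching the scatterlist at all.
 */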