From: Stefan Metzmacher
Date: Thu, 21 Aug 2025 12:50:56 +0000 (+0200)
Subject: smb: client: move rdma_readwrite_threshold from smbd_connection to TCP_Server_Info
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=a8efb796db30288fab498156ef1fa03a34d88817;p=thirdparty%2Fkernel%2Fstable.git

smb: client: move rdma_readwrite_threshold from smbd_connection to TCP_Server_Info

This belongs to the SMB layer, not to the transport layer; it just uses
the negotiated transport parameters to adjust the value if needed.

Cc: Steve French
Cc: Tom Talpey
Cc: Long Li
Cc: linux-cifs@vger.kernel.org
Cc: samba-technical@lists.samba.org
Acked-by: Namjae Jeon
Signed-off-by: Stefan Metzmacher
Signed-off-by: Steve French
---

diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
index 55bf867be0e2b..9c9f15871c239 100644
--- a/fs/smb/client/cifs_debug.c
+++ b/fs/smb/client/cifs_debug.c
@@ -476,7 +476,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
 			   "max_readwrite_size: %u rdma_readwrite_threshold: %u",
 			   sp->keepalive_interval_msec * 1000,
 			   sp->max_read_write_size,
-			   server->smbd_conn->rdma_readwrite_threshold);
+			   server->rdma_readwrite_threshold);
 		seq_printf(m, "\nDebug count_get_receive_buffer: %llu "
 			"count_put_receive_buffer: %llu count_send_empty: %llu",
 			sc->statistics.get_receive_buffer,
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 0fae95cf81c43..80664f937fc73 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -814,6 +814,13 @@ struct TCP_Server_Info {
 	unsigned int max_read;
 	unsigned int max_write;
 	unsigned int min_offload;
+	/*
+	 * If payload is less than or equal to the threshold,
+	 * use RDMA send/recv to send upper layer I/O.
+	 * If payload is more than the threshold,
+	 * use RDMA read/write through memory registration for I/O.
+	 */
+	unsigned int rdma_readwrite_threshold;
 	unsigned int retrans;
 	struct {
 		bool requested; /* "compress" mount option set*/
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index c3b9d3f6210ff..1c63d2c9cc9c8 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -4411,7 +4411,7 @@ static inline bool smb3_use_rdma_offload(struct cifs_io_parms *io_parms)
 		return false;
 
 	/* offload also has its overhead, so only do it if desired */
-	if (io_parms->length < server->smbd_conn->rdma_readwrite_threshold)
+	if (io_parms->length < server->rdma_readwrite_threshold)
 		return false;
 
 	return true;
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index fe4127b53013a..09bf47c854a8a 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -518,10 +518,6 @@ static bool process_negotiation_response(
 	}
 	sp->max_fragmented_send_size =
 		le32_to_cpu(packet->max_fragmented_size);
-	info->rdma_readwrite_threshold =
-		rdma_readwrite_threshold > sp->max_fragmented_send_size ?
-		sp->max_fragmented_send_size :
-		rdma_readwrite_threshold;
 
 	sp->max_read_write_size =
 		min_t(u32,
@@ -1962,6 +1958,7 @@ struct smbd_connection *smbd_get_connection(
 	struct TCP_Server_Info *server, struct sockaddr *dstaddr)
 {
 	struct smbd_connection *ret;
+	const struct smbdirect_socket_parameters *sp;
 	int port = SMBD_PORT;
 
 try_again:
@@ -1972,6 +1969,16 @@ try_again:
 		port = SMB_PORT;
 		goto try_again;
 	}
+	if (!ret)
+		return NULL;
+
+	sp = &ret->socket.parameters;
+
+	server->rdma_readwrite_threshold =
+		rdma_readwrite_threshold > sp->max_fragmented_send_size ?
+		sp->max_fragmented_send_size :
+		rdma_readwrite_threshold;
+
 	return ret;
 }
 
diff --git a/fs/smb/client/smbdirect.h b/fs/smb/client/smbdirect.h
index 8ebbbc0b0499a..4eec2ac4ba80f 100644
--- a/fs/smb/client/smbdirect.h
+++ b/fs/smb/client/smbdirect.h
@@ -43,13 +43,6 @@ struct smbd_connection {
 	/* Memory registrations */
 	/* Maximum number of pages in a single RDMA write/read on this connection */
 	int max_frmr_depth;
-	/*
-	 * If payload is less than or equal to the threshold,
-	 * use RDMA send/recv to send upper layer I/O.
-	 * If payload is more than the threshold,
-	 * use RDMA read/write through memory registration for I/O.
-	 */
-	int rdma_readwrite_threshold;
 	enum ib_mr_type mr_type;
 	struct list_head mr_list;
 	spinlock_t mr_list_lock;
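
As a side note for readers without the surrounding code: the assignment added
to smbd_get_connection() above only caps the rdma_readwrite_threshold module
parameter at the negotiated max_fragmented_send_size. A minimal standalone
sketch of that clamp follows; the helper name cap_rdma_readwrite_threshold()
and the freestanding form are illustrative assumptions, not code from the
patch.

static unsigned int cap_rdma_readwrite_threshold(unsigned int requested,
						 unsigned int negotiated_max)
{
	/*
	 * Same effect as the ternary in the hunk above: payloads up to the
	 * (possibly lowered) threshold are sent via RDMA send/recv, larger
	 * ones use RDMA read/write through memory registration.
	 */
	return requested > negotiated_max ? negotiated_max : requested;
}

In kernel style the same clamp could equally be written with
min_t(unsigned int, ...), matching the min_t(u32, ...) already used for
sp->max_read_write_size in the smbdirect.c hunk above.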