From: Stefan Metzmacher
Date: Sun, 14 Sep 2025 23:41:44 +0000 (+0200)
Subject: smb: smbdirect: introduce smbdirect_map_sges_from_iter() and helper functions
X-Git-Tag: v7.1-rc1~128^2~124
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=8a4d6c0d4fc4a138c7569e081389f163d2cee389;p=thirdparty%2Fkernel%2Flinux.git

smb: smbdirect: introduce smbdirect_map_sges_from_iter() and helper functions

These are basically copies of smb_extract_iter_to_rdma() and its helpers
in the client, which will be replaced in the next steps.

The goal is to use them also in the server, which will simplify a lot.

Cc: Steve French
Cc: Tom Talpey
Cc: Long Li
Cc: Namjae Jeon
Cc: David Howells
Cc: linux-cifs@vger.kernel.org
Cc: samba-technical@lists.samba.org
Signed-off-by: Stefan Metzmacher
Acked-by: Namjae Jeon
Signed-off-by: Steve French
---

diff --git a/fs/smb/common/smbdirect/smbdirect_connection.c b/fs/smb/common/smbdirect/smbdirect_connection.c
index 8290e45464e33..3483fab39eb81 100644
--- a/fs/smb/common/smbdirect/smbdirect_connection.c
+++ b/fs/smb/common/smbdirect/smbdirect_connection.c
@@ -5,6 +5,19 @@
  */
 #include "smbdirect_internal.h"
+#include
+/*
+ * NOTE(review): the #include target above was lost in transit (the
+ * angle-bracketed header name was apparently eaten as an HTML tag, as
+ * were the Cc: email addresses) — restore it from the upstream commit.
+ */
+
+/*
+ * Accumulator state for translating an iov_iter into an array of ib_sge
+ * entries, DMA-mapping each fragment as it is added.
+ */
+struct smbdirect_map_sges {
+	struct ib_sge *sge;		/* caller-provided output array */
+	size_t num_sge;			/* number of entries filled so far */
+	size_t max_sge;			/* capacity of @sge */
+	struct ib_device *device;	/* device the fragments are DMA-mapped for */
+	u32 local_dma_lkey;		/* lkey stamped into every produced sge */
+	enum dma_data_direction direction; /* direction passed to ib_dma_map_page() */
+};
+
+static ssize_t smbdirect_map_sges_from_iter(struct iov_iter *iter, size_t len,
+					    struct smbdirect_map_sges *state);
 
 static void smbdirect_connection_destroy_mem_pools(struct smbdirect_socket *sc);
 
@@ -337,3 +350,245 @@ skip_free:
 
 	wake_up(&sc->send_io.pending.dec_wait_queue);
 }
+
+/*
+ * DMA-map one page fragment and append it to the sge array.
+ * Returns false when the array is full or the DMA mapping fails.
+ */
+static bool smbdirect_map_sges_single_page(struct smbdirect_map_sges *state,
+					   struct page *page, size_t off, size_t len)
+{
+	struct ib_sge *sge;
+	u64 addr;
+
+	/* No room left in the caller's sge array. */
+	if (state->num_sge >= state->max_sge)
+		return false;
+
+	addr = ib_dma_map_page(state->device, page,
+			       off, len, state->direction);
+	if (ib_dma_mapping_error(state->device, addr))
+		return false;
+
+	sge = 
&state->sge[state->num_sge++];
+	sge->addr = addr;
+	sge->length = len;
+	sge->lkey = state->local_dma_lkey;
+
+	return true;
+}
+
+/*
+ * Extract page fragments from a BVEC-class iterator and add them to an ib_sge
+ * list. The pages are not pinned.
+ */
+static ssize_t smbdirect_map_sges_from_bvec(struct iov_iter *iter,
+					    struct smbdirect_map_sges *state,
+					    ssize_t maxsize)
+{
+	const struct bio_vec *bv = iter->bvec;
+	unsigned long start = iter->iov_offset;
+	unsigned int i;
+	ssize_t ret = 0;
+
+	for (i = 0; i < iter->nr_segs; i++) {
+		size_t off, len;
+		bool ok;
+
+		/* Skip segments that lie wholly before the iterator offset. */
+		len = bv[i].bv_len;
+		if (start >= len) {
+			start -= len;
+			continue;
+		}
+
+		/* Clamp the fragment to the remaining requested size. */
+		len = min_t(size_t, maxsize, len - start);
+		off = bv[i].bv_offset + start;
+
+		ok = smbdirect_map_sges_single_page(state,
+						    bv[i].bv_page,
+						    off,
+						    len);
+		if (!ok)
+			return -EIO;
+
+		ret += len;
+		maxsize -= len;
+		/* Stop when the sge array is full or the request is satisfied. */
+		if (state->num_sge >= state->max_sge || maxsize <= 0)
+			break;
+		/* Offset only applies within the first consumed segment. */
+		start = 0;
+	}
+
+	/* Consume the mapped bytes from the iterator. */
+	if (ret > 0)
+		iov_iter_advance(iter, ret);
+	return ret;
+}
+
+/*
+ * Extract fragments from a KVEC-class iterator and add them to an ib_sge list.
+ * This can deal with vmalloc'd buffers as well as kmalloc'd or static buffers.
+ * The pages are not pinned.
+ */
+static ssize_t smbdirect_map_sges_from_kvec(struct iov_iter *iter,
+					    struct smbdirect_map_sges *state,
+					    ssize_t maxsize)
+{
+	const struct kvec *kv = iter->kvec;
+	unsigned long start = iter->iov_offset;
+	unsigned int i;
+	ssize_t ret = 0;
+
+	for (i = 0; i < iter->nr_segs; i++) {
+		struct page *page;
+		unsigned long kaddr;
+		size_t off, len, seg;
+
+		/* Skip segments that lie wholly before the iterator offset. */
+		len = kv[i].iov_len;
+		if (start >= len) {
+			start -= len;
+			continue;
+		}
+
+		/* Split the virtual range into page-aligned pieces. */
+		kaddr = (unsigned long)kv[i].iov_base + start;
+		off = kaddr & ~PAGE_MASK;
+		len = min_t(size_t, maxsize, len - start);
+		kaddr &= PAGE_MASK;
+
+		maxsize -= len;
+		do {
+			bool ok;
+
+			/* At most one page (minus the in-page offset) per sge. */
+			seg = min_t(size_t, len, PAGE_SIZE - off);
+
+			/*
+			 * vmalloc'd memory is not physically contiguous, so
+			 * each page must be looked up individually; linear
+			 * kernel addresses can use virt_to_page() directly.
+			 */
+			if (is_vmalloc_or_module_addr((void *)kaddr))
+				page = vmalloc_to_page((void *)kaddr);
+			else
+				page = virt_to_page((void *)kaddr);
+
+			ok = smbdirect_map_sges_single_page(state, page, off, seg);
+			if (!ok)
+				return -EIO;
+
+			ret += seg;
+			len -= seg;
+			kaddr += PAGE_SIZE;
+			off = 0;	/* only the first piece has an offset */
+		} while (len > 0 && state->num_sge < state->max_sge);
+
+		if (state->num_sge >= state->max_sge || maxsize <= 0)
+			break;
+		start = 0;
+	}
+
+	/* Consume the mapped bytes from the iterator. */
+	if (ret > 0)
+		iov_iter_advance(iter, ret);
+	return ret;
+}
+
+/*
+ * Extract folio fragments from a FOLIOQ-class iterator and add them to an
+ * ib_sge list. The folios are not pinned.
+ */
+static ssize_t smbdirect_map_sges_from_folioq(struct iov_iter *iter,
+					      struct smbdirect_map_sges *state,
+					      ssize_t maxsize)
+{
+	const struct folio_queue *folioq = iter->folioq;
+	unsigned int slot = iter->folioq_slot;
+	ssize_t ret = 0;
+	size_t offset = iter->iov_offset;
+
+	if (WARN_ON_ONCE(!folioq))
+		return -EIO;
+
+	/* The starting slot may already point past the current queue segment. */
+	if (slot >= folioq_nr_slots(folioq)) {
+		folioq = folioq->next;
+		if (WARN_ON_ONCE(!folioq))
+			return -EIO;
+		slot = 0;
+	}
+
+	do {
+		struct folio *folio = folioq_folio(folioq, slot);
+		size_t fsize = folioq_folio_size(folioq, slot);
+
+		if (offset < fsize) {
+			/* Map the remainder of this folio, capped at maxsize. */
+			size_t part = umin(maxsize, fsize - offset);
+			bool ok;
+
+			ok = smbdirect_map_sges_single_page(state,
+							    folio_page(folio, 0),
+							    offset,
+							    part);
+			if (!ok)
+				return -EIO;
+
+			offset += part;
+			ret += part;
+			maxsize -= part;
+		}
+
+		/* Folio fully consumed: advance to the next slot/queue segment. */
+		if (offset >= fsize) {
+			offset = 0;
+			slot++;
+			if (slot >= folioq_nr_slots(folioq)) {
+				if (!folioq->next) {
+					/* Ran off the end before draining the iterator. */
+					WARN_ON_ONCE(ret < iter->count);
+					break;
+				}
+				folioq = folioq->next;
+				slot = 0;
+			}
+		}
+	} while (state->num_sge < state->max_sge && maxsize > 0);
+
+	/* Advance the iterator in place (no iov_iter_advance() for FOLIOQ here). */
+	iter->folioq = folioq;
+	iter->folioq_slot = slot;
+	iter->iov_offset = offset;
+	iter->count -= ret;
+	return ret;
+}
+
+/*
+ * Extract page fragments from up to the given amount of the source iterator
+ * and build up an ib_sge list that refers to all of those bits. The ib_sge list
+ * is appended to, up to the maximum number of elements set in the parameter
+ * block.
+ *
+ * The extracted page fragments are not pinned or ref'd in any way; if an
+ * IOVEC/UBUF-type iterator is to be used, it should be converted to a
+ * BVEC-type iterator and the pages pinned, ref'd or otherwise held in some
+ * way.
+ */
+__maybe_unused /* this is temporary while this file is included in others */
+static ssize_t smbdirect_map_sges_from_iter(struct iov_iter *iter, size_t len,
+					    struct smbdirect_map_sges *state)
+{
+	ssize_t ret;
+	size_t before = state->num_sge;
+
+	/* Only source (data being sent) iterators are supported. */
+	if (WARN_ON_ONCE(iov_iter_rw(iter) != ITER_SOURCE))
+		return -EIO;
+
+	switch (iov_iter_type(iter)) {
+	case ITER_BVEC:
+		ret = smbdirect_map_sges_from_bvec(iter, state, len);
+		break;
+	case ITER_KVEC:
+		ret = smbdirect_map_sges_from_kvec(iter, state, len);
+		break;
+	case ITER_FOLIOQ:
+		ret = smbdirect_map_sges_from_folioq(iter, state, len);
+		break;
+	default:
+		WARN_ONCE(1, "iov_iter_type[%u]\n", iov_iter_type(iter));
+		return -EIO;
+	}
+
+	if (ret < 0) {
+		/*
+		 * Unwind the DMA mappings added by this call, i.e. the
+		 * entries in [before, num_sge).  Pre-decrement is required:
+		 * post-decrementing would read the never-initialised entry
+		 * at index num_sge (unmapping a garbage DMA address) and
+		 * would skip entry 'before' entirely.  This matches the
+		 * cleanup in the client's smb_extract_iter_to_rdma().
+		 */
+		while (state->num_sge > before) {
+			struct ib_sge *sge = &state->sge[--state->num_sge];
+
+			ib_dma_unmap_page(state->device,
+					  sge->addr,
+					  sge->length,
+					  state->direction);
+		}
+	}
+
+	return ret;
+}