From: Leon Romanovsky Date: Fri, 13 Feb 2026 10:57:37 +0000 (+0200) Subject: RDMA: Move DMA block iterator logic into dedicated files X-Git-Tag: v7.1-rc1~75^2~109 X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=6094ea64c69520ed1e770e7c79c43412de202bfa;p=thirdparty%2Fkernel%2Flinux.git RDMA: Move DMA block iterator logic into dedicated files The DMA iterator logic was mixed into verbs and umem-specific code, forcing all users to include rdma/ib_umem.h. Move the block iterator logic into iter.c and rdma/iter.h so that rdma/ib_umem.h and rdma/ib_verbs.h can be separated in a follow-up patch. Link: https://patch.msgid.link/20260213-refactor-umem-v1-1-f3be85847922@nvidia.com Signed-off-by: Leon Romanovsky --- diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index a2a7a9d2e0d3..deffb03c4574 100644 --- a/drivers/infiniband/core/Makefile +++ b/drivers/infiniband/core/Makefile @@ -12,7 +12,7 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \ roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \ multicast.o mad.o smi.o agent.o mad_rmpp.o \ nldev.o restrack.o counters.o ib_core_uverbs.o \ - trace.o lag.o + trace.o lag.o iter.o ib_core-$(CONFIG_SECURITY_INFINIBAND) += security.o ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o diff --git a/drivers/infiniband/core/iter.c b/drivers/infiniband/core/iter.c new file mode 100644 index 000000000000..8e543d100657 --- /dev/null +++ b/drivers/infiniband/core/iter.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. 
*/
+
+#include <linux/export.h>
+#include <rdma/iter.h>
+
+void __rdma_block_iter_start(struct ib_block_iter *biter,
+			     struct scatterlist *sglist, unsigned int nents,
+			     unsigned long pgsz)
+{
+	memset(biter, 0, sizeof(struct ib_block_iter));
+	biter->__sg = sglist;
+	biter->__sg_nents = nents;
+
+	/* Driver provides best block size to use */
+	biter->__pg_bit = __fls(pgsz);
+}
+EXPORT_SYMBOL(__rdma_block_iter_start);
+
+bool __rdma_block_iter_next(struct ib_block_iter *biter)
+{
+	unsigned int block_offset;
+	unsigned int delta;
+
+	if (!biter->__sg_nents || !biter->__sg)
+		return false;
+
+	biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
+	block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
+	delta = BIT_ULL(biter->__pg_bit) - block_offset;
+
+	while (biter->__sg_nents && biter->__sg &&
+	       sg_dma_len(biter->__sg) - biter->__sg_advance <= delta) {
+		delta -= sg_dma_len(biter->__sg) - biter->__sg_advance;
+		biter->__sg_advance = 0;
+		biter->__sg = sg_next(biter->__sg);
+		biter->__sg_nents--;
+	}
+	biter->__sg_advance += delta;
+
+	return true;
+}
+EXPORT_SYMBOL(__rdma_block_iter_next);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 575b4a4b200b..dc2c46f3bf64 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -3154,44 +3154,6 @@ int rdma_init_netdev(struct ib_device *device, u32 port_num,
 }
 EXPORT_SYMBOL(rdma_init_netdev);
 
-void __rdma_block_iter_start(struct ib_block_iter *biter,
-			     struct scatterlist *sglist, unsigned int nents,
-			     unsigned long pgsz)
-{
-	memset(biter, 0, sizeof(struct ib_block_iter));
-	biter->__sg = sglist;
-	biter->__sg_nents = nents;
-
-	/* Driver provides best block size to use */
-	biter->__pg_bit = __fls(pgsz);
-}
-EXPORT_SYMBOL(__rdma_block_iter_start);
-
-bool __rdma_block_iter_next(struct ib_block_iter *biter)
-{
-	unsigned int block_offset;
-	unsigned int delta;
-
-	if (!biter->__sg_nents || !biter->__sg)
-		return false;
-
-	biter->__dma_addr = 
sg_dma_address(biter->__sg) + biter->__sg_advance; - block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1); - delta = BIT_ULL(biter->__pg_bit) - block_offset; - - while (biter->__sg_nents && biter->__sg && - sg_dma_len(biter->__sg) - biter->__sg_advance <= delta) { - delta -= sg_dma_len(biter->__sg) - biter->__sg_advance; - biter->__sg_advance = 0; - biter->__sg = sg_next(biter->__sg); - biter->__sg_nents--; - } - biter->__sg_advance += delta; - - return true; -} -EXPORT_SYMBOL(__rdma_block_iter_next); - /** * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct * for the drivers. diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index 341bae3d8a1d..41ad8c2018fd 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -46,7 +46,7 @@ #include #include #include -#include +#include #include "roce_hsi.h" #include "qplib_res.h" diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index b8d49abde099..9fde78b74690 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -32,9 +32,9 @@ #include #include -#include #include #include +#include #include "iw_cxgb4.h" diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index bb59c02b807c..1ef9da94b98f 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -9,9 +9,9 @@ #include #include -#include #include #include +#include #include #define UVERBS_MODULE_NAME efa_ib #include diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c index 9f74aadc3047..04136a0281aa 100644 --- a/drivers/infiniband/hw/erdma/erdma_verbs.c +++ b/drivers/infiniband/hw/erdma/erdma_verbs.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include "erdma.h" diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c 
b/drivers/infiniband/hw/hns/hns_roce_alloc.c index 8e802f118bc9..142c86f462fa 100644 --- a/drivers/infiniband/hw/hns/hns_roce_alloc.c +++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c @@ -32,7 +32,7 @@ */ #include -#include +#include #include "hns_roce_device.h" void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf) diff --git a/drivers/infiniband/hw/ionic/ionic_ibdev.h b/drivers/infiniband/hw/ionic/ionic_ibdev.h index 82fda1e3cdb6..63828240d659 100644 --- a/drivers/infiniband/hw/ionic/ionic_ibdev.h +++ b/drivers/infiniband/hw/ionic/ionic_ibdev.h @@ -4,9 +4,9 @@ #ifndef _IONIC_IBDEV_H_ #define _IONIC_IBDEV_H_ -#include #include #include +#include #include #include diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h index d320d1a228b3..3d49bd57bae7 100644 --- a/drivers/infiniband/hw/irdma/main.h +++ b/drivers/infiniband/hw/irdma/main.h @@ -37,8 +37,8 @@ #include #include #include -#include #include +#include #include #include "osdep.h" #include "defs.h" diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h index e447acfd2071..a7c8c0fd7019 100644 --- a/drivers/infiniband/hw/mana/mana_ib.h +++ b/drivers/infiniband/hw/mana/mana_ib.h @@ -8,7 +8,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 77a72d2b0dd2..650b4a9121ff 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -33,6 +33,7 @@ #include #include +#include #include "mlx4_ib.h" diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c index af321f6ef7f5..75d5b5672b5c 100644 --- a/drivers/infiniband/hw/mlx5/mem.c +++ b/drivers/infiniband/hw/mlx5/mem.c @@ -31,6 +31,7 @@ */ #include +#include #include "mlx5_ib.h" /* diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c index 4e562e0dd9e1..29488fba21a0 100644 --- 
a/drivers/infiniband/hw/mlx5/umr.c +++ b/drivers/infiniband/hw/mlx5/umr.c @@ -2,6 +2,7 @@ /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */ #include +#include #include "mlx5_ib.h" #include "umr.h" #include "wr.h" diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index ef0635064fba..ee1d3583bbb9 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -35,8 +35,8 @@ */ #include -#include #include +#include #include #include diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index e89be2fbd5eb..c73d4bbee71f 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -45,9 +45,9 @@ #include #include #include -#include #include #include +#include #include #include "ocrdma.h" diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 33b4a0e6d3a8..2fa9e07710d3 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -39,9 +39,9 @@ #include #include #include -#include #include #include +#include #include #include diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h index 763ddc6f25d1..23e547d4b3a7 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h @@ -53,8 +53,8 @@ #include #include #include -#include #include +#include #include #include "pvrdma_ring.h" diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index 09b7f7d4685e..db92d4623647 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h @@ -75,38 +75,6 @@ static inline size_t ib_umem_num_pages(struct ib_umem *umem) { return ib_umem_num_dma_blocks(umem, PAGE_SIZE); } - -static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter, - struct ib_umem *umem, - unsigned long pgsz) -{ - 
__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl, - umem->sgt_append.sgt.nents, pgsz); - biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1); - biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz); -} - -static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter) -{ - return __rdma_block_iter_next(biter) && biter->__sg_numblocks--; -} - -/** - * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem - * @umem: umem to iterate over - * @biter: block iterator variable - * @pgsz: Page size to split the list into - * - * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The - * returned DMA blocks will be aligned to pgsz and span the range: - * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz) - * - * Performs exactly ib_umem_num_dma_blocks() iterations. - */ -#define rdma_umem_for_each_dma_block(umem, biter, pgsz) \ - for (__rdma_umem_block_iter_start(biter, umem, pgsz); \ - __rdma_umem_block_iter_next(biter);) - #ifdef CONFIG_INFINIBAND_USER_MEM struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr, diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 3f3827e1c711..7bdd77ed7e20 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2959,22 +2959,6 @@ struct ib_client { u8 no_kverbs_req:1; }; -/* - * IB block DMA iterator - * - * Iterates the DMA-mapped SGL in contiguous memory blocks aligned - * to a HW supported page size. 
- */ -struct ib_block_iter { - /* internal states */ - struct scatterlist *__sg; /* sg holding the current aligned block */ - dma_addr_t __dma_addr; /* unaligned DMA address of this block */ - size_t __sg_numblocks; /* ib_umem_num_dma_blocks() */ - unsigned int __sg_nents; /* number of SG entries */ - unsigned int __sg_advance; /* number of bytes to advance in sg in next step */ - unsigned int __pg_bit; /* alignment of current block */ -}; - struct ib_device *_ib_alloc_device(size_t size, struct net *net); #define ib_alloc_device(drv_struct, member) \ container_of(_ib_alloc_device(sizeof(struct drv_struct) + \ @@ -3003,38 +2987,6 @@ void ib_unregister_device_queued(struct ib_device *ib_dev); int ib_register_client (struct ib_client *client); void ib_unregister_client(struct ib_client *client); -void __rdma_block_iter_start(struct ib_block_iter *biter, - struct scatterlist *sglist, - unsigned int nents, - unsigned long pgsz); -bool __rdma_block_iter_next(struct ib_block_iter *biter); - -/** - * rdma_block_iter_dma_address - get the aligned dma address of the current - * block held by the block iterator. - * @biter: block iterator holding the memory block - */ -static inline dma_addr_t -rdma_block_iter_dma_address(struct ib_block_iter *biter) -{ - return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1); -} - -/** - * rdma_for_each_block - iterate over contiguous memory blocks of the sg list - * @sglist: sglist to iterate over - * @biter: block iterator holding the memory block - * @nents: maximum number of sg entries to iterate over - * @pgsz: best HW supported page size to use - * - * Callers may use rdma_block_iter_dma_address() to get each - * blocks aligned DMA address. 
- */
-#define rdma_for_each_block(sglist, biter, nents, pgsz)	\
-	for (__rdma_block_iter_start(biter, sglist, nents,	\
-	     pgsz);						\
-	     __rdma_block_iter_next(biter);)
-
 /**
  * ib_get_client_data - Get IB client context
  * @device:Device to get context for
diff --git a/include/rdma/iter.h b/include/rdma/iter.h
new file mode 100644
index 000000000000..19d64ef04ba9
--- /dev/null
+++ b/include/rdma/iter.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef _RDMA_ITER_H_
+#define _RDMA_ITER_H_
+
+#include <linux/scatterlist.h>
+#include <rdma/ib_umem.h>
+
+/**
+ * IB block DMA iterator
+ *
+ * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
+ * to a HW supported page size.
+ */
+struct ib_block_iter {
+	/* internal states */
+	struct scatterlist *__sg;	/* sg holding the current aligned block */
+	dma_addr_t __dma_addr;		/* unaligned DMA address of this block */
+	size_t __sg_numblocks;		/* ib_umem_num_dma_blocks() */
+	unsigned int __sg_nents;	/* number of SG entries */
+	unsigned int __sg_advance;	/* number of bytes to advance in sg in next step */
+	unsigned int __pg_bit;		/* alignment of current block */
+};
+
+void __rdma_block_iter_start(struct ib_block_iter *biter,
+			     struct scatterlist *sglist,
+			     unsigned int nents,
+			     unsigned long pgsz);
+bool __rdma_block_iter_next(struct ib_block_iter *biter);
+
+/**
+ * rdma_block_iter_dma_address - get the aligned dma address of the current
+ * block held by the block iterator. 
+ * @biter: block iterator holding the memory block
+ */
+static inline dma_addr_t
+rdma_block_iter_dma_address(struct ib_block_iter *biter)
+{
+	return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
+}
+
+/**
+ * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
+ * @sglist: sglist to iterate over
+ * @biter: block iterator holding the memory block
+ * @nents: maximum number of sg entries to iterate over
+ * @pgsz: best HW supported page size to use
+ *
+ * Callers may use rdma_block_iter_dma_address() to get each
+ * blocks aligned DMA address.
+ */
+#define rdma_for_each_block(sglist, biter, nents, pgsz)	\
+	for (__rdma_block_iter_start(biter, sglist, nents,	\
+	     pgsz);						\
+	     __rdma_block_iter_next(biter);)
+
+static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
+						struct ib_umem *umem,
+						unsigned long pgsz)
+{
+	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
+				umem->sgt_append.sgt.nents, pgsz);
+	biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
+	biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
+}
+
+static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
+{
+	return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
+}
+
+/**
+ * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
+ * @umem: umem to iterate over
+ * @biter: block iterator variable
+ * @pgsz: Page size to split the list into
+ *
+ * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
+ * returned DMA blocks will be aligned to pgsz and span the range:
+ * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
+ *
+ * Performs exactly ib_umem_num_dma_blocks() iterations.
+ */
+#define rdma_umem_for_each_dma_block(umem, biter, pgsz)	\
+	for (__rdma_umem_block_iter_start(biter, umem, pgsz);	\
+	     __rdma_umem_block_iter_next(biter);)
+
+#endif /* _RDMA_ITER_H_ */