From 7f970a4307def704c777d30ff0e1b779fe680dcd Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Date: Mon, 15 Mar 2021 14:12:44 +0100
Subject: [PATCH] 5.10-stable patches

added patches:
	rdma-umem-use-ib_dma_max_seg_size-instead-of-dma_get_max_seg_size.patch

---
 ...size-instead-of-dma_get_max_seg_size.patch | 40 +++++++++++++++++++
 queue-5.10/series                             |  1 +
 2 files changed, 41 insertions(+)
 create mode 100644 queue-5.10/rdma-umem-use-ib_dma_max_seg_size-instead-of-dma_get_max_seg_size.patch

diff --git a/queue-5.10/rdma-umem-use-ib_dma_max_seg_size-instead-of-dma_get_max_seg_size.patch b/queue-5.10/rdma-umem-use-ib_dma_max_seg_size-instead-of-dma_get_max_seg_size.patch
new file mode 100644
index 00000000000..d0afebe9239
--- /dev/null
+++ b/queue-5.10/rdma-umem-use-ib_dma_max_seg_size-instead-of-dma_get_max_seg_size.patch
@@ -0,0 +1,40 @@
+From b116c702791a9834e6485f67ca6267d9fdf59b87 Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Fri, 6 Nov 2020 19:19:33 +0100
+Subject: RDMA/umem: Use ib_dma_max_seg_size instead of dma_get_max_seg_size
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit b116c702791a9834e6485f67ca6267d9fdf59b87 upstream.
+
+RDMA ULPs must not call DMA mapping APIs directly but instead use the
+ib_dma_* wrappers.
+
+Fixes: 0c16d9635e3a ("RDMA/umem: Move to allocate SG table from pages")
+Link: https://lore.kernel.org/r/20201106181941.1878556-3-hch@lst.de
+Reported-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Cc: "Marciniszyn, Mike"
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/core/umem.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -220,10 +220,10 @@ struct ib_umem *ib_umem_get(struct ib_de
+ 
+ 		cur_base += ret * PAGE_SIZE;
+ 		npages -= ret;
+-		sg = __sg_alloc_table_from_pages(
+-			&umem->sg_head, page_list, ret, 0, ret << PAGE_SHIFT,
+-			dma_get_max_seg_size(device->dma_device), sg, npages,
+-			GFP_KERNEL);
++		sg = __sg_alloc_table_from_pages(&umem->sg_head, page_list, ret,
++						 0, ret << PAGE_SHIFT,
++						 ib_dma_max_seg_size(device), sg, npages,
++						 GFP_KERNEL);
+ 		umem->sg_nents = umem->sg_head.nents;
+ 		if (IS_ERR(sg)) {
+ 			unpin_user_pages_dirty_lock(page_list, ret, 0);
diff --git a/queue-5.10/series b/queue-5.10/series
index 9df24b958c6..7b8a6e84e8e 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -287,3 +287,4 @@ mm-page_alloc.c-refactor-initialization-of-struct-page-for-holes-in-memory-layou
 xen-events-don-t-unmask-an-event-channel-when-an-eoi-is-pending.patch
 xen-events-avoid-handling-the-same-event-on-two-cpus-at-the-same-time.patch
 kvm-arm64-fix-nvhe-hyp-panic-host-context-restore.patch
+rdma-umem-use-ib_dma_max_seg_size-instead-of-dma_get_max_seg_size.patch
-- 
2.47.3
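
For context on why the one-line change above matters: the ib_dma_* helpers
exist so that a ULP queries DMA properties through the struct ib_device it
already holds, instead of dereferencing device->dma_device itself. A minimal
sketch of such a wrapper, assuming the usual <rdma/ib_verbs.h> and
<linux/dma-mapping.h> definitions and not claiming to be the verbatim 5.10
source, looks like this:

/*
 * Sketch only: a thin inline in the RDMA core that keeps the
 * dma_device dereference out of each individual ULP.
 */
static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
{
	/* Forward to the generic DMA API on the ULP's behalf. */
	return dma_get_max_seg_size(dev->dma_device);
}

Keeping that dereference behind the wrapper is what lets the core
special-case devices that perform no real DMA (software RDMA providers,
for example) without touching callers such as ib_umem_get().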