--- /dev/null
+From b116c702791a9834e6485f67ca6267d9fdf59b87 Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Fri, 6 Nov 2020 19:19:33 +0100
+Subject: RDMA/umem: Use ib_dma_max_seg_size instead of dma_get_max_seg_size
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit b116c702791a9834e6485f67ca6267d9fdf59b87 upstream.
+
+RDMA ULPs must not call the DMA mapping APIs directly; they must go
+through the ib_dma_* wrappers instead.
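+
+For reference, ib_dma_max_seg_size() is a thin wrapper in
+include/rdma/ib_verbs.h.  A simplified sketch of the indirection (close
+to, though not guaranteed to be verbatim, the upstream definition):
+
+  /* Query the max DMA segment size via the ib_device, not dma_device */
+  static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
+  {
+          return dma_get_max_seg_size(dev->dma_device);
+  }
+
+Routing the query through the ib_dma_* layer keeps the choice of DMA
+backend inside the RDMA core rather than in every caller.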
+
+Fixes: 0c16d9635e3a ("RDMA/umem: Move to allocate SG table from pages")
+Link: https://lore.kernel.org/r/20201106181941.1878556-3-hch@lst.de
+Reported-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Cc: "Marciniszyn, Mike" <mike.marciniszyn@cornelisnetworks.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/core/umem.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
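+[Hypothetical ULP-side sketch of the same rule, for illustration only:
+ the ib_dma_map_sg()/ib_dma_unmap_sg() wrappers are the real API, while
+ ibdev, sgl and n are placeholder names.]
+
+  /* Map through the ib_dma_* layer; never call dma_map_sg() directly */
+  int nents = ib_dma_map_sg(ibdev, sgl, n, DMA_BIDIRECTIONAL);
+
+  if (!nents)
+          return -ENOMEM;
+
+  /* ... post work requests that reference the mapped sgl ... */
+
+  ib_dma_unmap_sg(ibdev, sgl, n, DMA_BIDIRECTIONAL);
+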
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -220,10 +220,10 @@ struct ib_umem *ib_umem_get(struct ib_de
+
+ cur_base += ret * PAGE_SIZE;
+ npages -= ret;
+- sg = __sg_alloc_table_from_pages(
+- &umem->sg_head, page_list, ret, 0, ret << PAGE_SHIFT,
+- dma_get_max_seg_size(device->dma_device), sg, npages,
+- GFP_KERNEL);
++ sg = __sg_alloc_table_from_pages(&umem->sg_head, page_list, ret,
++ 0, ret << PAGE_SHIFT,
++ ib_dma_max_seg_size(device), sg, npages,
++ GFP_KERNEL);
+ umem->sg_nents = umem->sg_head.nents;
+ if (IS_ERR(sg)) {
+ unpin_user_pages_dirty_lock(page_list, ret, 0);