5.10-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 15 Mar 2021 13:12:44 +0000 (14:12 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 15 Mar 2021 13:12:44 +0000 (14:12 +0100)
added patches:
rdma-umem-use-ib_dma_max_seg_size-instead-of-dma_get_max_seg_size.patch

queue-5.10/rdma-umem-use-ib_dma_max_seg_size-instead-of-dma_get_max_seg_size.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/rdma-umem-use-ib_dma_max_seg_size-instead-of-dma_get_max_seg_size.patch b/queue-5.10/rdma-umem-use-ib_dma_max_seg_size-instead-of-dma_get_max_seg_size.patch
new file mode 100644 (file)
index 0000000..d0afebe
--- /dev/null
@@ -0,0 +1,40 @@
+From b116c702791a9834e6485f67ca6267d9fdf59b87 Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Fri, 6 Nov 2020 19:19:33 +0100
+Subject: RDMA/umem: Use ib_dma_max_seg_size instead of dma_get_max_seg_size
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit b116c702791a9834e6485f67ca6267d9fdf59b87 upstream.
+
+RDMA ULPs must not call DMA mapping APIs directly but instead use the
+ib_dma_* wrappers.
+
+Fixes: 0c16d9635e3a ("RDMA/umem: Move to allocate SG table from pages")
+Link: https://lore.kernel.org/r/20201106181941.1878556-3-hch@lst.de
+Reported-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Cc: "Marciniszyn, Mike" <mike.marciniszyn@cornelisnetworks.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/core/umem.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -220,10 +220,10 @@ struct ib_umem *ib_umem_get(struct ib_de
+               cur_base += ret * PAGE_SIZE;
+               npages -= ret;
+-              sg = __sg_alloc_table_from_pages(
+-                      &umem->sg_head, page_list, ret, 0, ret << PAGE_SHIFT,
+-                      dma_get_max_seg_size(device->dma_device), sg, npages,
+-                      GFP_KERNEL);
++              sg = __sg_alloc_table_from_pages(&umem->sg_head, page_list, ret,
++                              0, ret << PAGE_SHIFT,
++                              ib_dma_max_seg_size(device), sg, npages,
++                              GFP_KERNEL);
+               umem->sg_nents = umem->sg_head.nents;
+               if (IS_ERR(sg)) {
+                       unpin_user_pages_dirty_lock(page_list, ret, 0);
+                       ret = PTR_ERR(sg);
diff --git a/queue-5.10/series b/queue-5.10/series
index 9df24b958c64ea1293ac5805aedb1120c759a48a..7b8a6e84e8e38222cb8f43e688671ec02b0d0e68 100644 (file)
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -287,3 +287,4 @@ mm-page_alloc.c-refactor-initialization-of-struct-page-for-holes-in-memory-layou
 xen-events-don-t-unmask-an-event-channel-when-an-eoi-is-pending.patch
 xen-events-avoid-handling-the-same-event-on-two-cpus-at-the-same-time.patch
 kvm-arm64-fix-nvhe-hyp-panic-host-context-restore.patch
+rdma-umem-use-ib_dma_max_seg_size-instead-of-dma_get_max_seg_size.patch
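
Context for the queued change (not part of the patch itself): the one-line fix swaps dma_get_max_seg_size(device->dma_device) for the ib_dma_max_seg_size() wrapper, so that ib_umem_get() also works for "virtual DMA" devices such as rxe and siw, which have no dma_device behind them. The sketch below paraphrases, from memory, roughly what the wrapper looks like in include/rdma/ib_verbs.h of that era; treat the exact body as an assumption rather than a verbatim copy of the header.

    /* Rough sketch of the ib_dma_* wrapper this patch switches to. */
    static inline bool ib_uses_virt_dma(struct ib_device *dev)
    {
            /* rxe/siw-style devices DMA through the CPU and set no dma_device. */
            return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
    }

    static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
    {
            /* No real DMA engine: any segment size is acceptable. */
            if (ib_uses_virt_dma(dev))
                    return UINT_MAX;
            /* Otherwise defer to the underlying DMA device's limit. */
            return dma_get_max_seg_size(dev->dma_device);
    }

This is why ULPs and core code are expected to go through the ib_dma_* wrappers rather than calling the DMA mapping API on device->dma_device directly: the wrapper is the place where the no-dma_device case is handled.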