git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
drop queue-5.4/dma-direct-exclude-dma_direct_map_resource-from-the-min_low_pfn-check...
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 8 May 2020 13:29:02 +0000 (15:29 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 8 May 2020 13:29:02 +0000 (15:29 +0200)
queue-5.4/dma-direct-exclude-dma_direct_map_resource-from-the-min_low_pfn-check.patch [deleted file]
queue-5.4/series

diff --git a/queue-5.4/dma-direct-exclude-dma_direct_map_resource-from-the-min_low_pfn-check.patch b/queue-5.4/dma-direct-exclude-dma_direct_map_resource-from-the-min_low_pfn-check.patch
deleted file mode 100644 (file)
index b2946e4..0000000
--- a/queue-5.4/dma-direct-exclude-dma_direct_map_resource-from-the-min_low_pfn-check.patch
+++ /dev/null
@@ -1,118 +0,0 @@
-From 68a33b1794665ba8a1d1ef1d3bfcc7c587d380a6 Mon Sep 17 00:00:00 2001
-From: Christoph Hellwig <hch@lst.de>
-Date: Tue, 19 Nov 2019 17:38:58 +0100
-Subject: dma-direct: exclude dma_direct_map_resource from the min_low_pfn check
-
-From: Christoph Hellwig <hch@lst.de>
-
-commit 68a33b1794665ba8a1d1ef1d3bfcc7c587d380a6 upstream.
-
-The valid memory address check in dma_capable only makes sense when mapping
-normal memory, not when using dma_map_resource to map a device resource.
-Add a new boolean argument to dma_capable to exclude that check for the
-dma_map_resource case.
-
-Fixes: b12d66278dd6 ("dma-direct: check for overflows on 32 bit DMA addresses")
-Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
-Signed-off-by: Christoph Hellwig <hch@lst.de>
-Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
-Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- arch/x86/kernel/amd_gart_64.c |    4 ++--
- drivers/xen/swiotlb-xen.c     |    4 ++--
- include/linux/dma-direct.h    |    5 +++--
- kernel/dma/direct.c           |    4 ++--
- kernel/dma/swiotlb.c          |    2 +-
- 5 files changed, 10 insertions(+), 9 deletions(-)
-
---- a/arch/x86/kernel/amd_gart_64.c
-+++ b/arch/x86/kernel/amd_gart_64.c
-@@ -185,13 +185,13 @@ static void iommu_full(struct device *de
- static inline int
- need_iommu(struct device *dev, unsigned long addr, size_t size)
- {
--      return force_iommu || !dma_capable(dev, addr, size);
-+      return force_iommu || !dma_capable(dev, addr, size, true);
- }
- static inline int
- nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
- {
--      return !dma_capable(dev, addr, size);
-+      return !dma_capable(dev, addr, size, true);
- }
- /* Map a single continuous physical area into the IOMMU.
---- a/drivers/xen/swiotlb-xen.c
-+++ b/drivers/xen/swiotlb-xen.c
-@@ -375,7 +375,7 @@ static dma_addr_t xen_swiotlb_map_page(s
-        * we can safely return the device addr and not worry about bounce
-        * buffering it.
-        */
--      if (dma_capable(dev, dev_addr, size) &&
-+      if (dma_capable(dev, dev_addr, size, true) &&
-           !range_straddles_page_boundary(phys, size) &&
-               !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
-               swiotlb_force != SWIOTLB_FORCE)
-@@ -397,7 +397,7 @@ static dma_addr_t xen_swiotlb_map_page(s
-       /*
-        * Ensure that the address returned is DMA'ble
-        */
--      if (unlikely(!dma_capable(dev, dev_addr, size))) {
-+      if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
-               swiotlb_tbl_unmap_single(dev, map, size, size, dir,
-                               attrs | DMA_ATTR_SKIP_CPU_SYNC);
-               return DMA_MAPPING_ERROR;
---- a/include/linux/dma-direct.h
-+++ b/include/linux/dma-direct.h
-@@ -25,14 +25,15 @@ static inline phys_addr_t __dma_to_phys(
-       return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
- }
--static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
-+              bool is_ram)
- {
-       dma_addr_t end = addr + size - 1;
-       if (!dev->dma_mask)
-               return false;
--      if (!IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
-+      if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
-           min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
-               return false;
---- a/kernel/dma/direct.c
-+++ b/kernel/dma/direct.c
-@@ -327,7 +327,7 @@ static inline bool dma_direct_possible(s
-               size_t size)
- {
-       return swiotlb_force != SWIOTLB_FORCE &&
--              dma_capable(dev, dma_addr, size);
-+              dma_capable(dev, dma_addr, size, true);
- }
- dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
-@@ -376,7 +376,7 @@ dma_addr_t dma_direct_map_resource(struc
- {
-       dma_addr_t dma_addr = paddr;
--      if (unlikely(!dma_capable(dev, dma_addr, size))) {
-+      if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
-               report_addr(dev, dma_addr, size);
-               return DMA_MAPPING_ERROR;
-       }
---- a/kernel/dma/swiotlb.c
-+++ b/kernel/dma/swiotlb.c
-@@ -678,7 +678,7 @@ bool swiotlb_map(struct device *dev, phy
-       /* Ensure that the address returned is DMA'ble */
-       *dma_addr = __phys_to_dma(dev, *phys);
--      if (unlikely(!dma_capable(dev, *dma_addr, size))) {
-+      if (unlikely(!dma_capable(dev, *dma_addr, size, true))) {
-               swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
-                       attrs | DMA_ATTR_SKIP_CPU_SYNC);
-               return false;
diff --git a/queue-5.4/series b/queue-5.4/series
index 0c771b7ef9c903360bdde8770d16465a3e1a922a..2f126c44a00843cd5f42682def2620ebd81d2ce8 100644 (file)
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -37,7 +37,6 @@ hexagon-clean-up-ioremap.patch
 hexagon-define-ioremap_uc.patch
 alsa-hda-match-both-pci-id-and-ssid-for-driver-blacklist.patch
 x86-kvm-fix-a-missing-prototypes-vmread_error.patch
-dma-direct-exclude-dma_direct_map_resource-from-the-min_low_pfn-check.patch
 platform-x86-gpd-pocket-fan-fix-error-message-when-temp-limits-are-out-of-range.patch
 acpi-pm-s2idle-fix-comment-in-acpi_s2idle_prepare_late.patch
 mac80211-add-ieee80211_is_any_nullfunc.patch
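
For context on the dropped patch: its commit message explains that the min_low_pfn sanity check in dma_capable() only makes sense for RAM-backed mappings, so dma_map_resource() (which maps device MMIO) should skip it via a new is_ram argument. The stand-alone sketch below merely models that logic; the simplified signature, constants, and values are illustrative stand-ins, not the kernel's actual definitions.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

/* Hypothetical stand-ins for the device's DMA mask and for the DMA address of
 * the first RAM page (phys_to_dma(dev, PFN_PHYS(min_low_pfn)) in the kernel). */
static const dma_addr_t dev_dma_mask   = 0xffffffffULL; /* 32-bit capable device */
static const dma_addr_t first_ram_addr = 0x100000ULL;   /* RAM starts at 1 MiB    */

/* Model of the reworked check: the low-address sanity test is applied only
 * when the mapping is backed by RAM (is_ram == true). */
static bool dma_capable(dma_addr_t addr, size_t size, bool is_ram)
{
	dma_addr_t end = addr + size - 1;

	if (is_ram && addr < first_ram_addr)
		return false; /* RAM below min_low_pfn: likely a truncated address */

	return end <= dev_dma_mask;
}

int main(void)
{
	/* An MMIO resource below the first RAM address is acceptable for a
	 * dma_map_resource()-style caller, which passes is_ram = false ... */
	printf("resource mapping ok: %d\n", dma_capable(0x9000, 0x1000, false));
	/* ... while the same range treated as RAM is still rejected. */
	printf("ram mapping ok:      %d\n", dma_capable(0x9000, 0x1000, true));
	return 0;
}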