git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
drop some dma patches from 5.4.y
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 7 Jan 2020 18:34:22 +0000 (19:34 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 7 Jan 2020 18:34:22 +0000 (19:34 +0100)
queue-5.4/dma-direct-don-t-check-swiotlb-force-in-dma_direct_map_resource.patch [deleted file]
queue-5.4/dma-direct-exclude-dma_direct_map_resource-from-the-min_low_pfn-check.patch [deleted file]
queue-5.4/series

diff --git a/queue-5.4/dma-direct-don-t-check-swiotlb-force-in-dma_direct_map_resource.patch b/queue-5.4/dma-direct-don-t-check-swiotlb-force-in-dma_direct_map_resource.patch
deleted file mode 100644 (file)
index de66d53..0000000
--- a/queue-5.4/dma-direct-don-t-check-swiotlb-force-in-dma_direct_map_resource.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From 4268ac6ae5870af10a7417b22990d615f72f77e2 Mon Sep 17 00:00:00 2001
-From: Christoph Hellwig <hch@lst.de>
-Date: Tue, 19 Nov 2019 17:35:36 +0100
-Subject: dma-direct: don't check swiotlb=force in dma_direct_map_resource
-
-From: Christoph Hellwig <hch@lst.de>
-
-commit 4268ac6ae5870af10a7417b22990d615f72f77e2 upstream.
-
-When mapping resources we can't just use swiotlb ram for bounce
-buffering.  Switch to a direct dma_capable check instead.
-
-Fixes: cfced786969c ("dma-mapping: remove the default map_resource implementation")
-Reported-by: Robin Murphy <robin.murphy@arm.com>
-Signed-off-by: Christoph Hellwig <hch@lst.de>
-Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
-Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- kernel/dma/direct.c |    2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/kernel/dma/direct.c
-+++ b/kernel/dma/direct.c
-@@ -375,7 +375,7 @@ dma_addr_t dma_direct_map_resource(struc
- {
-       dma_addr_t dma_addr = paddr;
-
--      if (unlikely(!dma_direct_possible(dev, dma_addr, size))) {
-+      if (unlikely(!dma_capable(dev, dma_addr, size))) {
-               report_addr(dev, dma_addr, size);
-               return DMA_MAPPING_ERROR;
-       }
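For context, the check that this first dropped patch introduces boils down to: a resource (MMIO) address either fits the device's DMA addressing limits or the mapping fails, with no swiotlb bounce-buffer fallback, because there is no RAM page to copy through. The sketch below is a simplified userspace model of that decision, not kernel code; the names device_can_reach and map_resource and the plain mask comparison are illustrative assumptions.

/*
 * Simplified userspace model of the dma_direct_map_resource() check above.
 * Illustration only; not the kernel implementation.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;
#define DMA_MAPPING_ERROR ((dma_addr_t)-1)

/* Hypothetical stand-in for dma_capable(): can the device address this range? */
static bool device_can_reach(uint64_t dma_mask, dma_addr_t addr, size_t size)
{
	return addr + size - 1 <= dma_mask;
}

static dma_addr_t map_resource(uint64_t dma_mask, dma_addr_t paddr, size_t size)
{
	if (!device_can_reach(dma_mask, paddr, size))
		return DMA_MAPPING_ERROR;	/* report and fail, never bounce */
	return paddr;				/* direct mapping: bus addr == phys addr */
}

int main(void)
{
	/* 32-bit device, MMIO resource above 4 GiB: the mapping must fail cleanly. */
	dma_addr_t r = map_resource(0xffffffffULL, 0x100000000ULL, 0x1000);
	printf("%s\n", r == DMA_MAPPING_ERROR ? "DMA_MAPPING_ERROR" : "mapped");
	return 0;
}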
diff --git a/queue-5.4/dma-direct-exclude-dma_direct_map_resource-from-the-min_low_pfn-check.patch b/queue-5.4/dma-direct-exclude-dma_direct_map_resource-from-the-min_low_pfn-check.patch
deleted file mode 100644 (file)
index c3e1a3d..0000000
--- a/queue-5.4/dma-direct-exclude-dma_direct_map_resource-from-the-min_low_pfn-check.patch
+++ /dev/null
@@ -1,118 +0,0 @@
-From 68a33b1794665ba8a1d1ef1d3bfcc7c587d380a6 Mon Sep 17 00:00:00 2001
-From: Christoph Hellwig <hch@lst.de>
-Date: Tue, 19 Nov 2019 17:38:58 +0100
-Subject: dma-direct: exclude dma_direct_map_resource from the min_low_pfn check
-
-From: Christoph Hellwig <hch@lst.de>
-
-commit 68a33b1794665ba8a1d1ef1d3bfcc7c587d380a6 upstream.
-
-The valid memory address check in dma_capable only makes sense when mapping
-normal memory, not when using dma_map_resource to map a device resource.
-Add a new boolean argument to dma_capable to exclude that check for the
-dma_map_resource case.
-
-Fixes: b12d66278dd6 ("dma-direct: check for overflows on 32 bit DMA addresses")
-Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
-Signed-off-by: Christoph Hellwig <hch@lst.de>
-Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
-Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- arch/x86/kernel/amd_gart_64.c |    4 ++--
- drivers/xen/swiotlb-xen.c     |    4 ++--
- include/linux/dma-direct.h    |    5 +++--
- kernel/dma/direct.c           |    4 ++--
- kernel/dma/swiotlb.c          |    2 +-
- 5 files changed, 10 insertions(+), 9 deletions(-)
-
---- a/arch/x86/kernel/amd_gart_64.c
-+++ b/arch/x86/kernel/amd_gart_64.c
-@@ -185,13 +185,13 @@ static void iommu_full(struct device *de
- static inline int
- need_iommu(struct device *dev, unsigned long addr, size_t size)
- {
--      return force_iommu || !dma_capable(dev, addr, size);
-+      return force_iommu || !dma_capable(dev, addr, size, true);
- }
-
- static inline int
- nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
- {
--      return !dma_capable(dev, addr, size);
-+      return !dma_capable(dev, addr, size, true);
- }
-
- /* Map a single continuous physical area into the IOMMU.
---- a/drivers/xen/swiotlb-xen.c
-+++ b/drivers/xen/swiotlb-xen.c
-@@ -375,7 +375,7 @@ static dma_addr_t xen_swiotlb_map_page(s
-        * we can safely return the device addr and not worry about bounce
-        * buffering it.
-        */
--      if (dma_capable(dev, dev_addr, size) &&
-+      if (dma_capable(dev, dev_addr, size, true) &&
-           !range_straddles_page_boundary(phys, size) &&
-               !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
-               swiotlb_force != SWIOTLB_FORCE)
-@@ -397,7 +397,7 @@ static dma_addr_t xen_swiotlb_map_page(s
-       /*
-        * Ensure that the address returned is DMA'ble
-        */
--      if (unlikely(!dma_capable(dev, dev_addr, size))) {
-+      if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
-               swiotlb_tbl_unmap_single(dev, map, size, size, dir,
-                               attrs | DMA_ATTR_SKIP_CPU_SYNC);
-               return DMA_MAPPING_ERROR;
---- a/include/linux/dma-direct.h
-+++ b/include/linux/dma-direct.h
-@@ -25,14 +25,15 @@ static inline phys_addr_t __dma_to_phys(
-       return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
- }
-
--static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
-+              bool is_ram)
- {
-       dma_addr_t end = addr + size - 1;
-
-       if (!dev->dma_mask)
-               return false;
-
--      if (!IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
-+      if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
-           min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
-               return false;
-
---- a/kernel/dma/direct.c
-+++ b/kernel/dma/direct.c
-@@ -326,7 +326,7 @@ static inline bool dma_direct_possible(s
-               size_t size)
- {
-       return swiotlb_force != SWIOTLB_FORCE &&
--              dma_capable(dev, dma_addr, size);
-+              dma_capable(dev, dma_addr, size, true);
- }
-
- dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
-@@ -375,7 +375,7 @@ dma_addr_t dma_direct_map_resource(struc
- {
-       dma_addr_t dma_addr = paddr;
-
--      if (unlikely(!dma_capable(dev, dma_addr, size))) {
-+      if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
-               report_addr(dev, dma_addr, size);
-               return DMA_MAPPING_ERROR;
-       }
---- a/kernel/dma/swiotlb.c
-+++ b/kernel/dma/swiotlb.c
-@@ -678,7 +678,7 @@ bool swiotlb_map(struct device *dev, phy
-
-       /* Ensure that the address returned is DMA'ble */
-       *dma_addr = __phys_to_dma(dev, *phys);
--      if (unlikely(!dma_capable(dev, *dma_addr, size))) {
-+      if (unlikely(!dma_capable(dev, *dma_addr, size, true))) {
-               swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
-                       attrs | DMA_ATTR_SKIP_CPU_SYNC);
-               return false;
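For context, the is_ram flag added by this second dropped patch only gates the min_low_pfn lower-bound check; both RAM and resource mappings still have to fit the device's DMA mask. Below is a simplified userspace model of that behaviour, not the kernel implementation; the struct layout, the low_limit field standing in for phys_to_dma(dev, PFN_PHYS(min_low_pfn)), and the final mask comparison are illustrative assumptions.

/*
 * Simplified userspace model of the dma_capable(..., is_ram) change above.
 * Illustration only; not kernel code.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

struct device {
	uint64_t *dma_mask;
	dma_addr_t low_limit;	/* stands in for phys_to_dma(dev, PFN_PHYS(min_low_pfn)) */
};

/* is_ram selects whether the low-memory lower-bound check applies. */
static bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
		bool is_ram)
{
	dma_addr_t end = addr + size - 1;

	if (!dev->dma_mask)
		return false;
	/* Only RAM mappings must sit above the low-memory cutoff. */
	if (is_ram && addr < dev->low_limit)
		return false;
	return end <= *dev->dma_mask;	/* simplified mask check */
}

int main(void)
{
	uint64_t mask = 0xffffffffULL;		/* 32-bit capable device */
	struct device dev = { &mask, 0x1000 };	/* hypothetical low cutoff */

	/* An address below the cutoff fails as RAM but passes as a resource. */
	printf("ram:      %d\n", dma_capable(&dev, 0x800, 0x100, true));
	printf("resource: %d\n", dma_capable(&dev, 0x800, 0x100, false));
	return 0;
}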
diff --git a/queue-5.4/series b/queue-5.4/series
index 5e79c811841c31fdcf9d7cd4ff6edd6fb9dc37bd..5562f8853f12abe3be744221e70c2c2d87082409 100644 (file)
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -141,8 +141,6 @@ net-sched-annotate-lockless-accesses-to-qdisc-empty.patch
 kernel-module.c-wakeup-processes-in-module_wq-on-module-unload.patch
 acpi-sysfs-change-acpi_maskable_gpe_max-to-0x100.patch
 perf-callchain-fix-segfault-in-thread__resolve_callchain_sample.patch
-dma-direct-don-t-check-swiotlb-force-in-dma_direct_map_resource.patch
-dma-direct-exclude-dma_direct_map_resource-from-the-min_low_pfn-check.patch
 iommu-vt-d-remove-incorrect-psi-capability-check.patch
 of-overlay-add_changeset_property-memory-leak.patch
 cifs-fix-potential-softlockups-while-refreshing-dfs-cache.patch