--- /dev/null
+From 0a352554da69b02f75ca3389c885c741f1f63235 Mon Sep 17 00:00:00 2001
+From: Nicolas Boichat <drinkcat@chromium.org>
+Date: Thu, 28 Mar 2019 20:43:46 -0700
+Subject: iommu/io-pgtable-arm-v7s: request DMA32 memory, and improve debugging
+
+From: Nicolas Boichat <drinkcat@chromium.org>
+
+commit 0a352554da69b02f75ca3389c885c741f1f63235 upstream.
+
+IOMMUs using ARMv7 short-descriptor format require page tables (level 1
+and 2) to be allocated within the first 4GB of RAM, even on 64-bit
+systems.
+
+For level 1/2 pages, ensure GFP_DMA32 is used if CONFIG_ZONE_DMA32 is
+defined (e.g. on arm64 platforms).
+
+For level 2 pages, create the slab cache with the SLAB_CACHE_DMA32 flag.
+Note that we do not explicitly pass GFP_DMA[32] to kmem_cache_zalloc, as
+this is not strictly necessary and would trigger a warning in mm/sl*b.c,
+since we did not update GFP_SLAB_BUG_MASK.
+
+Also, print an error when the physical address does not fit in 32 bits,
+to make debugging easier in the future.
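+
+Condensed, the allocation path after this patch looks roughly as follows
+(a sketch assembled from the hunks below, not a verbatim copy of the
+function):
+
+	if (lvl == 1)
+		/* L1 table: a full page (or more) from the low zone. */
+		table = (void *)__get_free_pages(
+			__GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
+	else if (lvl == 2)
+		/* L2 table: 1KB object from the SLAB_CACHE_DMA32 cache;
+		 * note that gfp deliberately does not carry GFP_DMA32. */
+		table = kmem_cache_zalloc(data->l2_tables, gfp);
+
+	phys = virt_to_phys(table);
+	if (phys != (arm_v7s_iopte)phys) {
+		/* Doesn't fit in PTE */
+		dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
+		goto out_free;
+	}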
+
+Link: http://lkml.kernel.org/r/20181210011504.122604-3-drinkcat@chromium.org
+Fixes: ad67f5a6545f ("arm64: replace ZONE_DMA with ZONE_DMA32")
+Signed-off-by: Nicolas Boichat <drinkcat@chromium.org>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Hsin-Yi Wang <hsinyi@chromium.org>
+Cc: Huaisheng Ye <yehs1@lenovo.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Matthias Brugger <matthias.bgg@gmail.com>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: Robin Murphy <robin.murphy@arm.com>
+Cc: Sasha Levin <Alexander.Levin@microsoft.com>
+Cc: Tomasz Figa <tfiga@google.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Yingjoe Chen <yingjoe.chen@mediatek.com>
+Cc: Yong Wu <yong.wu@mediatek.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/io-pgtable-arm-v7s.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+--- a/drivers/iommu/io-pgtable-arm-v7s.c
++++ b/drivers/iommu/io-pgtable-arm-v7s.c
+@@ -161,6 +161,14 @@
+
+ #define ARM_V7S_TCR_PD1 BIT(5)
+
++#ifdef CONFIG_ZONE_DMA32
++#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
++#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
++#else
++#define ARM_V7S_TABLE_GFP_DMA GFP_DMA
++#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
++#endif
++
+ typedef u32 arm_v7s_iopte;
+
+ static bool selftest_running;
+@@ -198,13 +206,16 @@ static void *__arm_v7s_alloc_table(int l
+ void *table = NULL;
+
+ if (lvl == 1)
+- table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
++ table = (void *)__get_free_pages(
++ __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
+ else if (lvl == 2)
+- table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
++ table = kmem_cache_zalloc(data->l2_tables, gfp);
+ phys = virt_to_phys(table);
+- if (phys != (arm_v7s_iopte)phys)
++ if (phys != (arm_v7s_iopte)phys) {
+ /* Doesn't fit in PTE */
++ dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
+ goto out_free;
++ }
+ if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
+ dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+@@ -728,7 +739,7 @@ static struct io_pgtable *arm_v7s_alloc_
+ data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
+ ARM_V7S_TABLE_SIZE(2),
+ ARM_V7S_TABLE_SIZE(2),
+- SLAB_CACHE_DMA, NULL);
++ ARM_V7S_TABLE_SLAB_FLAGS, NULL);
+ if (!data->l2_tables)
+ goto out_free_data;
+
--- /dev/null
+From 6d6ea1e967a246f12cfe2f5fb743b70b2e608d4a Mon Sep 17 00:00:00 2001
+From: Nicolas Boichat <drinkcat@chromium.org>
+Date: Thu, 28 Mar 2019 20:43:42 -0700
+Subject: mm: add support for kmem caches in DMA32 zone
+
+From: Nicolas Boichat <drinkcat@chromium.org>
+
+commit 6d6ea1e967a246f12cfe2f5fb743b70b2e608d4a upstream.
+
+Patch series "iommu/io-pgtable-arm-v7s: Use DMA32 zone for page tables",
+v6.
+
+This is a followup to the discussion in [1], [2].
+
+IOMMUs using ARMv7 short-descriptor format require page tables (level 1
+and 2) to be allocated within the first 4GB of RAM, even on 64-bit
+systems.
+
+For L1 tables that are bigger than a page, we can just use
+__get_free_pages with GFP_DMA32 (on arm64 systems only; arm would still
+use GFP_DMA).
+
+For L2 tables that only take 1KB, it would be a waste to allocate a full
+page, so we considered 3 approaches:
+ 1. This series, adding support for GFP_DMA32 slab caches.
+ 2. genalloc, which requires pre-allocating the maximum number of L2 page
+ tables (4096, so 4MB of memory).
+ 3. page_frag, which is not very memory-efficient as it is unable to reuse
+ freed fragments until the whole page is freed. [3]
+
+This series is the most memory-efficient approach.
+
+stable@ note:
+ We confirmed that this is a regression, and IOMMU errors happen on 4.19
+ and linux-next/master on MT8173 (elm, Acer Chromebook R13). The issue
+ most likely starts from commit ad67f5a6545f ("arm64: replace ZONE_DMA
+ with ZONE_DMA32"), i.e. 4.15, and presumably breaks a number of Mediatek
+ platforms (and maybe others?).
+
+[1] https://lists.linuxfoundation.org/pipermail/iommu/2018-November/030876.html
+[2] https://lists.linuxfoundation.org/pipermail/iommu/2018-December/031696.html
+[3] https://patchwork.codeaurora.org/patch/671639/
+
+This patch (of 3):
+
+IOMMUs using ARMv7 short-descriptor format require page tables to be
+allocated within the first 4GB of RAM, even on 64-bit systems. On arm64,
+this is done by passing GFP_DMA32 flag to memory allocation functions.
+
+For IOMMU L2 tables that only take 1KB, it would be a waste to allocate
+a full page using get_free_pages, so we considered 3 approaches:
+ 1. This patch, adding support for GFP_DMA32 slab caches.
+ 2. genalloc, which requires pre-allocating the maximum number of L2
+ page tables (4096, so 4MB of memory).
+ 3. page_frag, which is not very memory-efficient as it is unable
+ to reuse freed fragments until the whole page is freed.
+
+This change makes it possible to create a custom cache in the DMA32 zone
+using kmem_cache_create, then allocate memory from it using
+kmem_cache_alloc.
+
+We do not create a DMA32 kmalloc cache array, as there are currently no
+users of kmalloc(..., GFP_DMA32). These calls will continue to trigger a
+warning, as we keep GFP_DMA32 in GFP_SLAB_BUG_MASK.
+
+This implies that calls to kmem_cache_*alloc on a SLAB_CACHE_DMA32
+kmem_cache must _not_ use GFP_DMA32 (it would be redundant and
+unnecessary anyway).
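+
+As a usage sketch (hypothetical driver code, not part of this patch), a
+cache created with SLAB_CACHE_DMA32 is then allocated from with plain
+GFP_KERNEL, and the backing pages come from ZONE_DMA32:
+
+	struct kmem_cache *l2_cache;
+	void *obj;
+
+	l2_cache = kmem_cache_create("example_l2_tables", 1024, 1024,
+				     SLAB_CACHE_DMA32, NULL);
+	if (!l2_cache)
+		return -ENOMEM;
+
+	/* Plain GFP_KERNEL: passing GFP_DMA32 here would hit the
+	 * GFP_SLAB_BUG_MASK warning and is redundant anyway. */
+	obj = kmem_cache_zalloc(l2_cache, GFP_KERNEL);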
+
+Link: http://lkml.kernel.org/r/20181210011504.122604-2-drinkcat@chromium.org
+Signed-off-by: Nicolas Boichat <drinkcat@chromium.org>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Cc: Robin Murphy <robin.murphy@arm.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Sasha Levin <Alexander.Levin@microsoft.com>
+Cc: Huaisheng Ye <yehs1@lenovo.com>
+Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
+Cc: Yong Wu <yong.wu@mediatek.com>
+Cc: Matthias Brugger <matthias.bgg@gmail.com>
+Cc: Tomasz Figa <tfiga@google.com>
+Cc: Yingjoe Chen <yingjoe.chen@mediatek.com>
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Hsin-Yi Wang <hsinyi@chromium.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/slab.h | 2 ++
+ mm/slab.c | 2 ++
+ mm/slab.h | 3 ++-
+ mm/slab_common.c | 2 +-
+ mm/slub.c | 5 +++++
+ 5 files changed, 12 insertions(+), 2 deletions(-)
+
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -32,6 +32,8 @@
+ #define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
+ /* Use GFP_DMA memory */
+ #define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
++/* Use GFP_DMA32 memory */
++#define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U)
+ /* DEBUG: Store the last owner for bug hunting */
+ #define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
+ /* Panic if kmem_cache_create() fails */
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -2124,6 +2124,8 @@ done:
+ cachep->allocflags = __GFP_COMP;
+ if (flags & SLAB_CACHE_DMA)
+ cachep->allocflags |= GFP_DMA;
++ if (flags & SLAB_CACHE_DMA32)
++ cachep->allocflags |= GFP_DMA32;
+ if (flags & SLAB_RECLAIM_ACCOUNT)
+ cachep->allocflags |= __GFP_RECLAIMABLE;
+ cachep->size = size;
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -127,7 +127,8 @@ static inline slab_flags_t kmem_cache_fl
+
+
+ /* Legal flag mask for kmem_cache_create(), for various configurations */
+-#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
++#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
++ SLAB_CACHE_DMA32 | SLAB_PANIC | \
+ SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
+
+ #if defined(CONFIG_DEBUG_SLAB)
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -53,7 +53,7 @@ static DECLARE_WORK(slab_caches_to_rcu_d
+ SLAB_FAILSLAB | SLAB_KASAN)
+
+ #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
+- SLAB_ACCOUNT)
++ SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
+
+ /*
+ * Merge control. If this is set then no merging of slab caches will occur.
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -3539,6 +3539,9 @@ static int calculate_sizes(struct kmem_c
+ if (s->flags & SLAB_CACHE_DMA)
+ s->allocflags |= GFP_DMA;
+
++ if (s->flags & SLAB_CACHE_DMA32)
++ s->allocflags |= GFP_DMA32;
++
+ if (s->flags & SLAB_RECLAIM_ACCOUNT)
+ s->allocflags |= __GFP_RECLAIMABLE;
+
+@@ -5633,6 +5636,8 @@ static char *create_unique_id(struct kme
+ */
+ if (s->flags & SLAB_CACHE_DMA)
+ *p++ = 'd';
++ if (s->flags & SLAB_CACHE_DMA32)
++ *p++ = 'D';
+ if (s->flags & SLAB_RECLAIM_ACCOUNT)
+ *p++ = 'a';
+ if (s->flags & SLAB_CONSISTENCY_CHECKS)
--- /dev/null
+From a7f40cfe3b7ada57af9b62fd28430eeb4a7cfcb7 Mon Sep 17 00:00:00 2001
+From: Yang Shi <yang.shi@linux.alibaba.com>
+Date: Thu, 28 Mar 2019 20:43:55 -0700
+Subject: mm: mempolicy: make mbind() return -EIO when MPOL_MF_STRICT is specified
+
+From: Yang Shi <yang.shi@linux.alibaba.com>
+
+commit a7f40cfe3b7ada57af9b62fd28430eeb4a7cfcb7 upstream.
+
+When MPOL_MF_STRICT was specified and an existing page was already on a
+node that does not follow the policy, mbind() should return -EIO. But
+commit 6f4576e3687b ("mempolicy: apply page table walker on
+queue_pages_range()") broke the rule.
+
+And commit c8633798497c ("mm: mempolicy: mbind and migrate_pages support
+thp migration") didn't return the correct value for THP mbind() either.
+
+If MPOL_MF_STRICT is set, ignore vma_migratable() to make sure the walk
+reaches queue_pages_pte_range() or queue_pages_pmd(), which check whether
+an existing page is already on a node that does not follow the policy.
+And, since a non-migratable vma may now be walked, also return -EIO if
+MPOL_MF_MOVE or MPOL_MF_MOVE_ALL was specified and the vma cannot be
+migrated.
+
+Tested with https://github.com/metan-ucw/ltp/blob/master/testcases/kernel/syscalls/mbind/mbind02.c
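+
+A minimal userspace illustration of the restored behaviour (hypothetical
+test modelled on the LTP case above, assuming libnuma's <numaif.h> and a
+machine with at least two NUMA nodes where the touched pages land outside
+the requested node):
+
+	#include <errno.h>
+	#include <numaif.h>	/* mbind(), MPOL_*; link with -lnuma */
+	#include <stdio.h>
+	#include <string.h>
+	#include <sys/mman.h>
+
+	int main(void)
+	{
+		size_t len = 2 * 1024 * 1024;
+		unsigned long nodemask = 1UL << 1;	/* allow node 1 only */
+		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
+			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+		if (p == MAP_FAILED)
+			return 1;
+		memset(p, 0, len);	/* fault the pages in first */
+
+		/* Existing pages sit on a node outside the mask, and
+		 * MPOL_MF_STRICT is set: expect -1 with errno == EIO. */
+		if (mbind(p, len, MPOL_BIND, &nodemask,
+			  sizeof(nodemask) * 8, MPOL_MF_STRICT) == -1 &&
+		    errno == EIO)
+			printf("got expected EIO\n");
+		return 0;
+	}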
+
+[akpm@linux-foundation.org: tweak code comment]
+Link: http://lkml.kernel.org/r/1553020556-38583-1-git-send-email-yang.shi@linux.alibaba.com
+Fixes: 6f4576e3687b ("mempolicy: apply page table walker on queue_pages_range()")
+Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com>
+Signed-off-by: Oscar Salvador <osalvador@suse.de>
+Reported-by: Cyril Hrubis <chrubis@suse.cz>
+Suggested-by: Kirill A. Shutemov <kirill@shutemov.name>
+Acked-by: Rafael Aquini <aquini@redhat.com>
+Reviewed-by: Oscar Salvador <osalvador@suse.de>
+Acked-by: David Rientjes <rientjes@google.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/mempolicy.c | 40 +++++++++++++++++++++++++++++++++-------
+ 1 file changed, 33 insertions(+), 7 deletions(-)
+
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -428,6 +428,13 @@ static inline bool queue_pages_required(
+ return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
+ }
+
++/*
++ * queue_pages_pmd() has three possible return values:
++ * 1 - pages are placed on the right node or queued successfully.
++ * 0 - THP was split.
++ * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing
++ * page was already on a node that does not follow the policy.
++ */
+ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
+ {
+@@ -437,7 +444,7 @@ static int queue_pages_pmd(pmd_t *pmd, s
+ unsigned long flags;
+
+ if (unlikely(is_pmd_migration_entry(*pmd))) {
+- ret = 1;
++ ret = -EIO;
+ goto unlock;
+ }
+ page = pmd_page(*pmd);
+@@ -454,8 +461,15 @@ static int queue_pages_pmd(pmd_t *pmd, s
+ ret = 1;
+ flags = qp->flags;
+ /* go to thp migration */
+- if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
++ if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
++ if (!vma_migratable(walk->vma)) {
++ ret = -EIO;
++ goto unlock;
++ }
++
+ migrate_page_add(page, qp->pagelist, flags);
++ } else
++ ret = -EIO;
+ unlock:
+ spin_unlock(ptl);
+ out:
+@@ -480,8 +494,10 @@ static int queue_pages_pte_range(pmd_t *
+ ptl = pmd_trans_huge_lock(pmd, vma);
+ if (ptl) {
+ ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
+- if (ret)
++ if (ret > 0)
+ return 0;
++ else if (ret < 0)
++ return ret;
+ }
+
+ if (pmd_trans_unstable(pmd))
+@@ -502,11 +518,16 @@ static int queue_pages_pte_range(pmd_t *
+ continue;
+ if (!queue_pages_required(page, qp))
+ continue;
+- migrate_page_add(page, qp->pagelist, flags);
++ if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
++ if (!vma_migratable(vma))
++ break;
++ migrate_page_add(page, qp->pagelist, flags);
++ } else
++ break;
+ }
+ pte_unmap_unlock(pte - 1, ptl);
+ cond_resched();
+- return 0;
++ return addr != end ? -EIO : 0;
+ }
+
+ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
+@@ -576,7 +597,12 @@ static int queue_pages_test_walk(unsigne
+ unsigned long endvma = vma->vm_end;
+ unsigned long flags = qp->flags;
+
+- if (!vma_migratable(vma))
++ /*
++ * Need check MPOL_MF_STRICT to return -EIO if possible
++ * regardless of vma_migratable
++ */
++ if (!vma_migratable(vma) &&
++ !(flags & MPOL_MF_STRICT))
+ return 1;
+
+ if (endvma > end)
+@@ -603,7 +629,7 @@ static int queue_pages_test_walk(unsigne
+ }
+
+ /* queue pages from current vma */
+- if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
++ if (flags & MPOL_MF_VALID)
+ return 0;
+ return 1;
+ }
--- /dev/null
+From d2b2c6dd227ba5b8a802858748ec9a780cb75b47 Mon Sep 17 00:00:00 2001
+From: Lars Persson <lars.persson@axis.com>
+Date: Thu, 28 Mar 2019 20:44:28 -0700
+Subject: mm/migrate.c: add missing flush_dcache_page for non-mapped page migrate
+
+From: Lars Persson <lars.persson@axis.com>
+
+commit d2b2c6dd227ba5b8a802858748ec9a780cb75b47 upstream.
+
+Our MIPS 1004Kc SoCs were seeing random userspace crashes with SIGILL
+and SIGSEGV that could not be traced back to a userspace code bug. They
+had all the magic signs of an I/D cache coherency issue.
+
+More recently we noticed that the /proc/sys/vm/compact_memory interface
+was quite efficient at provoking this class of userspace crashes.
+
+Studying the code in mm/migrate.c, we found that a distinction is made
+between migrating a page that is mapped at the instant of migration and
+one that is not mapped. Our problem turned out to be the non-mapped pages.
+
+For the non-mapped page the code performs a copy of the page content and
+all relevant meta-data of the page without doing the required D-cache
+maintenance. This leaves dirty data in the D-cache of the CPU and on
+the 1004K cores this data is not visible to the I-cache. A subsequent
+page-fault that triggers a mapping of the page will happily serve the
+process with potentially stale code.
+
+What about ARM, then? Shouldn't this bug have seen greater exposure
+there? Well, ARM became immune to this flaw back in 2010; see commit
+c01778001a4f ("ARM: 6379/1: Assume new page cache pages have dirty
+D-cache").
+
+My proposed fix moves the D-cache maintenance inside move_to_new_page to
+make it common for both cases.
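+
+In essence, the non-mapped path boils down to the following (a rough
+sketch, condensed from the page-copy and move_to_new_page() code rather
+than quoted from it):
+
+	void *dst = kmap_atomic(newpage);
+	void *src = kmap_atomic(page);
+
+	memcpy(dst, src, PAGE_SIZE);	/* leaves dirty lines in the D-cache */
+	kunmap_atomic(src);
+	kunmap_atomic(dst);
+
+	/* Without this, a later fault that maps newpage as executable can
+	 * let the I-cache fetch stale contents on non-coherent cores. */
+	flush_dcache_page(newpage);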
+
+Link: http://lkml.kernel.org/r/20190315083502.11849-1-larper@axis.com
+Fixes: 97ee0524614 ("flush cache before installing new page at migraton")
+Signed-off-by: Lars Persson <larper@axis.com>
+Reviewed-by: Paul Burton <paul.burton@mips.com>
+Acked-by: Mel Gorman <mgorman@techsingularity.net>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/migrate.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -248,10 +248,8 @@ static bool remove_migration_pte(struct
+ pte = swp_entry_to_pte(entry);
+ } else if (is_device_public_page(new)) {
+ pte = pte_mkdevmap(pte);
+- flush_dcache_page(new);
+ }
+- } else
+- flush_dcache_page(new);
++ }
+
+ #ifdef CONFIG_HUGETLB_PAGE
+ if (PageHuge(new)) {
+@@ -983,6 +981,13 @@ static int move_to_new_page(struct page
+ */
+ if (!PageMappingFlags(page))
+ page->mapping = NULL;
++
++ if (unlikely(is_zone_device_page(newpage))) {
++ if (is_device_public_page(newpage))
++ flush_dcache_page(newpage);
++ } else
++ flush_dcache_page(newpage);
++
+ }
+ out:
+ return rc;
usb-xhci-dbc-don-t-free-all-memory-with-spinlock-held.patch
xhci-don-t-let-usb3-ports-stuck-in-polling-state-prevent-suspend.patch
usb-cdc-acm-fix-race-during-wakeup-blocking-tx-traffic.patch
+mm-add-support-for-kmem-caches-in-dma32-zone.patch
+iommu-io-pgtable-arm-v7s-request-dma32-memory-and-improve-debugging.patch
+mm-mempolicy-make-mbind-return-eio-when-mpol_mf_strict-is-specified.patch
+mm-migrate.c-add-missing-flush_dcache_page-for-non-mapped-page-migrate.patch