--- /dev/null
+From f1071c3e2473ae19a7f5d892a187c4cab1a61f2e Mon Sep 17 00:00:00 2001
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+Date: Mon, 11 Feb 2019 16:27:58 +0200
+Subject: crypto: ccree - add missing inline qualifier
+
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+
+commit f1071c3e2473ae19a7f5d892a187c4cab1a61f2e upstream.
+
+Commit 1358c13a48c4 ("crypto: ccree - fix resume race condition on init")
+was missing an "inline" qualifier for stub function used when CONFIG_PM
+is not set causing a build warning.
+
+Fixes: 1358c13a48c4 ("crypto: ccree - fix resume race condition on init")
+Cc: stable@kernel.org # v4.20
+Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccree/cc_pm.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/ccree/cc_pm.h
++++ b/drivers/crypto/ccree/cc_pm.h
+@@ -30,7 +30,7 @@ static inline int cc_pm_init(struct cc_d
+ return 0;
+ }
+
+-static void cc_pm_go(struct cc_drvdata *drvdata) {}
++static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
+
+ static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
+
--- /dev/null
+From 2216322919c8608a448d7ebc560a845238a5d6b6 Mon Sep 17 00:00:00 2001
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Date: Mon, 7 Jan 2019 12:41:46 -0500
+Subject: drm: Block fb changes for async plane updates
+
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+
+commit 2216322919c8608a448d7ebc560a845238a5d6b6 upstream.
+
+The prepare_fb call always happens on new_plane_state.
+
+The drm_atomic_helper_cleanup_planes checks to see if
+plane state pointer has changed when deciding to call cleanup_fb on
+either the new_plane_state or the old_plane_state.
+
+For a non-async atomic commit the state pointer is swapped, so this
+helper calls prepare_fb on the new_plane_state and cleanup_fb on the
+old_plane_state. This makes sense, since we want to prepare the
+framebuffer we are going to use and clean up the framebuffer we are
+no longer using.
+
+For the async atomic update helpers this differs. The async atomic
+update helpers perform in-place updates on the existing state. They call
+drm_atomic_helper_cleanup_planes but the state pointer is not swapped.
+This means that prepare_fb is called on the new_plane_state and
+cleanup_fb is called on the new_plane_state (not the old).
+
+In the case where old_plane_state->fb == new_plane_state->fb then
+there should be no behavioral difference between an async update
+and a non-async commit. But there are issues that arise when
+old_plane_state->fb != new_plane_state->fb.
+
+The first is that the new_plane_state->fb is immediately cleaned up
+after it has been prepared, so we're using a fb that we shouldn't
+be.
+
+The second occurs during a sequence of async atomic updates and
+non-async regular atomic commits. Suppose there are two framebuffers
+being interleaved in a double-buffering scenario, fb1 and fb2:
+
+- Async update, oldfb = NULL, newfb = fb1, prepare fb1, cleanup fb1
+- Async update, oldfb = fb1, newfb = fb2, prepare fb2, cleanup fb2
+- Non-async commit, oldfb = fb2, newfb = fb1, prepare fb1, cleanup fb2
+
+We call cleanup_fb on fb2 twice in this example scenario, and any
+further use will result in use-after-free.
+
+The simple fix to this problem is to block framebuffer changes
+in the drm_atomic_helper_async_check function for now.
+
+v2: Move check by itself, add a FIXME (Daniel)
+
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: Harry Wentland <harry.wentland@amd.com>
+Cc: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Cc: <stable@vger.kernel.org> # v4.14+
+Fixes: fef9df8b5945 ("drm/atomic: initial support for asynchronous plane update")
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Acked-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Reviewed-by: Daniel Vetter <daniel@ffwll.ch>
+Signed-off-by: Harry Wentland <harry.wentland@amd.com>
+Link: https://patchwork.freedesktop.org/patch/275364/
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_atomic_helper.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/gpu/drm/drm_atomic_helper.c
++++ b/drivers/gpu/drm/drm_atomic_helper.c
+@@ -1584,6 +1584,15 @@ int drm_atomic_helper_async_check(struct
+ old_plane_state->crtc != new_plane_state->crtc)
+ return -EINVAL;
+
++ /*
++ * FIXME: Since prepare_fb and cleanup_fb are always called on
++ * the new_plane_state for async updates we need to block framebuffer
++ * changes. This prevents use of a fb that's been cleaned up and
++ * double cleanups from occuring.
++ */
++ if (old_plane_state->fb != new_plane_state->fb)
++ return -EINVAL;
++
+ funcs = plane->helper_private;
+ if (!funcs->atomic_async_update)
+ return -EINVAL;
--- /dev/null
+From cb6acd01e2e43fd8bad11155752b7699c3d0fb76 Mon Sep 17 00:00:00 2001
+From: Mike Kravetz <mike.kravetz@oracle.com>
+Date: Thu, 28 Feb 2019 16:22:02 -0800
+Subject: hugetlbfs: fix races and page leaks during migration
+
+From: Mike Kravetz <mike.kravetz@oracle.com>
+
+commit cb6acd01e2e43fd8bad11155752b7699c3d0fb76 upstream.
+
+hugetlb pages should only be migrated if they are 'active'. The
+routines set/clear_page_huge_active() modify the active state of hugetlb
+pages.
+
+When a new hugetlb page is allocated at fault time, set_page_huge_active
+is called before the page is locked. Therefore, another thread could
+race and migrate the page while it is being added to page table by the
+fault code. This race is somewhat hard to trigger, but can be seen by
+strategically adding udelay to simulate worst case scheduling behavior.
+Depending on 'how' the code races, various BUG()s could be triggered.
+
+To address this issue, simply delay the set_page_huge_active call until
+after the page is successfully added to the page table.
+
+Hugetlb pages can also be leaked at migration time if the pages are
+associated with a file in an explicitly mounted hugetlbfs filesystem.
+For example, consider a two node system with 4GB worth of huge pages
+available. A program mmaps a 2G file in a hugetlbfs filesystem. It
+then migrates the pages associated with the file from one node to
+another. When the program exits, huge page counts are as follows:
+
+ node0
+ 1024 free_hugepages
+ 1024 nr_hugepages
+
+ node1
+ 0 free_hugepages
+ 1024 nr_hugepages
+
+ Filesystem Size Used Avail Use% Mounted on
+ nodev 4.0G 2.0G 2.0G 50% /var/opt/hugepool
+
+That is as expected. 2G of huge pages are taken from the free_hugepages
+counts, and 2G is the size of the file in the explicitly mounted
+filesystem. If the file is then removed, the counts become:
+
+ node0
+ 1024 free_hugepages
+ 1024 nr_hugepages
+
+ node1
+ 1024 free_hugepages
+ 1024 nr_hugepages
+
+ Filesystem Size Used Avail Use% Mounted on
+ nodev 4.0G 2.0G 2.0G 50% /var/opt/hugepool
+
+Note that the filesystem still shows 2G of pages used, while there
+actually are no huge pages in use. The only way to 'fix' the filesystem
+accounting is to unmount the filesystem.
+
+If a hugetlb page is associated with an explicitly mounted filesystem,
+this information in contained in the page_private field. At migration
+time, this information is not preserved. To fix, simply transfer
+page_private from old to new page at migration time if necessary.
+
+There is a related race with removing a huge page from a file and
+migration. When a huge page is removed from the pagecache, the
+page_mapping() field is cleared, yet page_private remains set until the
+page is actually freed by free_huge_page(). A page could be migrated
+while in this state. However, since page_mapping() is not set the
+hugetlbfs specific routine to transfer page_private is not called and we
+leak the page count in the filesystem.
+
+To fix that, check for this condition before migrating a huge page. If
+the condition is detected, return EBUSY for the page.
+
+Link: http://lkml.kernel.org/r/74510272-7319-7372-9ea6-ec914734c179@oracle.com
+Link: http://lkml.kernel.org/r/20190212221400.3512-1-mike.kravetz@oracle.com
+Fixes: bcc54222309c ("mm: hugetlb: introduce page_huge_active")
+Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
+Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Davidlohr Bueso <dave@stgolabs.net>
+Cc: <stable@vger.kernel.org>
+[mike.kravetz@oracle.com: v2]
+ Link: http://lkml.kernel.org/r/7534d322-d782-8ac6-1c8d-a8dc380eb3ab@oracle.com
+[mike.kravetz@oracle.com: update comment and changelog]
+ Link: http://lkml.kernel.org/r/420bcfd6-158b-38e4-98da-26d0cd85bd01@oracle.com
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/hugetlbfs/inode.c | 12 ++++++++++++
+ mm/hugetlb.c | 16 +++++++++++++---
+ mm/migrate.c | 11 +++++++++++
+ 3 files changed, 36 insertions(+), 3 deletions(-)
+
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -859,6 +859,18 @@ static int hugetlbfs_migrate_page(struct
+ rc = migrate_huge_page_move_mapping(mapping, newpage, page);
+ if (rc != MIGRATEPAGE_SUCCESS)
+ return rc;
++
++ /*
++ * page_private is subpool pointer in hugetlb pages. Transfer to
++ * new page. PagePrivate is not associated with page_private for
++ * hugetlb pages and can not be set here as only page_huge_active
++ * pages can be migrated.
++ */
++ if (page_private(page)) {
++ set_page_private(newpage, page_private(page));
++ set_page_private(page, 0);
++ }
++
+ if (mode != MIGRATE_SYNC_NO_COPY)
+ migrate_page_copy(newpage, page);
+ else
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3625,7 +3625,6 @@ retry_avoidcopy:
+ copy_user_huge_page(new_page, old_page, address, vma,
+ pages_per_huge_page(h));
+ __SetPageUptodate(new_page);
+- set_page_huge_active(new_page);
+
+ mmun_start = haddr;
+ mmun_end = mmun_start + huge_page_size(h);
+@@ -3647,6 +3646,7 @@ retry_avoidcopy:
+ make_huge_pte(vma, new_page, 1));
+ page_remove_rmap(old_page, true);
+ hugepage_add_new_anon_rmap(new_page, vma, haddr);
++ set_page_huge_active(new_page);
+ /* Make the old page be freed below */
+ new_page = old_page;
+ }
+@@ -3731,6 +3731,7 @@ static vm_fault_t hugetlb_no_page(struct
+ pte_t new_pte;
+ spinlock_t *ptl;
+ unsigned long haddr = address & huge_page_mask(h);
++ bool new_page = false;
+
+ /*
+ * Currently, we are forced to kill the process in the event the
+@@ -3792,7 +3793,7 @@ retry:
+ }
+ clear_huge_page(page, address, pages_per_huge_page(h));
+ __SetPageUptodate(page);
+- set_page_huge_active(page);
++ new_page = true;
+
+ if (vma->vm_flags & VM_MAYSHARE) {
+ int err = huge_add_to_page_cache(page, mapping, idx);
+@@ -3863,6 +3864,15 @@ retry:
+ }
+
+ spin_unlock(ptl);
++
++ /*
++ * Only make newly allocated pages active. Existing pages found
++ * in the pagecache could be !page_huge_active() if they have been
++ * isolated for migration.
++ */
++ if (new_page)
++ set_page_huge_active(page);
++
+ unlock_page(page);
+ out:
+ return ret;
+@@ -4097,7 +4107,6 @@ int hugetlb_mcopy_atomic_pte(struct mm_s
+ * the set_pte_at() write.
+ */
+ __SetPageUptodate(page);
+- set_page_huge_active(page);
+
+ mapping = dst_vma->vm_file->f_mapping;
+ idx = vma_hugecache_offset(h, dst_vma, dst_addr);
+@@ -4165,6 +4174,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_s
+ update_mmu_cache(dst_vma, dst_addr, dst_pte);
+
+ spin_unlock(ptl);
++ set_page_huge_active(page);
+ if (vm_shared)
+ unlock_page(page);
+ ret = 0;
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1293,6 +1293,16 @@ static int unmap_and_move_huge_page(new_
+ lock_page(hpage);
+ }
+
++ /*
++ * Check for pages which are in the process of being freed. Without
++ * page_mapping() set, hugetlbfs specific move page routine will not
++ * be called and we could leak usage counts for subpools.
++ */
++ if (page_private(hpage) && !page_mapping(hpage)) {
++ rc = -EBUSY;
++ goto out_unlock;
++ }
++
+ if (PageAnon(hpage))
+ anon_vma = page_get_anon_vma(hpage);
+
+@@ -1323,6 +1333,7 @@ put_anon:
+ put_new_page = NULL;
+ }
+
++out_unlock:
+ unlock_page(hpage);
+ out:
+ if (rc != -EAGAIN)
--- /dev/null
+From 18836b48ebae20850631ee2916d0cdbb86df813d Mon Sep 17 00:00:00 2001
+From: Jonas Gorski <jonas.gorski@gmail.com>
+Date: Thu, 21 Feb 2019 10:56:42 +0100
+Subject: MIPS: BCM63XX: provide DMA masks for ethernet devices
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+commit 18836b48ebae20850631ee2916d0cdbb86df813d upstream.
+
+The switch to the generic dma ops made dma masks mandatory, breaking
+devices having them not set. In case of bcm63xx, it broke ethernet with
+the following warning when trying to up the device:
+
+[ 2.633123] ------------[ cut here ]------------
+[ 2.637949] WARNING: CPU: 0 PID: 325 at ./include/linux/dma-mapping.h:516 bcm_enetsw_open+0x160/0xbbc
+[ 2.647423] Modules linked in: gpio_button_hotplug
+[ 2.652361] CPU: 0 PID: 325 Comm: ip Not tainted 4.19.16 #0
+[ 2.658080] Stack : 80520000 804cd3ec 00000000 00000000 804ccc00 87085bdc 87d3f9d4 804f9a17
+[ 2.666707] 8049cf18 00000145 80a942a0 00000204 80ac0000 10008400 87085b90 eb3d5ab7
+[ 2.675325] 00000000 00000000 80ac0000 000022b0 00000000 00000000 00000007 00000000
+[ 2.683954] 0000007a 80500000 0013b381 00000000 80000000 00000000 804a1664 80289878
+[ 2.692572] 00000009 00000204 80ac0000 00000200 00000002 00000000 00000000 80a90000
+[ 2.701191] ...
+[ 2.703701] Call Trace:
+[ 2.706244] [<8001f3c8>] show_stack+0x58/0x100
+[ 2.710840] [<800336e4>] __warn+0xe4/0x118
+[ 2.715049] [<800337d4>] warn_slowpath_null+0x48/0x64
+[ 2.720237] [<80289878>] bcm_enetsw_open+0x160/0xbbc
+[ 2.725347] [<802d1d4c>] __dev_open+0xf8/0x16c
+[ 2.729913] [<802d20cc>] __dev_change_flags+0x100/0x1c4
+[ 2.735290] [<802d21b8>] dev_change_flags+0x28/0x70
+[ 2.740326] [<803539e0>] devinet_ioctl+0x310/0x7b0
+[ 2.745250] [<80355fd8>] inet_ioctl+0x1f8/0x224
+[ 2.749939] [<802af290>] sock_ioctl+0x30c/0x488
+[ 2.754632] [<80112b34>] do_vfs_ioctl+0x740/0x7dc
+[ 2.759459] [<80112c20>] ksys_ioctl+0x50/0x94
+[ 2.763955] [<800240b8>] syscall_common+0x34/0x58
+[ 2.768782] ---[ end trace fb1a6b14d74e28b6 ]---
+[ 2.773544] bcm63xx_enetsw bcm63xx_enetsw.0: cannot allocate rx ring 512
+
+Fix this by adding appropriate DMA masks for the platform devices.
+
+Fixes: f8c55dc6e828 ("MIPS: use generic dma noncoherent ops for simple noncoherent platforms")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: Paul Burton <paul.burton@mips.com>
+Cc: linux-mips@linux-mips.org
+Cc: linux-kernel@vger.kernel.org
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: James Hogan <jhogan@kernel.org>
+Cc: stable@vger.kernel.org # v4.19+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/bcm63xx/dev-enet.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/mips/bcm63xx/dev-enet.c
++++ b/arch/mips/bcm63xx/dev-enet.c
+@@ -70,6 +70,8 @@ static struct platform_device bcm63xx_en
+
+ static int shared_device_registered;
+
++static u64 enet_dmamask = DMA_BIT_MASK(32);
++
+ static struct resource enet0_res[] = {
+ {
+ .start = -1, /* filled at runtime */
+@@ -99,6 +101,8 @@ static struct platform_device bcm63xx_en
+ .resource = enet0_res,
+ .dev = {
+ .platform_data = &enet0_pd,
++ .dma_mask = &enet_dmamask,
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ };
+
+@@ -131,6 +135,8 @@ static struct platform_device bcm63xx_en
+ .resource = enet1_res,
+ .dev = {
+ .platform_data = &enet1_pd,
++ .dma_mask = &enet_dmamask,
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ };
+
+@@ -157,6 +163,8 @@ static struct platform_device bcm63xx_en
+ .resource = enetsw_res,
+ .dev = {
+ .platform_data = &enetsw_pd,
++ .dma_mask = &enet_dmamask,
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ };
+
--- /dev/null
+From e0bf304e4a00d66d90904a6c5b93141f177cf6d2 Mon Sep 17 00:00:00 2001
+From: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+Date: Wed, 27 Feb 2019 10:42:56 +0100
+Subject: MIPS: fix memory setup for platforms with PHYS_OFFSET != 0
+
+From: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+
+commit e0bf304e4a00d66d90904a6c5b93141f177cf6d2 upstream.
+
+For platforms, which use a PHYS_OFFSET != 0, symbol _end also
+contains that offset. So when calling memblock_reserve() for
+reserving kernel the size argument needs to be adjusted.
+
+Fixes: bcec54bf3118 ("mips: switch to NO_BOOTMEM")
+Acked-by: Mike Rapoport <rppt@linux.ibm.com>
+Signed-off-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+Signed-off-by: Paul Burton <paul.burton@mips.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: James Hogan <jhogan@kernel.org>
+Cc: linux-mips@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
+Cc: stable@vger.kernel.org # v4.20+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/setup.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -384,7 +384,8 @@ static void __init bootmem_init(void)
+ init_initrd();
+ reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));
+
+- memblock_reserve(PHYS_OFFSET, reserved_end << PAGE_SHIFT);
++ memblock_reserve(PHYS_OFFSET,
++ (reserved_end << PAGE_SHIFT) - PHYS_OFFSET);
+
+ /*
+ * max_low_pfn is not a number of pages. The number of pages
--- /dev/null
+From 94ee12b507db8b5876e31c9d6c9d84f556a4b49f Mon Sep 17 00:00:00 2001
+From: Michael Clark <michaeljclark@mac.com>
+Date: Mon, 11 Feb 2019 17:38:29 +1300
+Subject: MIPS: fix truncation in __cmpxchg_small for short values
+
+From: Michael Clark <michaeljclark@mac.com>
+
+commit 94ee12b507db8b5876e31c9d6c9d84f556a4b49f upstream.
+
+__cmpxchg_small erroneously uses u8 for load comparison which can
+be either char or short. This patch changes the local variable to
+u32 which is sufficiently sized, as the loaded value is already
+masked and shifted appropriately. Using an integer size avoids
+any unnecessary canonicalization from use of non native widths.
+
+This patch is part of a series that adapts the MIPS small word
+atomics code for xchg and cmpxchg on short and char to RISC-V.
+
+Cc: RISC-V Patches <patches@groups.riscv.org>
+Cc: Linux RISC-V <linux-riscv@lists.infradead.org>
+Cc: Linux MIPS <linux-mips@linux-mips.org>
+Signed-off-by: Michael Clark <michaeljclark@mac.com>
+[paul.burton@mips.com:
+ - Fix variable typo per Jonas Gorski.
+ - Consolidate load variable with other declarations.]
+Signed-off-by: Paul Burton <paul.burton@mips.com>
+Fixes: 3ba7f44d2b19 ("MIPS: cmpxchg: Implement 1 byte & 2 byte cmpxchg()")
+Cc: stable@vger.kernel.org # v4.13+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/kernel/cmpxchg.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/arch/mips/kernel/cmpxchg.c
++++ b/arch/mips/kernel/cmpxchg.c
+@@ -54,10 +54,9 @@ unsigned long __xchg_small(volatile void
+ unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size)
+ {
+- u32 mask, old32, new32, load32;
++ u32 mask, old32, new32, load32, load;
+ volatile u32 *ptr32;
+ unsigned int shift;
+- u8 load;
+
+ /* Check that ptr is naturally aligned */
+ WARN_ON((unsigned long)ptr & (size - 1));
--- /dev/null
+From 0a1d52994d440e21def1c2174932410b4f2a98a1 Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Wed, 27 Feb 2019 21:29:52 +0100
+Subject: mm: enforce min addr even if capable() in expand_downwards()
+
+From: Jann Horn <jannh@google.com>
+
+commit 0a1d52994d440e21def1c2174932410b4f2a98a1 upstream.
+
+security_mmap_addr() does a capability check with current_cred(), but
+we can reach this code from contexts like a VFS write handler where
+current_cred() must not be used.
+
+This can be abused on systems without SMAP to make NULL pointer
+dereferences exploitable again.
+
+Fixes: 8869477a49c3 ("security: protect from stack expansion into low vm addresses")
+Cc: stable@kernel.org
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/mmap.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2415,12 +2415,11 @@ int expand_downwards(struct vm_area_stru
+ {
+ struct mm_struct *mm = vma->vm_mm;
+ struct vm_area_struct *prev;
+- int error;
++ int error = 0;
+
+ address &= PAGE_MASK;
+- error = security_mmap_addr(address);
+- if (error)
+- return error;
++ if (address < mmap_min_addr)
++ return -EPERM;
+
+ /* Enforce stack_guard_gap */
+ prev = vma->vm_prev;
--- /dev/null
+From e5723f95d6b493dd437f1199cacb41459713b32f Mon Sep 17 00:00:00 2001
+From: Ritesh Harjani <riteshh@codeaurora.org>
+Date: Fri, 22 Feb 2019 19:21:34 +0530
+Subject: mmc: core: Fix NULL ptr crash from mmc_should_fail_request
+
+From: Ritesh Harjani <riteshh@codeaurora.org>
+
+commit e5723f95d6b493dd437f1199cacb41459713b32f upstream.
+
+In case of CQHCI, mrq->cmd may be NULL for data requests (non DCMD).
+In such case mmc_should_fail_request is directly dereferencing
+mrq->cmd while cmd is NULL.
+Fix this by checking for mrq->cmd pointer.
+
+Fixes: 72a5af554df8 ("mmc: core: Add support for handling CQE requests")
+Signed-off-by: Ritesh Harjani <riteshh@codeaurora.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/core/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -95,7 +95,7 @@ static void mmc_should_fail_request(stru
+ if (!data)
+ return;
+
+- if (cmd->error || data->error ||
++ if ((cmd && cmd->error) || data->error ||
+ !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
+ return;
+
--- /dev/null
+From d07e9fadf3a6b466ca3ae90fa4859089ff20530f Mon Sep 17 00:00:00 2001
+From: Alamy Liu <alamy.liu@gmail.com>
+Date: Mon, 25 Feb 2019 11:22:14 -0800
+Subject: mmc: cqhci: Fix a tiny potential memory leak on error condition
+
+From: Alamy Liu <alamy.liu@gmail.com>
+
+commit d07e9fadf3a6b466ca3ae90fa4859089ff20530f upstream.
+
+Free up the allocated memory in the case of error return
+
+The value of mmc_host->cqe_enabled stays 'false'. Thus, cqhci_disable
+(mmc_cqe_ops->cqe_disable) won't be called to free the memory. Also,
+cqhci_disable() seems to be designed to disable and free all resources, not
+suitable to handle this corner case.
+
+Fixes: a4080225f51d ("mmc: cqhci: support for command queue enabled host")
+Signed-off-by: Alamy Liu <alamy.liu@gmail.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/cqhci.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/cqhci.c
++++ b/drivers/mmc/host/cqhci.c
+@@ -217,12 +217,21 @@ static int cqhci_host_alloc_tdl(struct c
+ cq_host->desc_size,
+ &cq_host->desc_dma_base,
+ GFP_KERNEL);
++ if (!cq_host->desc_base)
++ return -ENOMEM;
++
+ cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
+ cq_host->data_size,
+ &cq_host->trans_desc_dma_base,
+ GFP_KERNEL);
+- if (!cq_host->desc_base || !cq_host->trans_desc_base)
++ if (!cq_host->trans_desc_base) {
++ dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
++ cq_host->desc_base,
++ cq_host->desc_dma_base);
++ cq_host->desc_base = NULL;
++ cq_host->desc_dma_base = 0;
+ return -ENOMEM;
++ }
+
+ pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
+ mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
--- /dev/null
+From 27ec9dc17c48ea2e642ccb90b4ebf7fd47468911 Mon Sep 17 00:00:00 2001
+From: Alamy Liu <alamy.liu@gmail.com>
+Date: Mon, 25 Feb 2019 11:22:13 -0800
+Subject: mmc: cqhci: fix space allocated for transfer descriptor
+
+From: Alamy Liu <alamy.liu@gmail.com>
+
+commit 27ec9dc17c48ea2e642ccb90b4ebf7fd47468911 upstream.
+
+There is not enough space being allocated when DCMD is disabled.
+
+CQE_DCMD is not necessary to be enabled when CQE is enabled.
+(Software could halt CQE to send command)
+
+In the case that CQE_DCMD is not enabled, it still needs to allocate
+space for data transfer. For instance:
+ CQE_DCMD is enabled: 31 slots space (one slot used by DCMD)
+ CQE_DCMD is disabled: 32 slots space
+
+Fixes: a4080225f51d ("mmc: cqhci: support for command queue enabled host")
+Signed-off-by: Alamy Liu <alamy.liu@gmail.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/cqhci.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/cqhci.c
++++ b/drivers/mmc/host/cqhci.c
+@@ -201,7 +201,7 @@ static int cqhci_host_alloc_tdl(struct c
+ cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
+
+ cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
+- (cq_host->num_slots - 1);
++ cq_host->mmc->cqe_qdepth;
+
+ pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
+ mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
--- /dev/null
+From e30be063d6dbcc0f18b1eb25fa709fdef89201fb Mon Sep 17 00:00:00 2001
+From: BOUGH CHEN <haibo.chen@nxp.com>
+Date: Thu, 28 Feb 2019 10:15:42 +0000
+Subject: mmc: sdhci-esdhc-imx: correct the fix of ERR004536
+
+From: BOUGH CHEN <haibo.chen@nxp.com>
+
+commit e30be063d6dbcc0f18b1eb25fa709fdef89201fb upstream.
+
+Commit 18094430d6b5 ("mmc: sdhci-esdhc-imx: add ADMA Length
+Mismatch errata fix") involve the fix of ERR004536, but the
+fix is incorrect. Double confirm with IC, need to clear the
+bit 7 of register 0x6c rather than set this bit 7.
+Here is the definition of bit 7 of 0x6c:
+ 0: enable the new IC fix for ERR004536
+ 1: do not use the IC fix, keep the same as before
+
+Find this issue on i.MX845s-evk board when enable CMDQ, and
+let system in heavy loading.
+
+root@imx8mmevk:~# dd if=/dev/mmcblk2 of=/dev/null bs=1M &
+root@imx8mmevk:~# memtester 1000M > /dev/zero &
+root@imx8mmevk:~# [ 139.897220] mmc2: cqhci: timeout for tag 16
+[ 139.901417] mmc2: cqhci: ============ CQHCI REGISTER DUMP ===========
+[ 139.907862] mmc2: cqhci: Caps: 0x0000310a | Version: 0x00000510
+[ 139.914311] mmc2: cqhci: Config: 0x00001001 | Control: 0x00000000
+[ 139.920753] mmc2: cqhci: Int stat: 0x00000000 | Int enab: 0x00000006
+[ 139.927193] mmc2: cqhci: Int sig: 0x00000006 | Int Coal: 0x00000000
+[ 139.933634] mmc2: cqhci: TDL base: 0x7809c000 | TDL up32: 0x00000000
+[ 139.940073] mmc2: cqhci: Doorbell: 0x00030000 | TCN: 0x00000000
+[ 139.946518] mmc2: cqhci: Dev queue: 0x00010000 | Dev Pend: 0x00010000
+[ 139.952967] mmc2: cqhci: Task clr: 0x00000000 | SSC1: 0x00011000
+[ 139.959411] mmc2: cqhci: SSC2: 0x00000001 | DCMD rsp: 0x00000000
+[ 139.965857] mmc2: cqhci: RED mask: 0xfdf9a080 | TERRI: 0x00000000
+[ 139.972308] mmc2: cqhci: Resp idx: 0x0000002e | Resp arg: 0x00000900
+[ 139.978761] mmc2: sdhci: ============ SDHCI REGISTER DUMP ===========
+[ 139.985214] mmc2: sdhci: Sys addr: 0xb2c19000 | Version: 0x00000002
+[ 139.991669] mmc2: sdhci: Blk size: 0x00000200 | Blk cnt: 0x00000400
+[ 139.998127] mmc2: sdhci: Argument: 0x40110400 | Trn mode: 0x00000033
+[ 140.004618] mmc2: sdhci: Present: 0x01088a8f | Host ctl: 0x00000030
+[ 140.011113] mmc2: sdhci: Power: 0x00000002 | Blk gap: 0x00000080
+[ 140.017583] mmc2: sdhci: Wake-up: 0x00000008 | Clock: 0x0000000f
+[ 140.024039] mmc2: sdhci: Timeout: 0x0000008f | Int stat: 0x00000000
+[ 140.030497] mmc2: sdhci: Int enab: 0x107f4000 | Sig enab: 0x107f4000
+[ 140.036972] mmc2: sdhci: AC12 err: 0x00000000 | Slot int: 0x00000502
+[ 140.043426] mmc2: sdhci: Caps: 0x07eb0000 | Caps_1: 0x8000b407
+[ 140.049867] mmc2: sdhci: Cmd: 0x00002c1a | Max curr: 0x00ffffff
+[ 140.056314] mmc2: sdhci: Resp[0]: 0x00000900 | Resp[1]: 0xffffffff
+[ 140.062755] mmc2: sdhci: Resp[2]: 0x328f5903 | Resp[3]: 0x00d00f00
+[ 140.069195] mmc2: sdhci: Host ctl2: 0x00000008
+[ 140.073640] mmc2: sdhci: ADMA Err: 0x00000007 | ADMA Ptr: 0x7809c108
+[ 140.080079] mmc2: sdhci: ============================================
+[ 140.086662] mmc2: running CQE recovery
+
+Fixes: 18094430d6b5 ("mmc: sdhci-esdhc-imx: add ADMA Length Mismatch errata fix")
+Signed-off-by: Haibo Chen <haibo.chen@nxp.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci-esdhc-imx.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -1097,11 +1097,12 @@ static void sdhci_esdhc_imx_hwinit(struc
+ writel(readl(host->ioaddr + SDHCI_HOST_CONTROL)
+ | ESDHC_BURST_LEN_EN_INCR,
+ host->ioaddr + SDHCI_HOST_CONTROL);
++
+ /*
+- * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
+- * TO1.1, it's harmless for MX6SL
+- */
+- writel(readl(host->ioaddr + 0x6c) | BIT(7),
++ * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
++ * TO1.1, it's harmless for MX6SL
++ */
++ writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
+ host->ioaddr + 0x6c);
+
+ /* disable DLL_CTRL delay line settings */
--- /dev/null
+From c9bd505dbd9d3dc80c496f88eafe70affdcf1ba6 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jonathan=20Neusch=C3=A4fer?= <j.neuschaefer@gmx.net>
+Date: Sun, 10 Feb 2019 18:31:07 +0100
+Subject: mmc: spi: Fix card detection during probe
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jonathan Neuschäfer <j.neuschaefer@gmx.net>
+
+commit c9bd505dbd9d3dc80c496f88eafe70affdcf1ba6 upstream.
+
+When using the mmc_spi driver with a card-detect pin, I noticed that the
+card was not detected immediately after probe, but only after it was
+unplugged and plugged back in (and the CD IRQ fired).
+
+The call tree looks something like this:
+
+mmc_spi_probe
+ mmc_add_host
+ mmc_start_host
+ _mmc_detect_change
+ mmc_schedule_delayed_work(&host->detect, 0)
+ mmc_rescan
+ host->bus_ops->detect(host)
+ mmc_detect
+ _mmc_detect_card_removed
+ host->ops->get_cd(host)
+ mmc_gpio_get_cd -> -ENOSYS (ctx->cd_gpio not set)
+ mmc_gpiod_request_cd
+ ctx->cd_gpio = desc
+
+To fix this issue, call mmc_detect_change after the card-detect GPIO/IRQ
+is registered.
+
+Signed-off-by: Jonathan Neuschäfer <j.neuschaefer@gmx.net>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/mmc_spi.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/mmc/host/mmc_spi.c
++++ b/drivers/mmc/host/mmc_spi.c
+@@ -1447,6 +1447,7 @@ static int mmc_spi_probe(struct spi_devi
+ mmc->caps &= ~MMC_CAP_NEEDS_POLL;
+ mmc_gpiod_request_cd_irq(mmc);
+ }
++ mmc_detect_change(mmc, 0);
+
+ if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) {
+ has_ro = true;
--- /dev/null
+From 5603731a15ef9ca317c122cc8c959f1dee1798b4 Mon Sep 17 00:00:00 2001
+From: Takeshi Saito <takeshi.saito.xv@renesas.com>
+Date: Thu, 21 Feb 2019 20:38:05 +0100
+Subject: mmc: tmio: fix access width of Block Count Register
+
+From: Takeshi Saito <takeshi.saito.xv@renesas.com>
+
+commit 5603731a15ef9ca317c122cc8c959f1dee1798b4 upstream.
+
+In R-Car Gen2 or later, the maximum number of transfer blocks is
+changed from 0xFFFF to 0xFFFFFFFF. Therefore, Block Count Register
+should use iowrite32().
+
+If another system (U-boot, Hypervisor OS, etc) uses bit[31:16], this
+value will not be cleared. So, SD/MMC card initialization fails.
+
+So, check for the bigger register and use appropriate write. Also, mark
+the register as extended on Gen2.
+
+Signed-off-by: Takeshi Saito <takeshi.saito.xv@renesas.com>
+[wsa: use max_blk_count in if(), add Gen2, update commit message]
+Signed-off-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Cc: stable@kernel.org
+Reviewed-by: Simon Horman <horms+renesas@verge.net.au>
+[Ulf: Fixed build error]
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/renesas_sdhi_sys_dmac.c | 1 +
+ drivers/mmc/host/tmio_mmc.h | 5 +++++
+ drivers/mmc/host/tmio_mmc_core.c | 6 +++++-
+ 3 files changed, 11 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
++++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+@@ -65,6 +65,7 @@ static const struct renesas_sdhi_of_data
+ .scc_offset = 0x0300,
+ .taps = rcar_gen2_scc_taps,
+ .taps_num = ARRAY_SIZE(rcar_gen2_scc_taps),
++ .max_blk_count = 0xffffffff,
+ };
+
+ /* Definitions for sampling clocks */
+--- a/drivers/mmc/host/tmio_mmc.h
++++ b/drivers/mmc/host/tmio_mmc.h
+@@ -271,6 +271,11 @@ static inline void sd_ctrl_write32_as_16
+ iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
+ }
+
++static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
++{
++ iowrite32(val, host->ctl + (addr << host->bus_shift));
++}
++
+ static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
+ const u32 *buf, int count)
+ {
+--- a/drivers/mmc/host/tmio_mmc_core.c
++++ b/drivers/mmc/host/tmio_mmc_core.c
+@@ -43,6 +43,7 @@
+ #include <linux/regulator/consumer.h>
+ #include <linux/mmc/sdio.h>
+ #include <linux/scatterlist.h>
++#include <linux/sizes.h>
+ #include <linux/spinlock.h>
+ #include <linux/swiotlb.h>
+ #include <linux/workqueue.h>
+@@ -692,7 +693,10 @@ static int tmio_mmc_start_data(struct tm
+
+ /* Set transfer length / blocksize */
+ sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
+- sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
++ if (host->mmc->max_blk_count >= SZ_64K)
++ sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks);
++ else
++ sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
+
+ tmio_mmc_start_dma(host, data);
+
--- /dev/null
+From 5c27ff5db1491a947264d6d4e4cbe43ae6535bae Mon Sep 17 00:00:00 2001
+From: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+Date: Mon, 18 Feb 2019 20:45:40 +0300
+Subject: mmc: tmio_mmc_core: don't claim spurious interrupts
+
+From: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+
+commit 5c27ff5db1491a947264d6d4e4cbe43ae6535bae upstream.
+
+I have encountered an interrupt storm during the eMMC chip probing (and
+the chip finally didn't get detected). It turned out that U-Boot left
+the DMAC interrupts enabled while the Linux driver didn't use those.
+The SDHI driver's interrupt handler somehow assumes that, even if an
+SDIO interrupt didn't happen, it should return IRQ_HANDLED. I think
+that if none of the enabled interrupts happened and got handled, we
+should return IRQ_NONE -- that way the kernel IRQ code recognizes
+a spurious interrupt and masks it off pretty quickly...
+
+Fixes: 7729c7a232a9 ("mmc: tmio: Provide separate interrupt handlers")
+Signed-off-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+Reviewed-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Tested-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Reviewed-by: Simon Horman <horms+renesas@verge.net.au>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/tmio_mmc_core.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/drivers/mmc/host/tmio_mmc_core.c
++++ b/drivers/mmc/host/tmio_mmc_core.c
+@@ -618,7 +618,7 @@ static bool __tmio_mmc_sdcard_irq(struct
+ return false;
+ }
+
+-static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
++static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
+ {
+ struct mmc_host *mmc = host->mmc;
+ struct tmio_mmc_data *pdata = host->pdata;
+@@ -626,7 +626,7 @@ static void __tmio_mmc_sdio_irq(struct t
+ unsigned int sdio_status;
+
+ if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
+- return;
++ return false;
+
+ status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
+ ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;
+@@ -639,6 +639,8 @@ static void __tmio_mmc_sdio_irq(struct t
+
+ if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
+ mmc_signal_sdio_irq(mmc);
++
++ return ireg;
+ }
+
+ irqreturn_t tmio_mmc_irq(int irq, void *devid)
+@@ -657,9 +659,10 @@ irqreturn_t tmio_mmc_irq(int irq, void *
+ if (__tmio_mmc_sdcard_irq(host, ireg, status))
+ return IRQ_HANDLED;
+
+- __tmio_mmc_sdio_irq(host);
++ if (__tmio_mmc_sdio_irq(host))
++ return IRQ_HANDLED;
+
+- return IRQ_HANDLED;
++ return IRQ_NONE;
+ }
+ EXPORT_SYMBOL_GPL(tmio_mmc_irq);
+
kvm-nsvm-clear-events-pending-from-svm_complete_inte.patch
kvm-selftests-fix-region-overlap-check-in-kvm_util.patch
kvm-selftests-check-returned-evmcs-version-range.patch
+mmc-spi-fix-card-detection-during-probe.patch
+mmc-tmio_mmc_core-don-t-claim-spurious-interrupts.patch
+mmc-tmio-fix-access-width-of-block-count-register.patch
+mmc-core-fix-null-ptr-crash-from-mmc_should_fail_request.patch
+mmc-cqhci-fix-space-allocated-for-transfer-descriptor.patch
+mmc-cqhci-fix-a-tiny-potential-memory-leak-on-error-condition.patch
+mmc-sdhci-esdhc-imx-correct-the-fix-of-err004536.patch
+mm-enforce-min-addr-even-if-capable-in-expand_downwards.patch
+drm-block-fb-changes-for-async-plane-updates.patch
+hugetlbfs-fix-races-and-page-leaks-during-migration.patch
+crypto-ccree-add-missing-inline-qualifier.patch
+mips-fix-truncation-in-__cmpxchg_small-for-short-values.patch
+mips-bcm63xx-provide-dma-masks-for-ethernet-devices.patch
+mips-fix-memory-setup-for-platforms-with-phys_offset-0.patch