--- /dev/null
+From b7faf971081a4e56147f082234bfff55135305cb Mon Sep 17 00:00:00 2001
+From: Vineet Gupta <vgupta@synopsys.com>
+Date: Wed, 27 May 2020 14:18:45 -0700
+Subject: ARC: elf: use right ELF_ARCH
+
+From: Vineet Gupta <vgupta@synopsys.com>
+
+commit b7faf971081a4e56147f082234bfff55135305cb upstream.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arc/include/asm/elf.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arc/include/asm/elf.h
++++ b/arch/arc/include/asm/elf.h
+@@ -26,7 +26,7 @@
+ #define R_ARC_32_PCREL 0x31
+
+ /*to set parameters in the core dumps */
+-#define ELF_ARCH EM_ARCOMPACT
++#define ELF_ARCH EM_ARC_INUSE
+ #define ELF_CLASS ELFCLASS32
+
+ #ifdef CONFIG_CPU_BIG_ENDIAN
--- /dev/null
+From 00fdec98d9881bf5173af09aebd353ab3b9ac729 Mon Sep 17 00:00:00 2001
+From: Vineet Gupta <vgupta@synopsys.com>
+Date: Tue, 19 May 2020 22:28:32 -0700
+Subject: ARC: entry: fix potential EFA clobber when TIF_SYSCALL_TRACE
+
+From: Vineet Gupta <vgupta@synopsys.com>
+
+commit 00fdec98d9881bf5173af09aebd353ab3b9ac729 upstream.
+
+Trap handler for syscall tracing reads EFA (Exception Fault Address),
+in case strace wants PC of trap instruction (EFA is not part of pt_regs
+as of current code).
+
+However this EFA read is racy as it happens after dropping to pure
+kernel mode (re-enabling interrupts). A taken interrupt could
+context-switch, trigger a different task's trap, clobbering EFA for this
+execution context.
+
+Fix this by reading EFA early, before re-enabling interrupts. A slight
+side benefit is de-duplication of FAKE_RET_FROM_EXCPN in trap handler.
+The trap handler is common to both ARCompact and ARCv2 builds too.
+
+This just came out of code rework/review and no real problem was reported
+but is clearly a potential problem specially for strace.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arc/kernel/entry.S | 16 +++++-----------
+ 1 file changed, 5 insertions(+), 11 deletions(-)
+
+--- a/arch/arc/kernel/entry.S
++++ b/arch/arc/kernel/entry.S
+@@ -156,7 +156,6 @@ END(EV_Extension)
+ tracesys:
+ ; save EFA in case tracer wants the PC of traced task
+ ; using ERET won't work since next-PC has already committed
+- lr r12, [efa]
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
+ st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address
+
+@@ -199,15 +198,9 @@ tracesys_exit:
+ ; Breakpoint TRAP
+ ; ---------------------------------------------
+ trap_with_param:
+-
+- ; stop_pc info by gdb needs this info
+- lr r0, [efa]
++ mov r0, r12 ; EFA in case ptracer/gdb wants stop_pc
+ mov r1, sp
+
+- ; Now that we have read EFA, it is safe to do "fake" rtie
+- ; and get out of CPU exception mode
+- FAKE_RET_FROM_EXCPN
+-
+ ; Save callee regs in case gdb wants to have a look
+ ; SP will grow up by size of CALLEE Reg-File
+ ; NOTE: clobbers r12
+@@ -234,6 +227,10 @@ ENTRY(EV_Trap)
+
+ EXCEPTION_PROLOGUE
+
++ lr r12, [efa]
++
++ FAKE_RET_FROM_EXCPN
++
+ ;============ TRAP 1 :breakpoints
+ ; Check ECR for trap with arg (PROLOGUE ensures r9 has ECR)
+ bmsk.f 0, r9, 7
+@@ -241,9 +238,6 @@ ENTRY(EV_Trap)
+
+ ;============ TRAP (no param): syscall top level
+
+- ; First return from Exception to pure K mode (Exception/IRQs renabled)
+- FAKE_RET_FROM_EXCPN
+-
+ ; If syscall tracing ongoing, invoke pre-post-hooks
+ GET_CURR_THR_INFO_FLAGS r10
+ btst r10, TIF_SYSCALL_TRACE
--- /dev/null
+From 4b661d6133c5d3a7c9aca0b4ee5a78c7766eff3f Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Mon, 6 Jul 2020 17:38:01 +0100
+Subject: arm64: arch_timer: Disable the compat vdso for cores affected by ARM64_WORKAROUND_1418040
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit 4b661d6133c5d3a7c9aca0b4ee5a78c7766eff3f upstream.
+
+ARM64_WORKAROUND_1418040 requires that AArch32 EL0 accesses to
+the virtual counter register are trapped and emulated by the kernel.
+This makes the vdso pretty pointless, and in some cases livelock
+prone.
+
+Provide a workaround entry that limits the vdso to 64bit tasks.
+
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200706163802.1836732-4-maz@kernel.org
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/clocksource/arm_arch_timer.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/clocksource/arm_arch_timer.c
++++ b/drivers/clocksource/arm_arch_timer.c
+@@ -381,6 +381,14 @@ static const struct arch_timer_erratum_w
+ .read_cntvct_el0 = arm64_858921_read_cntvct_el0,
+ },
+ #endif
++#ifdef CONFIG_ARM64_ERRATUM_1418040
++ {
++ .match_type = ate_match_local_cap_id,
++ .id = (void *)ARM64_WORKAROUND_1418040,
++ .desc = "ARM erratum 1418040",
++ .disable_compat_vdso = true,
++ },
++#endif
+ };
+
+ typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
--- /dev/null
+From 6bf9cd2eed9aee6d742bb9296c994a91f5316949 Mon Sep 17 00:00:00 2001
+From: Boris Burkov <boris@bur.io>
+Date: Wed, 17 Jun 2020 11:35:19 -0700
+Subject: btrfs: fix fatal extent_buffer readahead vs releasepage race
+
+From: Boris Burkov <boris@bur.io>
+
+commit 6bf9cd2eed9aee6d742bb9296c994a91f5316949 upstream.
+
+Under somewhat convoluted conditions, it is possible to attempt to
+release an extent_buffer that is under io, which triggers a BUG_ON in
+btrfs_release_extent_buffer_pages.
+
+This relies on a few different factors. First, extent_buffer reads done
+as readahead for searching use WAIT_NONE, so they free the local extent
+buffer reference while the io is outstanding. However, they should still
+be protected by TREE_REF. However, if the system is doing significant
+reclaim, and simultaneously heavily accessing the extent_buffers, it is
+possible for releasepage to race with two concurrent readahead attempts
+in a way that leaves TREE_REF unset when the readahead extent buffer is
+released.
+
+Essentially, if two tasks race to allocate a new extent_buffer, but the
+winner who attempts the first io is rebuffed by a page being locked
+(likely by the reclaim itself) then the loser will still go ahead with
+issuing the readahead. The loser's call to find_extent_buffer must also
+race with the reclaim task reading the extent_buffer's refcount as 1 in
+a way that allows the reclaim to re-clear the TREE_REF checked by
+find_extent_buffer.
+
+The following represents an example execution demonstrating the race:
+
+ CPU0 CPU1 CPU2
+reada_for_search reada_for_search
+ readahead_tree_block readahead_tree_block
+ find_create_tree_block find_create_tree_block
+ alloc_extent_buffer alloc_extent_buffer
+ find_extent_buffer // not found
+ allocates eb
+ lock pages
+ associate pages to eb
+ insert eb into radix tree
+ set TREE_REF, refs == 2
+ unlock pages
+ read_extent_buffer_pages // WAIT_NONE
+ not uptodate (brand new eb)
+ lock_page
+ if !trylock_page
+ goto unlock_exit // not an error
+ free_extent_buffer
+ release_extent_buffer
+ atomic_dec_and_test refs to 1
+ find_extent_buffer // found
+ try_release_extent_buffer
+ take refs_lock
+ reads refs == 1; no io
+ atomic_inc_not_zero refs to 2
+ mark_buffer_accessed
+ check_buffer_tree_ref
+ // not STALE, won't take refs_lock
+ refs == 2; TREE_REF set // no action
+ read_extent_buffer_pages // WAIT_NONE
+ clear TREE_REF
+ release_extent_buffer
+ atomic_dec_and_test refs to 1
+ unlock_page
+ still not uptodate (CPU1 read failed on trylock_page)
+ locks pages
+ set io_pages > 0
+ submit io
+ return
+ free_extent_buffer
+ release_extent_buffer
+ dec refs to 0
+ delete from radix tree
+ btrfs_release_extent_buffer_pages
+ BUG_ON(io_pages > 0)!!!
+
+We observe this at a very low rate in production and were also able to
+reproduce it in a test environment by introducing some spurious delays
+and by introducing probabilistic trylock_page failures.
+
+To fix it, we apply check_tree_ref at a point where it could not
+possibly be unset by a competing task: after io_pages has been
+incremented. All the codepaths that clear TREE_REF check for io, so they
+would not be able to clear it after this point until the io is done.
+
+Stack trace, for reference:
+[1417839.424739] ------------[ cut here ]------------
+[1417839.435328] kernel BUG at fs/btrfs/extent_io.c:4841!
+[1417839.447024] invalid opcode: 0000 [#1] SMP
+[1417839.502972] RIP: 0010:btrfs_release_extent_buffer_pages+0x20/0x1f0
+[1417839.517008] Code: ed e9 ...
+[1417839.558895] RSP: 0018:ffffc90020bcf798 EFLAGS: 00010202
+[1417839.570816] RAX: 0000000000000002 RBX: ffff888102d6def0 RCX: 0000000000000028
+[1417839.586962] RDX: 0000000000000002 RSI: ffff8887f0296482 RDI: ffff888102d6def0
+[1417839.603108] RBP: ffff88885664a000 R08: 0000000000000046 R09: 0000000000000238
+[1417839.619255] R10: 0000000000000028 R11: ffff88885664af68 R12: 0000000000000000
+[1417839.635402] R13: 0000000000000000 R14: ffff88875f573ad0 R15: ffff888797aafd90
+[1417839.651549] FS: 00007f5a844fa700(0000) GS:ffff88885f680000(0000) knlGS:0000000000000000
+[1417839.669810] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[1417839.682887] CR2: 00007f7884541fe0 CR3: 000000049f609002 CR4: 00000000003606e0
+[1417839.699037] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[1417839.715187] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[1417839.731320] Call Trace:
+[1417839.737103] release_extent_buffer+0x39/0x90
+[1417839.746913] read_block_for_search.isra.38+0x2a3/0x370
+[1417839.758645] btrfs_search_slot+0x260/0x9b0
+[1417839.768054] btrfs_lookup_file_extent+0x4a/0x70
+[1417839.778427] btrfs_get_extent+0x15f/0x830
+[1417839.787665] ? submit_extent_page+0xc4/0x1c0
+[1417839.797474] ? __do_readpage+0x299/0x7a0
+[1417839.806515] __do_readpage+0x33b/0x7a0
+[1417839.815171] ? btrfs_releasepage+0x70/0x70
+[1417839.824597] extent_readpages+0x28f/0x400
+[1417839.833836] read_pages+0x6a/0x1c0
+[1417839.841729] ? startup_64+0x2/0x30
+[1417839.849624] __do_page_cache_readahead+0x13c/0x1a0
+[1417839.860590] filemap_fault+0x6c7/0x990
+[1417839.869252] ? xas_load+0x8/0x80
+[1417839.876756] ? xas_find+0x150/0x190
+[1417839.884839] ? filemap_map_pages+0x295/0x3b0
+[1417839.894652] __do_fault+0x32/0x110
+[1417839.902540] __handle_mm_fault+0xacd/0x1000
+[1417839.912156] handle_mm_fault+0xaa/0x1c0
+[1417839.921004] __do_page_fault+0x242/0x4b0
+[1417839.930044] ? page_fault+0x8/0x30
+[1417839.937933] page_fault+0x1e/0x30
+[1417839.945631] RIP: 0033:0x33c4bae
+[1417839.952927] Code: Bad RIP value.
+[1417839.960411] RSP: 002b:00007f5a844f7350 EFLAGS: 00010206
+[1417839.972331] RAX: 000000000000006e RBX: 1614b3ff6a50398a RCX: 0000000000000000
+[1417839.988477] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000002
+[1417840.004626] RBP: 00007f5a844f7420 R08: 000000000000006e R09: 00007f5a94aeccb8
+[1417840.020784] R10: 00007f5a844f7350 R11: 0000000000000000 R12: 00007f5a94aecc79
+[1417840.036932] R13: 00007f5a94aecc78 R14: 00007f5a94aecc90 R15: 00007f5a94aecc40
+
+CC: stable@vger.kernel.org # 4.4+
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Boris Burkov <boris@bur.io>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/extent_io.c | 40 ++++++++++++++++++++++++----------------
+ 1 file changed, 24 insertions(+), 16 deletions(-)
+
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -4862,25 +4862,28 @@ struct extent_buffer *alloc_dummy_extent
+ static void check_buffer_tree_ref(struct extent_buffer *eb)
+ {
+ int refs;
+- /* the ref bit is tricky. We have to make sure it is set
+- * if we have the buffer dirty. Otherwise the
+- * code to free a buffer can end up dropping a dirty
+- * page
++ /*
++ * The TREE_REF bit is first set when the extent_buffer is added
++ * to the radix tree. It is also reset, if unset, when a new reference
++ * is created by find_extent_buffer.
+ *
+- * Once the ref bit is set, it won't go away while the
+- * buffer is dirty or in writeback, and it also won't
+- * go away while we have the reference count on the
+- * eb bumped.
++ * It is only cleared in two cases: freeing the last non-tree
++ * reference to the extent_buffer when its STALE bit is set or
++ * calling releasepage when the tree reference is the only reference.
+ *
+- * We can't just set the ref bit without bumping the
+- * ref on the eb because free_extent_buffer might
+- * see the ref bit and try to clear it. If this happens
+- * free_extent_buffer might end up dropping our original
+- * ref by mistake and freeing the page before we are able
+- * to add one more ref.
++ * In both cases, care is taken to ensure that the extent_buffer's
++ * pages are not under io. However, releasepage can be concurrently
++ * called with creating new references, which is prone to race
++ * conditions between the calls to check_buffer_tree_ref in those
++ * codepaths and clearing TREE_REF in try_release_extent_buffer.
+ *
+- * So bump the ref count first, then set the bit. If someone
+- * beat us to it, drop the ref we added.
++ * The actual lifetime of the extent_buffer in the radix tree is
++ * adequately protected by the refcount, but the TREE_REF bit and
++ * its corresponding reference are not. To protect against this
++ * class of races, we call check_buffer_tree_ref from the codepaths
++ * which trigger io after they set eb->io_pages. Note that once io is
++ * initiated, TREE_REF can no longer be cleared, so that is the
++ * moment at which any such race is best fixed.
+ */
+ refs = atomic_read(&eb->refs);
+ if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
+@@ -5344,6 +5347,11 @@ int read_extent_buffer_pages(struct exte
+ clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
+ eb->read_mirror = 0;
+ atomic_set(&eb->io_pages, num_reads);
++ /*
++ * It is possible for releasepage to clear the TREE_REF bit before we
++ * set io_pages. See check_buffer_tree_ref for a more detailed comment.
++ */
++ check_buffer_tree_ref(eb);
+ for (i = 0; i < num_pages; i++) {
+ page = eb->pages[i];
+
--- /dev/null
+From 6958c1c640af8c3f40fa8a2eee3b5b905d95b677 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Wed, 8 Jul 2020 12:25:20 -0400
+Subject: dm: use noio when sending kobject event
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 6958c1c640af8c3f40fa8a2eee3b5b905d95b677 upstream.
+
+kobject_uevent may allocate memory and it may be called while there are dm
+devices suspended. The allocation may recurse into a suspended device,
+causing a deadlock. We must set the noio flag when sending a uevent.
+
+The observed deadlock was reported here:
+https://www.redhat.com/archives/dm-devel/2020-March/msg00025.html
+
+Reported-by: Khazhismel Kumykov <khazhy@google.com>
+Reported-by: Tahsin Erdogan <tahsin@google.com>
+Reported-by: Gabriel Krisman Bertazi <krisman@collabora.com>
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -12,6 +12,7 @@
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
++#include <linux/sched/mm.h>
+ #include <linux/sched/signal.h>
+ #include <linux/blkpg.h>
+ #include <linux/bio.h>
+@@ -2665,17 +2666,25 @@ EXPORT_SYMBOL_GPL(dm_internal_resume_fas
+ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+ unsigned cookie)
+ {
++ int r;
++ unsigned noio_flag;
+ char udev_cookie[DM_COOKIE_LENGTH];
+ char *envp[] = { udev_cookie, NULL };
+
++ noio_flag = memalloc_noio_save();
++
+ if (!cookie)
+- return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
++ r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+ else {
+ snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
+ DM_COOKIE_ENV_VAR_NAME, cookie);
+- return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
+- action, envp);
++ r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
++ action, envp);
+ }
++
++ memalloc_noio_restore(noio_flag);
++
++ return r;
+ }
+
+ uint32_t dm_next_uevent_seq(struct mapped_device *md)
--- /dev/null
+From 41855a898650803e24b284173354cc3e44d07725 Mon Sep 17 00:00:00 2001
+From: Tom Rix <trix@redhat.com>
+Date: Mon, 6 Jul 2020 05:28:57 -0700
+Subject: drm/radeon: fix double free
+
+From: Tom Rix <trix@redhat.com>
+
+commit 41855a898650803e24b284173354cc3e44d07725 upstream.
+
+clang static analysis flags this error
+
+drivers/gpu/drm/radeon/ci_dpm.c:5652:9: warning: Use of memory after it is freed [unix.Malloc]
+ kfree(rdev->pm.dpm.ps[i].ps_priv);
+ ^~~~~~~~~~~~~~~~~~~~~~~~~~
+drivers/gpu/drm/radeon/ci_dpm.c:5654:2: warning: Attempt to free released memory [unix.Malloc]
+ kfree(rdev->pm.dpm.ps);
+ ^~~~~~~~~~~~~~~~~~~~~~
+
+problem is reported in ci_dpm_fini, with these code blocks.
+
+ for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
+ kfree(rdev->pm.dpm.ps[i].ps_priv);
+ }
+ kfree(rdev->pm.dpm.ps);
+
+The first free happens in ci_parse_power_table where it cleans up locally
+on a failure. ci_dpm_fini also does a cleanup.
+
+ ret = ci_parse_power_table(rdev);
+ if (ret) {
+ ci_dpm_fini(rdev);
+ return ret;
+ }
+
+So remove the cleanup in ci_parse_power_table and
+move the num_ps calculation to inside the loop so ci_dpm_fini
+will know how many array elements to free.
+
+Fixes: cc8dbbb4f62a ("drm/radeon: add dpm support for CI dGPUs (v2)")
+
+Signed-off-by: Tom Rix <trix@redhat.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/ci_dpm.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/ci_dpm.c
++++ b/drivers/gpu/drm/radeon/ci_dpm.c
+@@ -5551,6 +5551,7 @@ static int ci_parse_power_table(struct r
+ if (!rdev->pm.dpm.ps)
+ return -ENOMEM;
+ power_state_offset = (u8 *)state_array->states;
++ rdev->pm.dpm.num_ps = 0;
+ for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
+ power_state = (union pplib_power_state *)power_state_offset;
+@@ -5560,10 +5561,8 @@ static int ci_parse_power_table(struct r
+ if (!rdev->pm.power_state[i].clock_info)
+ return -EINVAL;
+ ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
+- if (ps == NULL) {
+- kfree(rdev->pm.dpm.ps);
++ if (ps == NULL)
+ return -ENOMEM;
+- }
+ rdev->pm.dpm.ps[i].ps_priv = ps;
+ ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
+ non_clock_info,
+@@ -5585,8 +5584,8 @@ static int ci_parse_power_table(struct r
+ k++;
+ }
+ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
++ rdev->pm.dpm.num_ps = i + 1;
+ }
+- rdev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+ /* fill in the vce power states */
+ for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
--- /dev/null
+From 528a9539348a0234375dfaa1ca5dbbb2f8f8e8d2 Mon Sep 17 00:00:00 2001
+From: Janosch Frank <frankja@linux.ibm.com>
+Date: Tue, 7 Jul 2020 15:38:54 +0200
+Subject: s390/mm: fix huge pte soft dirty copying
+
+From: Janosch Frank <frankja@linux.ibm.com>
+
+commit 528a9539348a0234375dfaa1ca5dbbb2f8f8e8d2 upstream.
+
+If the pmd is soft dirty we must mark the pte as soft dirty (and not dirty).
+This fixes some cases for guest migration with huge page backings.
+
+Cc: <stable@vger.kernel.org> # 4.8
+Fixes: bc29b7ac1d9f ("s390/mm: clean up pte/pmd encoding")
+Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/mm/hugetlbpage.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/mm/hugetlbpage.c
++++ b/arch/s390/mm/hugetlbpage.c
+@@ -117,7 +117,7 @@ static inline pte_t __rste_to_pte(unsign
+ _PAGE_YOUNG);
+ #ifdef CONFIG_MEM_SOFT_DIRTY
+ pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
+- _PAGE_DIRTY);
++ _PAGE_SOFT_DIRTY);
+ #endif
+ pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC,
+ _PAGE_NOEXEC);
kvm-x86-inject-gp-if-guest-attempts-to-toggle-cr4.la57-in-64-bit-mode.patch
kvm-x86-mark-cr4.tsd-as-being-possibly-owned-by-the-guest.patch
revert-ath9k-fix-general-protection-fault-in-ath9k_hif_usb_rx_cb.patch
+btrfs-fix-fatal-extent_buffer-readahead-vs-releasepage-race.patch
+drm-radeon-fix-double-free.patch
+dm-use-noio-when-sending-kobject-event.patch
+arc-entry-fix-potential-efa-clobber-when-tif_syscall_trace.patch
+arc-elf-use-right-elf_arch.patch
+s390-mm-fix-huge-pte-soft-dirty-copying.patch
+arm64-arch_timer-disable-the-compat-vdso-for-cores-affected-by-arm64_workaround_1418040.patch