--- /dev/null
+From 88cda00733f0731711c76e535d4972c296ac512e Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Wed, 21 Jun 2017 22:45:08 +0100
+Subject: ARM64: dts: marvell: armada37xx: Fix timer interrupt specifiers
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 88cda00733f0731711c76e535d4972c296ac512e upstream.
+
+Contrary to popular belief, PPIs connected to a GICv3 to not have
+an affinity field similar to that of GICv2. That is consistent
+with the fact that GICv3 is designed to accomodate thousands of
+CPUs, and fitting them as a bitmap in a byte is... difficult.
+
+Fixes: adbc3695d9e4 ("arm64: dts: add the Marvell Armada 3700 family and a development board")
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/boot/dts/marvell/armada-37xx.dtsi | 12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+
+--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+@@ -75,14 +75,10 @@
+
+ timer {
+ compatible = "arm,armv8-timer";
+- interrupts = <GIC_PPI 13
+- (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
+- <GIC_PPI 14
+- (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
+- <GIC_PPI 11
+- (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
+- <GIC_PPI 10
+- (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
++ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ soc {
--- /dev/null
+From 48f99c8ec0b25756d0283ab058826ae07d14fad7 Mon Sep 17 00:00:00 2001
+From: Dong Bo <dongbo4@huawei.com>
+Date: Tue, 25 Apr 2017 14:11:29 +0800
+Subject: arm64: Preventing READ_IMPLIES_EXEC propagation
+
+From: Dong Bo <dongbo4@huawei.com>
+
+commit 48f99c8ec0b25756d0283ab058826ae07d14fad7 upstream.
+
+Like arch/arm/, we inherit the READ_IMPLIES_EXEC personality flag across
+fork(). This is undesirable for a number of reasons:
+
+ * ELF files that don't require executable stack can end up with it
+ anyway
+
+ * We end up performing un-necessary I-cache maintenance when mapping
+ what should be non-executable pages
+
+ * Restricting what is executable is generally desirable when defending
+ against overflow attacks
+
+This patch clears the personality flag when setting up the personality for
+newly spwaned native tasks. Given that semi-recent AArch64 toolchains emit
+a non-executable PT_GNU_STACK header, userspace applications can already
+not rely on READ_IMPLIES_EXEC so shouldn't be adversely affected by this
+change.
+
+Reported-by: Peter Maydell <peter.maydell@linaro.org>
+Signed-off-by: Dong Bo <dongbo4@huawei.com>
+[will: added comment to compat code, rewrote commit message]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/elf.h | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/arm64/include/asm/elf.h
++++ b/arch/arm64/include/asm/elf.h
+@@ -141,6 +141,7 @@ typedef struct user_fpsimd_state elf_fpr
+ ({ \
+ clear_bit(TIF_32BIT, &current->mm->context.flags); \
+ clear_thread_flag(TIF_32BIT); \
++ current->personality &= ~READ_IMPLIES_EXEC; \
+ })
+
+ /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
+@@ -187,6 +188,11 @@ typedef compat_elf_greg_t compat_elf_gr
+ ((x)->e_flags & EF_ARM_EABI_MASK))
+
+ #define compat_start_thread compat_start_thread
++/*
++ * Unlike the native SET_PERSONALITY macro, the compat version inherits
++ * READ_IMPLIES_EXEC across a fork() since this is the behaviour on
++ * arch/arm/.
++ */
+ #define COMPAT_SET_PERSONALITY(ex) \
+ ({ \
+ set_bit(TIF_32BIT, &current->mm->context.flags); \
--- /dev/null
+From 13b9abfc92be7c4454bff912021b9f835dea6e15 Mon Sep 17 00:00:00 2001
+From: Michael Kelley <mikelley@microsoft.com>
+Date: Thu, 18 May 2017 10:46:07 -0700
+Subject: Drivers: hv: vmbus: Close timing hole that can corrupt per-cpu page
+
+From: Michael Kelley <mikelley@microsoft.com>
+
+commit 13b9abfc92be7c4454bff912021b9f835dea6e15 upstream.
+
+Extend the disabling of preemption to include the hypercall so that
+another thread can't get the CPU and corrupt the per-cpu page used
+for hypercall arguments.
+
+Signed-off-by: Michael Kelley <mikelley@microsoft.com>
+Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hv/hv.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/hv/hv.c
++++ b/drivers/hv/hv.c
+@@ -82,10 +82,15 @@ int hv_post_message(union hv_connection_
+ aligned_msg->message_type = message_type;
+ aligned_msg->payload_size = payload_size;
+ memcpy((void *)aligned_msg->payload, payload, payload_size);
+- put_cpu_ptr(hv_cpu);
+
+ status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);
+
++ /* Preemption must remain disabled until after the hypercall
++ * so some other thread can't get scheduled onto this cpu and
++ * corrupt the per-cpu post_msg_page
++ */
++ put_cpu_ptr(hv_cpu);
++
+ return status & 0xFFFF;
+ }
+
--- /dev/null
+From da029c11e6b12f321f36dac8771e833b65cec962 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Fri, 7 Jul 2017 11:57:29 -0700
+Subject: exec: Limit arg stack to at most 75% of _STK_LIM
+
+From: Kees Cook <keescook@chromium.org>
+
+commit da029c11e6b12f321f36dac8771e833b65cec962 upstream.
+
+To avoid pathological stack usage or the need to special-case setuid
+execs, just limit all arg stack usage to at most 75% of _STK_LIM (6MB).
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/exec.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -220,8 +220,7 @@ static struct page *get_arg_page(struct
+
+ if (write) {
+ unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+- unsigned long ptr_size;
+- struct rlimit *rlim;
++ unsigned long ptr_size, limit;
+
+ /*
+ * Since the stack will hold pointers to the strings, we
+@@ -250,14 +249,16 @@ static struct page *get_arg_page(struct
+ return page;
+
+ /*
+- * Limit to 1/4-th the stack size for the argv+env strings.
++ * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
++ * (whichever is smaller) for the argv+env strings.
+ * This ensures that:
+ * - the remaining binfmt code will not run out of stack space,
+ * - the program will have a reasonable amount of stack left
+ * to work from.
+ */
+- rlim = current->signal->rlim;
+- if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
++ limit = _STK_LIM / 4 * 3;
++ limit = min(limit, rlimit(RLIMIT_STACK) / 4);
++ if (size > limit)
+ goto fail;
+ }
+
--- /dev/null
+From 99b19d16471e9c3faa85cad38abc9cbbe04c6d55 Mon Sep 17 00:00:00 2001
+From: "Eric W. Biederman" <ebiederm@xmission.com>
+Date: Mon, 24 Oct 2016 16:16:13 -0500
+Subject: mnt: In propgate_umount handle visiting mounts in any order
+
+From: Eric W. Biederman <ebiederm@xmission.com>
+
+commit 99b19d16471e9c3faa85cad38abc9cbbe04c6d55 upstream.
+
+While investigating some poor umount performance I realized that in
+the case of overlapping mount trees where some of the mounts are locked
+the code has been failing to unmount all of the mounts it should
+have been unmounting.
+
+This failure to unmount all of the necessary
+mounts can be reproduced with:
+
+$ cat locked_mounts_test.sh
+
+mount -t tmpfs test-base /mnt
+mount --make-shared /mnt
+mkdir -p /mnt/b
+
+mount -t tmpfs test1 /mnt/b
+mount --make-shared /mnt/b
+mkdir -p /mnt/b/10
+
+mount -t tmpfs test2 /mnt/b/10
+mount --make-shared /mnt/b/10
+mkdir -p /mnt/b/10/20
+
+mount --rbind /mnt/b /mnt/b/10/20
+
+unshare -Urm --propagation unchaged /bin/sh -c 'sleep 5; if [ $(grep test /proc/self/mountinfo | wc -l) -eq 1 ] ; then echo SUCCESS ; else echo FAILURE ; fi'
+sleep 1
+umount -l /mnt/b
+wait %%
+
+$ unshare -Urm ./locked_mounts_test.sh
+
+This failure is corrected by removing the prepass that marks mounts
+that may be umounted.
+
+A first pass is added that umounts mounts if possible and if not sets
+mount mark if they could be unmounted if they weren't locked and adds
+them to a list to umount possibilities. This first pass reconsiders
+the mounts parent if it is on the list of umount possibilities, ensuring
+that information of umoutability will pass from child to mount parent.
+
+A second pass then walks through all mounts that are umounted and processes
+their children unmounting them or marking them for reparenting.
+
+A last pass cleans up the state on the mounts that could not be umounted
+and if applicable reparents them to their first parent that remained
+mounted.
+
+While a bit longer than the old code this code is much more robust
+as it allows information to flow up from the leaves and down
+from the trunk making the order in which mounts are encountered
+in the umount propgation tree irrelevant.
+
+Fixes: 0c56fe31420c ("mnt: Don't propagate unmounts to locked mounts")
+Reviewed-by: Andrei Vagin <avagin@virtuozzo.com>
+Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/mount.h | 2
+ fs/namespace.c | 2
+ fs/pnode.c | 150 +++++++++++++++++++++++++++++++++------------------------
+ 3 files changed, 91 insertions(+), 63 deletions(-)
+
+--- a/fs/mount.h
++++ b/fs/mount.h
+@@ -58,7 +58,7 @@ struct mount {
+ struct mnt_namespace *mnt_ns; /* containing namespace */
+ struct mountpoint *mnt_mp; /* where is it mounted */
+ struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */
+- struct list_head mnt_reparent; /* reparent list entry */
++ struct list_head mnt_umounting; /* list entry for umount propagation */
+ #ifdef CONFIG_FSNOTIFY
+ struct fsnotify_mark_connector __rcu *mnt_fsnotify_marks;
+ __u32 mnt_fsnotify_mask;
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -236,7 +236,7 @@ static struct mount *alloc_vfsmnt(const
+ INIT_LIST_HEAD(&mnt->mnt_slave_list);
+ INIT_LIST_HEAD(&mnt->mnt_slave);
+ INIT_HLIST_NODE(&mnt->mnt_mp_list);
+- INIT_LIST_HEAD(&mnt->mnt_reparent);
++ INIT_LIST_HEAD(&mnt->mnt_umounting);
+ init_fs_pin(&mnt->mnt_umount, drop_mountpoint);
+ }
+ return mnt;
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -413,86 +413,95 @@ void propagate_mount_unlock(struct mount
+ }
+ }
+
+-/*
+- * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
+- */
+-static void mark_umount_candidates(struct mount *mnt)
++static void umount_one(struct mount *mnt, struct list_head *to_umount)
+ {
+- struct mount *parent = mnt->mnt_parent;
+- struct mount *m;
+-
+- BUG_ON(parent == mnt);
+-
+- for (m = propagation_next(parent, parent); m;
+- m = propagation_next(m, parent)) {
+- struct mount *child = __lookup_mnt(&m->mnt,
+- mnt->mnt_mountpoint);
+- if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
+- continue;
+- if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
+- SET_MNT_MARK(child);
+- }
+- }
++ CLEAR_MNT_MARK(mnt);
++ mnt->mnt.mnt_flags |= MNT_UMOUNT;
++ list_del_init(&mnt->mnt_child);
++ list_del_init(&mnt->mnt_umounting);
++ list_move_tail(&mnt->mnt_list, to_umount);
+ }
+
+ /*
+ * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
+ * parent propagates to.
+ */
+-static void __propagate_umount(struct mount *mnt, struct list_head *to_reparent)
++static bool __propagate_umount(struct mount *mnt,
++ struct list_head *to_umount,
++ struct list_head *to_restore)
+ {
+- struct mount *parent = mnt->mnt_parent;
+- struct mount *m;
++ bool progress = false;
++ struct mount *child;
+
+- BUG_ON(parent == mnt);
+-
+- for (m = propagation_next(parent, parent); m;
+- m = propagation_next(m, parent)) {
+- struct mount *topper;
+- struct mount *child = __lookup_mnt(&m->mnt,
+- mnt->mnt_mountpoint);
+- /*
+- * umount the child only if the child has no children
+- * and the child is marked safe to unmount.
+- */
+- if (!child || !IS_MNT_MARKED(child))
++ /*
++ * The state of the parent won't change if this mount is
++ * already unmounted or marked as without children.
++ */
++ if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED))
++ goto out;
++
++ /* Verify topper is the only grandchild that has not been
++ * speculatively unmounted.
++ */
++ list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
++ if (child->mnt_mountpoint == mnt->mnt.mnt_root)
++ continue;
++ if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child))
+ continue;
+- CLEAR_MNT_MARK(child);
++ /* Found a mounted child */
++ goto children;
++ }
+
+- /* If there is exactly one mount covering all of child
+- * replace child with that mount.
+- */
+- topper = find_topper(child);
+- if (topper)
+- list_add_tail(&topper->mnt_reparent, to_reparent);
+-
+- if (topper || list_empty(&child->mnt_mounts)) {
+- list_del_init(&child->mnt_child);
+- list_del_init(&child->mnt_reparent);
+- child->mnt.mnt_flags |= MNT_UMOUNT;
+- list_move_tail(&child->mnt_list, &mnt->mnt_list);
++ /* Mark mounts that can be unmounted if not locked */
++ SET_MNT_MARK(mnt);
++ progress = true;
++
++ /* If a mount is without children and not locked umount it. */
++ if (!IS_MNT_LOCKED(mnt)) {
++ umount_one(mnt, to_umount);
++ } else {
++children:
++ list_move_tail(&mnt->mnt_umounting, to_restore);
++ }
++out:
++ return progress;
++}
++
++static void umount_list(struct list_head *to_umount,
++ struct list_head *to_restore)
++{
++ struct mount *mnt, *child, *tmp;
++ list_for_each_entry(mnt, to_umount, mnt_list) {
++ list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) {
++ /* topper? */
++ if (child->mnt_mountpoint == mnt->mnt.mnt_root)
++ list_move_tail(&child->mnt_umounting, to_restore);
++ else
++ umount_one(child, to_umount);
+ }
+ }
+ }
+
+-static void reparent_mounts(struct list_head *to_reparent)
++static void restore_mounts(struct list_head *to_restore)
+ {
+- while (!list_empty(to_reparent)) {
++ /* Restore mounts to a clean working state */
++ while (!list_empty(to_restore)) {
+ struct mount *mnt, *parent;
+ struct mountpoint *mp;
+
+- mnt = list_first_entry(to_reparent, struct mount, mnt_reparent);
+- list_del_init(&mnt->mnt_reparent);
++ mnt = list_first_entry(to_restore, struct mount, mnt_umounting);
++ CLEAR_MNT_MARK(mnt);
++ list_del_init(&mnt->mnt_umounting);
+
+- /* Where should this mount be reparented to? */
++ /* Should this mount be reparented? */
+ mp = mnt->mnt_mp;
+ parent = mnt->mnt_parent;
+ while (parent->mnt.mnt_flags & MNT_UMOUNT) {
+ mp = parent->mnt_mp;
+ parent = parent->mnt_parent;
+ }
+-
+- mnt_change_mountpoint(parent, mp, mnt);
++ if (parent != mnt->mnt_parent)
++ mnt_change_mountpoint(parent, mp, mnt);
+ }
+ }
+
+@@ -506,15 +515,34 @@ static void reparent_mounts(struct list_
+ int propagate_umount(struct list_head *list)
+ {
+ struct mount *mnt;
+- LIST_HEAD(to_reparent);
+-
+- list_for_each_entry_reverse(mnt, list, mnt_list)
+- mark_umount_candidates(mnt);
++ LIST_HEAD(to_restore);
++ LIST_HEAD(to_umount);
+
+- list_for_each_entry(mnt, list, mnt_list)
+- __propagate_umount(mnt, &to_reparent);
++ list_for_each_entry(mnt, list, mnt_list) {
++ struct mount *parent = mnt->mnt_parent;
++ struct mount *m;
++
++ for (m = propagation_next(parent, parent); m;
++ m = propagation_next(m, parent)) {
++ struct mount *child = __lookup_mnt(&m->mnt,
++ mnt->mnt_mountpoint);
++ if (!child)
++ continue;
++
++ /* Check the child and parents while progress is made */
++ while (__propagate_umount(child,
++ &to_umount, &to_restore)) {
++ /* Is the parent a umount candidate? */
++ child = child->mnt_parent;
++ if (list_empty(&child->mnt_umounting))
++ break;
++ }
++ }
++ }
+
+- reparent_mounts(&to_reparent);
++ umount_list(&to_umount, &to_restore);
++ restore_mounts(&to_restore);
++ list_splice_tail(&to_umount, list);
+
+ return 0;
+ }
--- /dev/null
+From 570487d3faf2a1d8a220e6ee10f472163123d7da Mon Sep 17 00:00:00 2001
+From: "Eric W. Biederman" <ebiederm@xmission.com>
+Date: Mon, 15 May 2017 14:42:07 -0500
+Subject: mnt: In umount propagation reparent in a separate pass
+
+From: Eric W. Biederman <ebiederm@xmission.com>
+
+commit 570487d3faf2a1d8a220e6ee10f472163123d7da upstream.
+
+It was observed that in some pathlogical cases that the current code
+does not unmount everything it should. After investigation it
+was determined that the issue is that mnt_change_mntpoint can
+can change which mounts are available to be unmounted during mount
+propagation which is wrong.
+
+The trivial reproducer is:
+$ cat ./pathological.sh
+
+mount -t tmpfs test-base /mnt
+cd /mnt
+mkdir 1 2 1/1
+mount --bind 1 1
+mount --make-shared 1
+mount --bind 1 2
+mount --bind 1/1 1/1
+mount --bind 1/1 1/1
+echo
+grep test-base /proc/self/mountinfo
+umount 1/1
+echo
+grep test-base /proc/self/mountinfo
+
+$ unshare -Urm ./pathological.sh
+
+The expected output looks like:
+46 31 0:25 / /mnt rw,relatime - tmpfs test-base rw,uid=1000,gid=1000
+47 46 0:25 /1 /mnt/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+48 46 0:25 /1 /mnt/2 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+49 54 0:25 /1/1 /mnt/1/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+50 53 0:25 /1/1 /mnt/2/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+51 49 0:25 /1/1 /mnt/1/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+54 47 0:25 /1/1 /mnt/1/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+53 48 0:25 /1/1 /mnt/2/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+52 50 0:25 /1/1 /mnt/2/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+
+46 31 0:25 / /mnt rw,relatime - tmpfs test-base rw,uid=1000,gid=1000
+47 46 0:25 /1 /mnt/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+48 46 0:25 /1 /mnt/2 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+
+The output without the fix looks like:
+46 31 0:25 / /mnt rw,relatime - tmpfs test-base rw,uid=1000,gid=1000
+47 46 0:25 /1 /mnt/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+48 46 0:25 /1 /mnt/2 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+49 54 0:25 /1/1 /mnt/1/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+50 53 0:25 /1/1 /mnt/2/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+51 49 0:25 /1/1 /mnt/1/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+54 47 0:25 /1/1 /mnt/1/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+53 48 0:25 /1/1 /mnt/2/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+52 50 0:25 /1/1 /mnt/2/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+
+46 31 0:25 / /mnt rw,relatime - tmpfs test-base rw,uid=1000,gid=1000
+47 46 0:25 /1 /mnt/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+48 46 0:25 /1 /mnt/2 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+52 48 0:25 /1/1 /mnt/2/1 rw,relatime shared:1 - tmpfs test-base rw,uid=1000,gid=1000
+
+That last mount in the output was in the propgation tree to be unmounted but
+was missed because the mnt_change_mountpoint changed it's parent before the walk
+through the mount propagation tree observed it.
+
+Fixes: 1064f874abc0 ("mnt: Tuck mounts under others instead of creating shadow/side mounts.")
+Acked-by: Andrei Vagin <avagin@virtuozzo.com>
+Reviewed-by: Ram Pai <linuxram@us.ibm.com>
+Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/mount.h | 1 +
+ fs/namespace.c | 1 +
+ fs/pnode.c | 35 ++++++++++++++++++++++++++++++-----
+ 3 files changed, 32 insertions(+), 5 deletions(-)
+
+--- a/fs/mount.h
++++ b/fs/mount.h
+@@ -58,6 +58,7 @@ struct mount {
+ struct mnt_namespace *mnt_ns; /* containing namespace */
+ struct mountpoint *mnt_mp; /* where is it mounted */
+ struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */
++ struct list_head mnt_reparent; /* reparent list entry */
+ #ifdef CONFIG_FSNOTIFY
+ struct fsnotify_mark_connector __rcu *mnt_fsnotify_marks;
+ __u32 mnt_fsnotify_mask;
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -236,6 +236,7 @@ static struct mount *alloc_vfsmnt(const
+ INIT_LIST_HEAD(&mnt->mnt_slave_list);
+ INIT_LIST_HEAD(&mnt->mnt_slave);
+ INIT_HLIST_NODE(&mnt->mnt_mp_list);
++ INIT_LIST_HEAD(&mnt->mnt_reparent);
+ init_fs_pin(&mnt->mnt_umount, drop_mountpoint);
+ }
+ return mnt;
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -439,7 +439,7 @@ static void mark_umount_candidates(struc
+ * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
+ * parent propagates to.
+ */
+-static void __propagate_umount(struct mount *mnt)
++static void __propagate_umount(struct mount *mnt, struct list_head *to_reparent)
+ {
+ struct mount *parent = mnt->mnt_parent;
+ struct mount *m;
+@@ -464,17 +464,38 @@ static void __propagate_umount(struct mo
+ */
+ topper = find_topper(child);
+ if (topper)
+- mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
+- topper);
++ list_add_tail(&topper->mnt_reparent, to_reparent);
+
+- if (list_empty(&child->mnt_mounts)) {
++ if (topper || list_empty(&child->mnt_mounts)) {
+ list_del_init(&child->mnt_child);
++ list_del_init(&child->mnt_reparent);
+ child->mnt.mnt_flags |= MNT_UMOUNT;
+ list_move_tail(&child->mnt_list, &mnt->mnt_list);
+ }
+ }
+ }
+
++static void reparent_mounts(struct list_head *to_reparent)
++{
++ while (!list_empty(to_reparent)) {
++ struct mount *mnt, *parent;
++ struct mountpoint *mp;
++
++ mnt = list_first_entry(to_reparent, struct mount, mnt_reparent);
++ list_del_init(&mnt->mnt_reparent);
++
++ /* Where should this mount be reparented to? */
++ mp = mnt->mnt_mp;
++ parent = mnt->mnt_parent;
++ while (parent->mnt.mnt_flags & MNT_UMOUNT) {
++ mp = parent->mnt_mp;
++ parent = parent->mnt_parent;
++ }
++
++ mnt_change_mountpoint(parent, mp, mnt);
++ }
++}
++
+ /*
+ * collect all mounts that receive propagation from the mount in @list,
+ * and return these additional mounts in the same list.
+@@ -485,11 +506,15 @@ static void __propagate_umount(struct mo
+ int propagate_umount(struct list_head *list)
+ {
+ struct mount *mnt;
++ LIST_HEAD(to_reparent);
+
+ list_for_each_entry_reverse(mnt, list, mnt_list)
+ mark_umount_candidates(mnt);
+
+ list_for_each_entry(mnt, list, mnt_list)
+- __propagate_umount(mnt);
++ __propagate_umount(mnt, &to_reparent);
++
++ reparent_mounts(&to_reparent);
++
+ return 0;
+ }
--- /dev/null
+From 296990deb389c7da21c78030376ba244dc1badf5 Mon Sep 17 00:00:00 2001
+From: "Eric W. Biederman" <ebiederm@xmission.com>
+Date: Mon, 24 Oct 2016 17:25:19 -0500
+Subject: mnt: Make propagate_umount less slow for overlapping mount propagation trees
+
+From: Eric W. Biederman <ebiederm@xmission.com>
+
+commit 296990deb389c7da21c78030376ba244dc1badf5 upstream.
+
+Andrei Vagin pointed out that time to executue propagate_umount can go
+non-linear (and take a ludicrious amount of time) when the mount
+propogation trees of the mounts to be unmunted by a lazy unmount
+overlap.
+
+Make the walk of the mount propagation trees nearly linear by
+remembering which mounts have already been visited, allowing
+subsequent walks to detect when walking a mount propgation tree or a
+subtree of a mount propgation tree would be duplicate work and to skip
+them entirely.
+
+Walk the list of mounts whose propgatation trees need to be traversed
+from the mount highest in the mount tree to mounts lower in the mount
+tree so that odds are higher that the code will walk the largest trees
+first, allowing later tree walks to be skipped entirely.
+
+Add cleanup_umount_visitation to remover the code's memory of which
+mounts have been visited.
+
+Add the functions last_slave and skip_propagation_subtree to allow
+skipping appropriate parts of the mount propagation tree without
+needing to change the logic of the rest of the code.
+
+A script to generate overlapping mount propagation trees:
+
+$ cat runs.h
+set -e
+mount -t tmpfs zdtm /mnt
+mkdir -p /mnt/1 /mnt/2
+mount -t tmpfs zdtm /mnt/1
+mount --make-shared /mnt/1
+mkdir /mnt/1/1
+
+iteration=10
+if [ -n "$1" ] ; then
+ iteration=$1
+fi
+
+for i in $(seq $iteration); do
+ mount --bind /mnt/1/1 /mnt/1/1
+done
+
+mount --rbind /mnt/1 /mnt/2
+
+TIMEFORMAT='%Rs'
+nr=$(( ( 2 ** ( $iteration + 1 ) ) + 1 ))
+echo -n "umount -l /mnt/1 -> $nr "
+time umount -l /mnt/1
+
+nr=$(cat /proc/self/mountinfo | grep zdtm | wc -l )
+time umount -l /mnt/2
+
+$ for i in $(seq 9 19); do echo $i; unshare -Urm bash ./run.sh $i; done
+
+Here are the performance numbers with and without the patch:
+
+ mhash | 8192 | 8192 | 1048576 | 1048576
+ mounts | before | after | before | after
+ ------------------------------------------------
+ 1025 | 0.040s | 0.016s | 0.038s | 0.019s
+ 2049 | 0.094s | 0.017s | 0.080s | 0.018s
+ 4097 | 0.243s | 0.019s | 0.206s | 0.023s
+ 8193 | 1.202s | 0.028s | 1.562s | 0.032s
+ 16385 | 9.635s | 0.036s | 9.952s | 0.041s
+ 32769 | 60.928s | 0.063s | 44.321s | 0.064s
+ 65537 | | 0.097s | | 0.097s
+ 131073 | | 0.233s | | 0.176s
+ 262145 | | 0.653s | | 0.344s
+ 524289 | | 2.305s | | 0.735s
+ 1048577 | | 7.107s | | 2.603s
+
+Andrei Vagin reports fixing the performance problem is part of the
+work to fix CVE-2016-6213.
+
+Fixes: a05964f3917c ("[PATCH] shared mounts handling: umount")
+Reported-by: Andrei Vagin <avagin@openvz.org>
+Reviewed-by: Andrei Vagin <avagin@virtuozzo.com>
+Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pnode.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 62 insertions(+), 1 deletion(-)
+
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -24,6 +24,11 @@ static inline struct mount *first_slave(
+ return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
+ }
+
++static inline struct mount *last_slave(struct mount *p)
++{
++ return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
++}
++
+ static inline struct mount *next_slave(struct mount *p)
+ {
+ return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
+@@ -162,6 +167,19 @@ static struct mount *propagation_next(st
+ }
+ }
+
++static struct mount *skip_propagation_subtree(struct mount *m,
++ struct mount *origin)
++{
++ /*
++ * Advance m such that propagation_next will not return
++ * the slaves of m.
++ */
++ if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
++ m = last_slave(m);
++
++ return m;
++}
++
+ static struct mount *next_group(struct mount *m, struct mount *origin)
+ {
+ while (1) {
+@@ -505,6 +523,15 @@ static void restore_mounts(struct list_h
+ }
+ }
+
++static void cleanup_umount_visitations(struct list_head *visited)
++{
++ while (!list_empty(visited)) {
++ struct mount *mnt =
++ list_first_entry(visited, struct mount, mnt_umounting);
++ list_del_init(&mnt->mnt_umounting);
++ }
++}
++
+ /*
+ * collect all mounts that receive propagation from the mount in @list,
+ * and return these additional mounts in the same list.
+@@ -517,11 +544,23 @@ int propagate_umount(struct list_head *l
+ struct mount *mnt;
+ LIST_HEAD(to_restore);
+ LIST_HEAD(to_umount);
++ LIST_HEAD(visited);
+
+- list_for_each_entry(mnt, list, mnt_list) {
++ /* Find candidates for unmounting */
++ list_for_each_entry_reverse(mnt, list, mnt_list) {
+ struct mount *parent = mnt->mnt_parent;
+ struct mount *m;
+
++ /*
++ * If this mount has already been visited it is known that it's
++ * entire peer group and all of their slaves in the propagation
++ * tree for the mountpoint has already been visited and there is
++ * no need to visit them again.
++ */
++ if (!list_empty(&mnt->mnt_umounting))
++ continue;
++
++ list_add_tail(&mnt->mnt_umounting, &visited);
+ for (m = propagation_next(parent, parent); m;
+ m = propagation_next(m, parent)) {
+ struct mount *child = __lookup_mnt(&m->mnt,
+@@ -529,6 +568,27 @@ int propagate_umount(struct list_head *l
+ if (!child)
+ continue;
+
++ if (!list_empty(&child->mnt_umounting)) {
++ /*
++ * If the child has already been visited it is
++ * know that it's entire peer group and all of
++ * their slaves in the propgation tree for the
++ * mountpoint has already been visited and there
++ * is no need to visit this subtree again.
++ */
++ m = skip_propagation_subtree(m, parent);
++ continue;
++ } else if (child->mnt.mnt_flags & MNT_UMOUNT) {
++ /*
++ * We have come accross an partially unmounted
++ * mount in list that has not been visited yet.
++ * Remember it has been visited and continue
++ * about our merry way.
++ */
++ list_add_tail(&child->mnt_umounting, &visited);
++ continue;
++ }
++
+ /* Check the child and parents while progress is made */
+ while (__propagate_umount(child,
+ &to_umount, &to_restore)) {
+@@ -542,6 +602,7 @@ int propagate_umount(struct list_head *l
+
+ umount_list(&to_umount, &to_restore);
+ restore_mounts(&to_restore);
++ cleanup_umount_visitations(&visited);
+ list_splice_tail(&to_umount, list);
+
+ return 0;
--- /dev/null
+From 3360acdf839170b612f5b212539694c20e3f16d0 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Fri, 9 Jun 2017 10:59:07 +0100
+Subject: nvmem: core: fix leaks on registration errors
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 3360acdf839170b612f5b212539694c20e3f16d0 upstream.
+
+Make sure to deregister and release the nvmem device and underlying
+memory on registration errors.
+
+Note that the private data must be freed using put_device() once the
+struct device has been initialised.
+
+Also note that there's a related reference leak in the deregistration
+function as reported by Mika Westerberg which is being fixed separately.
+
+Fixes: b6c217ab9be6 ("nvmem: Add backwards compatibility support for older EEPROM drivers.")
+Fixes: eace75cfdcf7 ("nvmem: Add a simple NVMEM framework for nvmem providers")
+Cc: Andrew Lunn <andrew@lunn.ch>
+Cc: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Cc: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Acked-by: Andrey Smirnov <andrew.smirnov@gmail.com>
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvmem/core.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -489,21 +489,24 @@ struct nvmem_device *nvmem_register(cons
+
+ rval = device_add(&nvmem->dev);
+ if (rval)
+- goto out;
++ goto err_put_device;
+
+ if (config->compat) {
+ rval = nvmem_setup_compat(nvmem, config);
+ if (rval)
+- goto out;
++ goto err_device_del;
+ }
+
+ if (config->cells)
+ nvmem_add_cells(nvmem, config);
+
+ return nvmem;
+-out:
+- ida_simple_remove(&nvmem_ida, nvmem->id);
+- kfree(nvmem);
++
++err_device_del:
++ device_del(&nvmem->dev);
++err_put_device:
++ put_device(&nvmem->dev);
++
+ return ERR_PTR(rval);
+ }
+ EXPORT_SYMBOL_GPL(nvmem_register);
--- /dev/null
+From 1e2a516e89fc412a754327522ab271b42f99c6b4 Mon Sep 17 00:00:00 2001
+From: Balbir Singh <bsingharora@gmail.com>
+Date: Thu, 29 Jun 2017 21:57:26 +1000
+Subject: powerpc/kexec: Fix radix to hash kexec due to IAMR/AMOR
+
+From: Balbir Singh <bsingharora@gmail.com>
+
+commit 1e2a516e89fc412a754327522ab271b42f99c6b4 upstream.
+
+This patch fixes a crash seen while doing a kexec from radix mode to
+hash mode. Key 0 is special in hash and used in the RPN by default, we
+set the key values to 0 today. In radix mode key 0 is used to control
+supervisor<->user access. In hash key 0 is used by default, so the
+first instruction after the switch causes a crash on kexec.
+
+Commit 3b10d0095a1e ("powerpc/mm/radix: Prevent kernel execution of
+user space") introduced the setting of IAMR and AMOR values to prevent
+execution of user mode instructions from supervisor mode. We need to
+clean up these SPR's on kexec.
+
+Fixes: 3b10d0095a1e ("powerpc/mm/radix: Prevent kernel execution of user space")
+Reported-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Balbir Singh <bsingharora@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/misc_64.S | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/arch/powerpc/kernel/misc_64.S
++++ b/arch/powerpc/kernel/misc_64.S
+@@ -614,6 +614,18 @@ _GLOBAL(kexec_sequence)
+ li r0,0
+ std r0,16(r1)
+
++BEGIN_FTR_SECTION
++ /*
++ * This is the best time to turn AMR/IAMR off.
++ * key 0 is used in radix for supervisor<->user
++ * protection, but on hash key 0 is reserved
++ * ideally we want to enter with a clean state.
++ * NOTE, we rely on r0 being 0 from above.
++ */
++ mtspr SPRN_IAMR,r0
++ mtspr SPRN_AMOR,r0
++END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
++
+ /* save regs for local vars on new stack.
+ * yes, we won't go back, but ...
+ */
--- /dev/null
+From 6b5fc3a1331810db407c9e0e673dc1837afdc9d0 Mon Sep 17 00:00:00 2001
+From: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Date: Fri, 28 Apr 2017 20:11:09 -0700
+Subject: rcu: Add memory barriers for NOCB leader wakeup
+
+From: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+commit 6b5fc3a1331810db407c9e0e673dc1837afdc9d0 upstream.
+
+Wait/wakeup operations do not guarantee ordering on their own. Instead,
+either locking or memory barriers are required. This commit therefore
+adds memory barriers to wake_nocb_leader() and nocb_leader_wait().
+
+Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Tested-by: Krister Johansen <kjlx@templeofstupid.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/rcu/tree_plugin.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -1769,6 +1769,7 @@ static void wake_nocb_leader(struct rcu_
+ if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
+ /* Prior smp_mb__after_atomic() orders against prior enqueue. */
+ WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
++ smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
+ swake_up(&rdp_leader->nocb_wq);
+ }
+ }
+@@ -2023,6 +2024,7 @@ wait_again:
+ * nocb_gp_head, where they await a grace period.
+ */
+ gotcbs = false;
++ smp_mb(); /* wakeup before ->nocb_head reads. */
+ for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
+ rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
+ if (!rdp->nocb_gp_head)
--- /dev/null
+From 796a3bae2fba6810427efdb314a1c126c9490fb3 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Thu, 29 Jun 2017 08:46:12 -0700
+Subject: selftests/capabilities: Fix the test_execve test
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit 796a3bae2fba6810427efdb314a1c126c9490fb3 upstream.
+
+test_execve does rather odd mount manipulations to safely create
+temporary setuid and setgid executables that aren't visible to the
+rest of the system. Those executables end up in the test's cwd, but
+that cwd is MNT_DETACHed.
+
+The core namespace code considers MNT_DETACHed trees to belong to no
+mount namespace at all and, in general, MNT_DETACHed trees are only
+barely functional. This interacted with commit 380cf5ba6b0a ("fs:
+Treat foreign mounts as nosuid") to cause all MNT_DETACHed trees to
+act as though they're nosuid, breaking the test.
+
+Fix it by just not detaching the tree. It's still in a private
+mount namespace and is therefore still invisible to the rest of the
+system (except via /proc, and the same nosuid logic will protect all
+other programs on the system from believing in test_execve's setuid
+bits).
+
+While we're at it, fix some blatant whitespace problems.
+
+Reported-by: Naresh Kamboju <naresh.kamboju@linaro.org>
+Fixes: 380cf5ba6b0a ("fs: Treat foreign mounts as nosuid")
+Cc: "Eric W. Biederman" <ebiederm@xmission.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Shuah Khan <shuahkh@osg.samsung.com>
+Cc: Greg KH <greg@kroah.com>
+Cc: linux-kselftest@vger.kernel.org
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Shuah Khan <shuahkh@osg.samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/testing/selftests/capabilities/test_execve.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/tools/testing/selftests/capabilities/test_execve.c
++++ b/tools/testing/selftests/capabilities/test_execve.c
+@@ -138,9 +138,6 @@ static void chdir_to_tmpfs(void)
+
+ if (chdir(cwd) != 0)
+ err(1, "chdir to private tmpfs");
+-
+- if (umount2(".", MNT_DETACH) != 0)
+- err(1, "detach private tmpfs");
+ }
+
+ static void copy_fromat_to(int fromfd, const char *fromname, const char *toname)
+@@ -248,7 +245,7 @@ static int do_tests(int uid, const char
+ err(1, "chown");
+ if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0)
+ err(1, "chmod");
+-}
++ }
+
+ capng_get_caps_process();
+
+@@ -384,7 +381,7 @@ static int do_tests(int uid, const char
+ } else {
+ printf("[RUN]\tNon-root +ia, sgidnonroot => i\n");
+ exec_other_validate_cap("./validate_cap_sgidnonroot",
+- false, false, true, false);
++ false, false, true, false);
+
+ if (fork_wait()) {
+ printf("[RUN]\tNon-root +ia, sgidroot => i\n");
arm64-move-elf_et_dyn_base-to-4gb-4mb.patch
powerpc-move-elf_et_dyn_base-to-4gb-4mb.patch
s390-reduce-elf_et_dyn_base.patch
+exec-limit-arg-stack-to-at-most-75-of-_stk_lim.patch
+powerpc-kexec-fix-radix-to-hash-kexec-due-to-iamr-amor.patch
+arm64-dts-marvell-armada37xx-fix-timer-interrupt-specifiers.patch
+arm64-preventing-read_implies_exec-propagation.patch
+vt-fix-unchecked-__put_user-in-tioclinux-ioctls.patch
+rcu-add-memory-barriers-for-nocb-leader-wakeup.patch
+nvmem-core-fix-leaks-on-registration-errors.patch
+drivers-hv-vmbus-close-timing-hole-that-can-corrupt-per-cpu-page.patch
+mnt-in-umount-propagation-reparent-in-a-separate-pass.patch
+mnt-in-propgate_umount-handle-visiting-mounts-in-any-order.patch
+mnt-make-propagate_umount-less-slow-for-overlapping-mount-propagation-trees.patch
+selftests-capabilities-fix-the-test_execve-test.patch
--- /dev/null
+From 6987dc8a70976561d22450b5858fc9767788cc1c Mon Sep 17 00:00:00 2001
+From: Adam Borowski <kilobyte@angband.pl>
+Date: Sat, 3 Jun 2017 09:35:06 +0200
+Subject: vt: fix unchecked __put_user() in tioclinux ioctls
+
+From: Adam Borowski <kilobyte@angband.pl>
+
+commit 6987dc8a70976561d22450b5858fc9767788cc1c upstream.
+
+Only read access is checked before this call.
+
+Actually, at the moment this is not an issue, as every in-tree arch does
+the same manual checks for VERIFY_READ vs VERIFY_WRITE, relying on the MMU
+to tell them apart, but this wasn't the case in the past and may happen
+again on some odd arch in the future.
+
+If anyone cares about 3.7 and earlier, this is a security hole (untested)
+on real 80386 CPUs.
+
+Signed-off-by: Adam Borowski <kilobyte@angband.pl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/vt/vt.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -2709,13 +2709,13 @@ int tioclinux(struct tty_struct *tty, un
+ * related to the kernel should not use this.
+ */
+ data = vt_get_shift_state();
+- ret = __put_user(data, p);
++ ret = put_user(data, p);
+ break;
+ case TIOCL_GETMOUSEREPORTING:
+ console_lock(); /* May be overkill */
+ data = mouse_reporting();
+ console_unlock();
+- ret = __put_user(data, p);
++ ret = put_user(data, p);
+ break;
+ case TIOCL_SETVESABLANK:
+ console_lock();
+@@ -2724,7 +2724,7 @@ int tioclinux(struct tty_struct *tty, un
+ break;
+ case TIOCL_GETKMSGREDIRECT:
+ data = vt_get_kmsg_redirect();
+- ret = __put_user(data, p);
++ ret = put_user(data, p);
+ break;
+ case TIOCL_SETKMSGREDIRECT:
+ if (!capable(CAP_SYS_ADMIN)) {