--- /dev/null
+From 0633da48f0793aeba27f82d30605624416723a91 Mon Sep 17 00:00:00 2001
+From: Ian Kent <raven@themaw.net>
+Date: Tue, 21 Aug 2018 21:51:45 -0700
+Subject: autofs: fix autofs_sbi() does not check super block type
+
+From: Ian Kent <raven@themaw.net>
+
+commit 0633da48f0793aeba27f82d30605624416723a91 upstream.
+
+autofs_sbi() does not check the superblock magic number to verify it has
+been given an autofs super block.
+
+Link: http://lkml.kernel.org/r/153475422934.17131.7563724552005298277.stgit@pluto.themaw.net
+Reported-by: <syzbot+87c3c541582e56943277@syzkaller.appspotmail.com>
+Signed-off-by: Ian Kent <raven@themaw.net>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Guenter Roeck <groeck@google.com>
+Cc: Zubin Mithra <zsm@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/autofs/autofs_i.h | 4 +++-
+ fs/autofs/inode.c | 1 -
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/autofs/autofs_i.h
++++ b/fs/autofs/autofs_i.h
+@@ -26,6 +26,7 @@
+ #include <linux/list.h>
+ #include <linux/completion.h>
+ #include <linux/file.h>
++#include <linux/magic.h>
+
+ /* This is the range of ioctl() numbers we claim as ours */
+ #define AUTOFS_IOC_FIRST AUTOFS_IOC_READY
+@@ -124,7 +125,8 @@ struct autofs_sb_info {
+
+ static inline struct autofs_sb_info *autofs_sbi(struct super_block *sb)
+ {
+- return (struct autofs_sb_info *)(sb->s_fs_info);
++ return sb->s_magic != AUTOFS_SUPER_MAGIC ?
++ NULL : (struct autofs_sb_info *)(sb->s_fs_info);
+ }
+
+ static inline struct autofs_info *autofs_dentry_ino(struct dentry *dentry)
+--- a/fs/autofs/inode.c
++++ b/fs/autofs/inode.c
+@@ -10,7 +10,6 @@
+ #include <linux/seq_file.h>
+ #include <linux/pagemap.h>
+ #include <linux/parser.h>
+-#include <linux/magic.h>
+
+ #include "autofs_i.h"
+
--- /dev/null
+From e2c631ba75a7e727e8db0a9d30a06bfd434adb3a Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 5 Sep 2018 10:41:58 +0200
+Subject: clocksource: Revert "Remove kthread"
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit e2c631ba75a7e727e8db0a9d30a06bfd434adb3a upstream.
+
+It turns out that the silly spawn kthread from worker was actually needed.
+
+clocksource_watchdog_kthread() cannot be called directly from
+clocksource_watchdog_work(), because clocksource_select() calls
+timekeeping_notify() which uses stop_machine(). One cannot use
+stop_machine() from a workqueue() due lock inversions wrt CPU hotplug.
+
+Revert the patch but add a comment that explains why we jump through such
+apparently silly hoops.
+
+Fixes: 7197e77abcb6 ("clocksource: Remove kthread")
+Reported-by: Siegfried Metz <frame@mailbox.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Niklas Cassel <niklas.cassel@linaro.org>
+Tested-by: Kevin Shanahan <kevin@shanahan.id.au>
+Tested-by: viktor_jaegerskuepper@freenet.de
+Tested-by: Siegfried Metz <frame@mailbox.org>
+Cc: rafael.j.wysocki@intel.com
+Cc: len.brown@intel.com
+Cc: diego.viola@gmail.com
+Cc: rui.zhang@intel.com
+Cc: bjorn.andersson@linaro.org
+Link: https://lkml.kernel.org/r/20180905084158.GR24124@hirez.programming.kicks-ass.net
+Cc: Siegfried Metz <frame@mailbox.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/time/clocksource.c | 40 ++++++++++++++++++++++++++++++----------
+ 1 file changed, 30 insertions(+), 10 deletions(-)
+
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -129,19 +129,40 @@ static void inline clocksource_watchdog_
+ spin_unlock_irqrestore(&watchdog_lock, *flags);
+ }
+
++static int clocksource_watchdog_kthread(void *data);
++static void __clocksource_change_rating(struct clocksource *cs, int rating);
++
+ /*
+ * Interval: 0.5sec Threshold: 0.0625s
+ */
+ #define WATCHDOG_INTERVAL (HZ >> 1)
+ #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
+
++static void clocksource_watchdog_work(struct work_struct *work)
++{
++ /*
++ * We cannot directly run clocksource_watchdog_kthread() here, because
++ * clocksource_select() calls timekeeping_notify() which uses
++ * stop_machine(). One cannot use stop_machine() from a workqueue() due
++ * lock inversions wrt CPU hotplug.
++ *
++ * Also, we only ever run this work once or twice during the lifetime
++ * of the kernel, so there is no point in creating a more permanent
++ * kthread for this.
++ *
++ * If kthread_run fails the next watchdog scan over the
++ * watchdog_list will find the unstable clock again.
++ */
++ kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
++}
++
+ static void __clocksource_unstable(struct clocksource *cs)
+ {
+ cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
+ cs->flags |= CLOCK_SOURCE_UNSTABLE;
+
+ /*
+- * If the clocksource is registered clocksource_watchdog_work() will
++ * If the clocksource is registered clocksource_watchdog_kthread() will
+ * re-rate and re-select.
+ */
+ if (list_empty(&cs->list)) {
+@@ -152,7 +173,7 @@ static void __clocksource_unstable(struc
+ if (cs->mark_unstable)
+ cs->mark_unstable(cs);
+
+- /* kick clocksource_watchdog_work() */
++ /* kick clocksource_watchdog_kthread() */
+ if (finished_booting)
+ schedule_work(&watchdog_work);
+ }
+@@ -162,7 +183,7 @@ static void __clocksource_unstable(struc
+ * @cs: clocksource to be marked unstable
+ *
+ * This function is called by the x86 TSC code to mark clocksources as unstable;
+- * it defers demotion and re-selection to a work.
++ * it defers demotion and re-selection to a kthread.
+ */
+ void clocksource_mark_unstable(struct clocksource *cs)
+ {
+@@ -387,9 +408,7 @@ static void clocksource_dequeue_watchdog
+ }
+ }
+
+-static void __clocksource_change_rating(struct clocksource *cs, int rating);
+-
+-static int __clocksource_watchdog_work(void)
++static int __clocksource_watchdog_kthread(void)
+ {
+ struct clocksource *cs, *tmp;
+ unsigned long flags;
+@@ -414,12 +433,13 @@ static int __clocksource_watchdog_work(v
+ return select;
+ }
+
+-static void clocksource_watchdog_work(struct work_struct *work)
++static int clocksource_watchdog_kthread(void *data)
+ {
+ mutex_lock(&clocksource_mutex);
+- if (__clocksource_watchdog_work())
++ if (__clocksource_watchdog_kthread())
+ clocksource_select();
+ mutex_unlock(&clocksource_mutex);
++ return 0;
+ }
+
+ static bool clocksource_is_watchdog(struct clocksource *cs)
+@@ -438,7 +458,7 @@ static void clocksource_enqueue_watchdog
+ static void clocksource_select_watchdog(bool fallback) { }
+ static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
+ static inline void clocksource_resume_watchdog(void) { }
+-static inline int __clocksource_watchdog_work(void) { return 0; }
++static inline int __clocksource_watchdog_kthread(void) { return 0; }
+ static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
+ void clocksource_mark_unstable(struct clocksource *cs) { }
+
+@@ -672,7 +692,7 @@ static int __init clocksource_done_booti
+ /*
+ * Run the watchdog first to eliminate unstable clock sources
+ */
+- __clocksource_watchdog_work();
++ __clocksource_watchdog_kthread();
+ clocksource_select();
+ mutex_unlock(&clocksource_mutex);
+ return 0;