4.4-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>  Mon, 26 Jun 2017 07:00:39 +0000 (09:00 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  Mon, 26 Jun 2017 07:00:39 +0000 (09:00 +0200)
added patches:
input-i8042-add-fujitsu-lifebook-ah544-to-notimeout-list.patch
powerpc-kprobes-pause-function_graph-tracing-during-jprobes-handling.patch
time-fix-clock-read-clock-race-around-clocksource-changes.patch

queue-4.4/input-i8042-add-fujitsu-lifebook-ah544-to-notimeout-list.patch [new file with mode: 0644]
queue-4.4/powerpc-kprobes-pause-function_graph-tracing-during-jprobes-handling.patch [new file with mode: 0644]
queue-4.4/series
queue-4.4/time-fix-clock-read-clock-race-around-clocksource-changes.patch [new file with mode: 0644]

diff --git a/queue-4.4/input-i8042-add-fujitsu-lifebook-ah544-to-notimeout-list.patch b/queue-4.4/input-i8042-add-fujitsu-lifebook-ah544-to-notimeout-list.patch
new file mode 100644 (file)
index 0000000..ad2cf46
--- /dev/null
@@ -0,0 +1,40 @@
+From 817ae460c784f32cd45e60b2b1b21378c3c6a847 Mon Sep 17 00:00:00 2001
+From: Daniel Drake <drake@endlessm.com>
+Date: Mon, 19 Jun 2017 19:48:52 -0700
+Subject: Input: i8042 - add Fujitsu Lifebook AH544 to notimeout list
+
+From: Daniel Drake <drake@endlessm.com>
+
+commit 817ae460c784f32cd45e60b2b1b21378c3c6a847 upstream.
+
+Without this quirk, the touchpad is not responsive on this product, with
+the following message repeated in the logs:
+
+ psmouse serio1: bad data from KBC - timeout
+
+Add it to the notimeout list alongside other similar Fujitsu laptops.
+
+Signed-off-by: Daniel Drake <drake@endlessm.com>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/serio/i8042-x86ia64io.h |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -788,6 +788,13 @@ static const struct dmi_system_id __init
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
+               },
+       },
++      {
++              /* Fujitsu UH554 laptop */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
++              },
++      },
+       { }
+ };
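
For context on how an entry like this takes effect: the driver walks the
notimeout DMI table once at init time. A minimal sketch of that mechanism,
assuming the surrounding i8042 code (dmi_check_system() is the real kernel
helper; the function name below is illustrative, and the flag itself lives
in i8042.c):

    /*
     * Sketch: consuming the notimeout DMI table at init time.
     * dmi_check_system() returns the number of entries whose
     * DMI_MATCH fields all match the running machine's DMI data,
     * so a single hit is enough to set the flag.
     */
    static bool i8042_notimeout;

    static void __init i8042_check_quirks(void)   /* hypothetical name */
    {
            if (dmi_check_system(i8042_dmi_notimeout_table))
                    i8042_notimeout = true;
    }

With the flag set, keyboard-controller timeouts are no longer reported as
SERIO_TIMEOUT errors, which is what silences the "bad data from KBC -
timeout" complaints from psmouse.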
diff --git a/queue-4.4/powerpc-kprobes-pause-function_graph-tracing-during-jprobes-handling.patch b/queue-4.4/powerpc-kprobes-pause-function_graph-tracing-during-jprobes-handling.patch
new file mode 100644 (file)
index 0000000..ae0cdc3
--- /dev/null
@@ -0,0 +1,57 @@
+From a9f8553e935f26cb5447f67e280946b0923cd2dc Mon Sep 17 00:00:00 2001
+From: "Naveen N. Rao" <naveen.n.rao@linux.vnet.ibm.com>
+Date: Thu, 1 Jun 2017 16:18:15 +0530
+Subject: powerpc/kprobes: Pause function_graph tracing during jprobes handling
+
+From: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+
+commit a9f8553e935f26cb5447f67e280946b0923cd2dc upstream.
+
+This fixes a crash when function_graph and jprobes are used together.
+This is essentially commit 237d28db036e ("ftrace/jprobes/x86: Fix
+conflict between jprobes and function graph tracing"), but for powerpc.
+
+Jprobes breaks function_graph tracing since the jprobe hook needs to use
+jprobe_return(), which never returns to the hook, but instead to
+the original jprobe'd function. The solution is to momentarily pause
+function_graph tracing before invoking the jprobe hook and re-enable it
+when returning to the original jprobe'd function.
+
+Fixes: 6794c78243bf ("powerpc64: port of the function graph tracer")
+Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
+Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/kprobes.c |   11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/arch/powerpc/kernel/kprobes.c
++++ b/arch/powerpc/kernel/kprobes.c
+@@ -514,6 +514,15 @@ int __kprobes setjmp_pre_handler(struct
+ #endif
+ #endif
++      /*
++       * jprobes use jprobe_return() which skips the normal return
++       * path of the function, and this messes up the accounting of the
++       * function graph tracer.
++       *
++       * Pause function graph tracing while performing the jprobe function.
++       */
++      pause_graph_tracing();
++
+       return 1;
+ }
+@@ -536,6 +545,8 @@ int __kprobes longjmp_break_handler(stru
+        * saved regs...
+        */
+       memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
++      /* It's OK to start function graph tracing again */
++      unpause_graph_tracing();
+       preempt_enable_no_resched();
+       return 1;
+ }
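
The pairing the patch establishes is symmetric: tracing is paused in the
pre-handler that redirects execution to the jprobe hook, and resumed in the
break handler that jprobe_return() traps back into. A condensed sketch of
that shape (register save/restore elided; the two ftrace helpers are the
ones the patch calls):

    int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
    {
            /* ... save regs, redirect execution to the jprobe hook ... */
            pause_graph_tracing();  /* hook will exit via jprobe_return() */
            return 1;
    }

    int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
    {
            /* ... restore the registers saved by setjmp_pre_handler() ... */
            unpause_graph_tracing();        /* normal returns are accounted again */
            preempt_enable_no_resched();
            return 1;
    }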
diff --git a/queue-4.4/series b/queue-4.4/series
index ba7e513eed78de1c3d74040f4823190989979fdc..d3499a9292844d81135abbe3c9a675126fb51ae3 100644 (file)
@@ -5,3 +5,6 @@ kvm-ppc-book3s-hv-preserve-userspace-htm-state-properly.patch
 cifs-improve-readdir-verbosity.patch
 hid-add-quirk-for-dell-pixart-oem-mouse.patch
 signal-only-reschedule-timers-on-signals-timers-have-sent.patch
+powerpc-kprobes-pause-function_graph-tracing-during-jprobes-handling.patch
+input-i8042-add-fujitsu-lifebook-ah544-to-notimeout-list.patch
+time-fix-clock-read-clock-race-around-clocksource-changes.patch
diff --git a/queue-4.4/time-fix-clock-read-clock-race-around-clocksource-changes.patch b/queue-4.4/time-fix-clock-read-clock-race-around-clocksource-changes.patch
new file mode 100644 (file)
index 0000000..6413c1e
--- /dev/null
@@ -0,0 +1,206 @@
+From ceea5e3771ed2378668455fa21861bead7504df5 Mon Sep 17 00:00:00 2001
+From: John Stultz <john.stultz@linaro.org>
+Date: Thu, 8 Jun 2017 16:44:20 -0700
+Subject: time: Fix clock->read(clock) race around clocksource changes
+
+From: John Stultz <john.stultz@linaro.org>
+
+commit ceea5e3771ed2378668455fa21861bead7504df5 upstream.
+
+In tests that exercise switching of clocksources, a NULL
+pointer dereference can be observed on ARM64 platforms in the
+clocksource read() function:
+
+u64 clocksource_mmio_readl_down(struct clocksource *c)
+{
+       return ~(u64)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
+}
+
+This is called from the core timekeeping code via:
+
+       cycle_now = tkr->read(tkr->clock);
+
+tkr->read is the cached tkr->clock->read() function pointer.
+When the clocksource is changed then tkr->clock and tkr->read
+are updated sequentially. The code above results in a sequential
+load operation of tkr->read and tkr->clock as well.
+
+If the store to tkr->clock hits between the loads of tkr->read
+and tkr->clock, then the old read() function is called with the
+new clock pointer. As a consequence the read() function
+dereferences a different data structure and the resulting 'reg'
+pointer can point anywhere including NULL.
+
+This problem was introduced when the timekeeping code was
+switched over to use struct tk_read_base. Before that, it was
+theoretically possible as well when the compiler decided to
+reload clock in the code sequence:
+
+     now = tk->clock->read(tk->clock);
+
+Add a helper function which avoids the issue by reading
+tk_read_base->clock once into a local variable clk and then issuing
+the read function via clk->read(clk). This guarantees that the
+read() function always gets the proper clocksource pointer handed
+in.
+
+Since there is now no use for the tkr.read pointer, this patch
+also removes it, and to address stopping the fast timekeeper
+during suspend/resume, it introduces a dummy clocksource to use
+rather than just a dummy read function.
+
+Signed-off-by: John Stultz <john.stultz@linaro.org>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Prarit Bhargava <prarit@redhat.com>
+Cc: Richard Cochran <richardcochran@gmail.com>
+Cc: Stephen Boyd <stephen.boyd@linaro.org>
+Cc: Miroslav Lichvar <mlichvar@redhat.com>
+Cc: Daniel Mentz <danielmentz@google.com>
+Link: http://lkml.kernel.org/r/1496965462-20003-2-git-send-email-john.stultz@linaro.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/timekeeper_internal.h |    1 -
+ kernel/time/timekeeping.c           |   47 ++++++++++++++++++++++++++----------
+ 2 files changed, 34 insertions(+), 14 deletions(-)
+
+--- a/include/linux/timekeeper_internal.h
++++ b/include/linux/timekeeper_internal.h
+@@ -29,7 +29,6 @@
+  */
+ struct tk_read_base {
+       struct clocksource      *clock;
+-      cycle_t                 (*read)(struct clocksource *cs);
+       cycle_t                 mask;
+       cycle_t                 cycle_last;
+       u32                     mult;
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -116,6 +116,26 @@ static inline void tk_update_sleep_time(
+       tk->offs_boot = ktime_add(tk->offs_boot, delta);
+ }
++/*
++ * tk_clock_read - atomic clocksource read() helper
++ *
++ * This helper is necessary to use in the read paths because, while the
++ * seqlock ensures we don't return a bad value while structures are updated,
++ * it doesn't protect from potential crashes. There is the possibility that
++ * the tkr's clocksource may change between the read reference, and the
++ * clock reference passed to the read function.  This can cause crashes if
++ * the wrong clocksource is passed to the wrong read function.
++ * This isn't necessary to use when holding the timekeeper_lock or doing
++ * a read of the fast-timekeeper tkrs (which is protected by its own locking
++ * and update logic).
++ */
++static inline u64 tk_clock_read(struct tk_read_base *tkr)
++{
++      struct clocksource *clock = READ_ONCE(tkr->clock);
++
++      return clock->read(clock);
++}
++
+ #ifdef CONFIG_DEBUG_TIMEKEEPING
+ #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
+@@ -173,7 +193,7 @@ static inline cycle_t timekeeping_get_de
+        */
+       do {
+               seq = read_seqcount_begin(&tk_core.seq);
+-              now = tkr->read(tkr->clock);
++              now = tk_clock_read(tkr);
+               last = tkr->cycle_last;
+               mask = tkr->mask;
+               max = tkr->clock->max_cycles;
+@@ -207,7 +227,7 @@ static inline cycle_t timekeeping_get_de
+       cycle_t cycle_now, delta;
+       /* read clocksource */
+-      cycle_now = tkr->read(tkr->clock);
++      cycle_now = tk_clock_read(tkr);
+       /* calculate the delta since the last update_wall_time */
+       delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
+@@ -235,12 +255,10 @@ static void tk_setup_internals(struct ti
+       old_clock = tk->tkr_mono.clock;
+       tk->tkr_mono.clock = clock;
+-      tk->tkr_mono.read = clock->read;
+       tk->tkr_mono.mask = clock->mask;
+-      tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
++      tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
+       tk->tkr_raw.clock = clock;
+-      tk->tkr_raw.read = clock->read;
+       tk->tkr_raw.mask = clock->mask;
+       tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
+@@ -404,7 +422,7 @@ static __always_inline u64 __ktime_get_f
+               now += timekeeping_delta_to_ns(tkr,
+                               clocksource_delta(
+-                                      tkr->read(tkr->clock),
++                                      tk_clock_read(tkr),
+                                       tkr->cycle_last,
+                                       tkr->mask));
+       } while (read_seqcount_retry(&tkf->seq, seq));
+@@ -432,6 +450,10 @@ static cycle_t dummy_clock_read(struct c
+       return cycles_at_suspend;
+ }
++static struct clocksource dummy_clock = {
++      .read = dummy_clock_read,
++};
++
+ /**
+  * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
+  * @tk: Timekeeper to snapshot.
+@@ -448,13 +470,13 @@ static void halt_fast_timekeeper(struct
+       struct tk_read_base *tkr = &tk->tkr_mono;
+       memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
+-      cycles_at_suspend = tkr->read(tkr->clock);
+-      tkr_dummy.read = dummy_clock_read;
++      cycles_at_suspend = tk_clock_read(tkr);
++      tkr_dummy.clock = &dummy_clock;
+       update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
+       tkr = &tk->tkr_raw;
+       memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
+-      tkr_dummy.read = dummy_clock_read;
++      tkr_dummy.clock = &dummy_clock;
+       update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
+ }
+@@ -618,11 +640,10 @@ static void timekeeping_update(struct ti
+  */
+ static void timekeeping_forward_now(struct timekeeper *tk)
+ {
+-      struct clocksource *clock = tk->tkr_mono.clock;
+       cycle_t cycle_now, delta;
+       s64 nsec;
+-      cycle_now = tk->tkr_mono.read(clock);
++      cycle_now = tk_clock_read(&tk->tkr_mono);
+       delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
+       tk->tkr_mono.cycle_last = cycle_now;
+       tk->tkr_raw.cycle_last  = cycle_now;
+@@ -1405,7 +1426,7 @@ void timekeeping_resume(void)
+        * The less preferred source will only be tried if there is no better
+        * usable source. The rtc part is handled separately in rtc core code.
+        */
+-      cycle_now = tk->tkr_mono.read(clock);
++      cycle_now = tk_clock_read(&tk->tkr_mono);
+       if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
+               cycle_now > tk->tkr_mono.cycle_last) {
+               u64 num, max = ULLONG_MAX;
+@@ -1800,7 +1821,7 @@ void update_wall_time(void)
+ #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
+       offset = real_tk->cycle_interval;
+ #else
+-      offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
++      offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
+                                  tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
+ #endif
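
The fix boils down to replacing two independent loads with a single
snapshot. In the old pattern the function pointer and its argument were
loaded separately, so a concurrent clocksource switch could pair the old
read() with the new clock; the helper the patch adds takes the clocksource
pointer once and derives both from it:

    /* Old, racy pattern: two loads that an update can interleave. */
    cycle_now = tkr->read(tkr->clock);

    /* New pattern, as added by the patch: one READ_ONCE() snapshot, so
     * read() is always handed the clocksource it belongs to. */
    static inline u64 tk_clock_read(struct tk_read_base *tkr)
    {
            struct clocksource *clock = READ_ONCE(tkr->clock);

            return clock->read(clock);
    }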