4.8-stable patches
author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	Tue, 6 Dec 2016 20:23:49 +0000 (21:23 +0100)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	Tue, 6 Dec 2016 20:23:49 +0000 (21:23 +0100)
added patches:
arm64-cpufeature-schedule-enable-calls-instead-of-calling-them-via-ipi.patch
arm64-mm-set-pstate.pan-from-the-cpu_enable_pan-call.patch
arm64-suspend-reconfigure-pstate-after-resume-from-idle.patch

queue-4.8/arm64-cpufeature-schedule-enable-calls-instead-of-calling-them-via-ipi.patch [new file with mode: 0644]
queue-4.8/arm64-mm-set-pstate.pan-from-the-cpu_enable_pan-call.patch [new file with mode: 0644]
queue-4.8/arm64-suspend-reconfigure-pstate-after-resume-from-idle.patch [new file with mode: 0644]
queue-4.8/series

diff --git a/queue-4.8/arm64-cpufeature-schedule-enable-calls-instead-of-calling-them-via-ipi.patch b/queue-4.8/arm64-cpufeature-schedule-enable-calls-instead-of-calling-them-via-ipi.patch
new file mode 100644 (file)
index 0000000..d15b5f6
--- /dev/null
@@ -0,0 +1,135 @@
+From 2a6dcb2b5f3e21592ca8dfa198dcce7bec09b020 Mon Sep 17 00:00:00 2001
+From: James Morse <james.morse@arm.com>
+Date: Tue, 18 Oct 2016 11:27:46 +0100
+Subject: arm64: cpufeature: Schedule enable() calls instead of calling them via IPI
+
+From: James Morse <james.morse@arm.com>
+
+commit 2a6dcb2b5f3e21592ca8dfa198dcce7bec09b020 upstream.
+
+The enable() callback for a cpufeature/errata is invoked using on_each_cpu().
+This issues a cross-call IPI to get the work done. Implicitly, this
+stashes the running PSTATE in SPSR when the CPU receives the IPI, and
+restores it when we return. This means an enable() call can never modify
+PSTATE.
+
+To allow PAN to do this, change the on_each_cpu() call to use
+stop_machine(). This schedules the work on each CPU which allows
+us to modify PSTATE.
+
+This involves changing the prototype of all the enable() functions.
+
+enable_cpu_capabilities() is called during boot and enables the feature
+on all online CPUs. This path now uses stop_machine(). CPU features for
+hotplug'd CPUs are enabled by verify_local_cpu_features() which only
+acts on the local CPU, and can already modify the running PSTATE as it
+is called from secondary_start_kernel().
+
+Reported-by: Tony Thompson <anthony.thompson@arm.com>
+Reported-by: Vladimir Murzin <vladimir.murzin@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+[Removed enable() hunks for A53 workaround]
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/cpufeature.h |    2 +-
+ arch/arm64/include/asm/processor.h  |    6 +++---
+ arch/arm64/kernel/cpufeature.c      |   10 +++++++++-
+ arch/arm64/kernel/traps.c           |    3 ++-
+ arch/arm64/mm/fault.c               |    6 ++++--
+ 5 files changed, 19 insertions(+), 8 deletions(-)
+
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -90,7 +90,7 @@ struct arm64_cpu_capabilities {
+       u16 capability;
+       int def_scope;                  /* default scope */
+       bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
+-      void (*enable)(void *);         /* Called on all active CPUs */
++      int (*enable)(void *);          /* Called on all active CPUs */
+       union {
+               struct {        /* To be used for erratum handling only */
+                       u32 midr_model;
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -190,8 +190,8 @@ static inline void spin_lock_prefetch(const void *ptr)
+ #endif
+-void cpu_enable_pan(void *__unused);
+-void cpu_enable_uao(void *__unused);
+-void cpu_enable_cache_maint_trap(void *__unused);
++int cpu_enable_pan(void *__unused);
++int cpu_enable_uao(void *__unused);
++int cpu_enable_cache_maint_trap(void *__unused);
+ #endif /* __ASM_PROCESSOR_H */
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -19,7 +19,9 @@
+ #define pr_fmt(fmt) "CPU features: " fmt
+ #include <linux/bsearch.h>
++#include <linux/cpumask.h>
+ #include <linux/sort.h>
++#include <linux/stop_machine.h>
+ #include <linux/types.h>
+ #include <asm/cpu.h>
+ #include <asm/cpufeature.h>
+@@ -936,7 +938,13 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+ {
+       for (; caps->matches; caps++)
+               if (caps->enable && cpus_have_cap(caps->capability))
+-                      on_each_cpu(caps->enable, NULL, true);
++                      /*
++                       * Use stop_machine() as it schedules the work allowing
++                       * us to modify PSTATE, instead of on_each_cpu() which
++                       * uses an IPI, giving us a PSTATE that disappears when
++                       * we return.
++                       */
++                      stop_machine(caps->enable, NULL, cpu_online_mask);
+ }
+ /*
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -428,9 +428,10 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
+       force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
+ }
+-void cpu_enable_cache_maint_trap(void *__unused)
++int cpu_enable_cache_maint_trap(void *__unused)
+ {
+       config_sctlr_el1(SCTLR_EL1_UCI, 0);
++      return 0;
+ }
+ #define __user_cache_maint(insn, address, res)                        \
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -671,9 +671,10 @@ asmlinkage int __exception do_debug_exception(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+ NOKPROBE_SYMBOL(do_debug_exception);
+ #ifdef CONFIG_ARM64_PAN
+-void cpu_enable_pan(void *__unused)
++int cpu_enable_pan(void *__unused)
+ {
+       config_sctlr_el1(SCTLR_EL1_SPAN, 0);
++      return 0;
+ }
+ #endif /* CONFIG_ARM64_PAN */
+@@ -684,8 +685,9 @@ void cpu_enable_pan(void *__unused)
+  * We need to enable the feature at runtime (instead of adding it to
+  * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
+  */
+-void cpu_enable_uao(void *__unused)
++int cpu_enable_uao(void *__unused)
+ {
+       asm(SET_PSTATE_UAO(1));
++      return 0;
+ }
+ #endif /* CONFIG_ARM64_UAO */
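
To make the fix above concrete, here is a minimal sketch of the calling pattern it changes. The on_each_cpu() and stop_machine() signatures are the real kernel ones; my_feature_enable_ipi(), my_feature_enable() and enable_everywhere() are hypothetical stand-ins for the cpufeature enable() hooks and enable_cpu_capabilities():

/* Sketch only: the my_feature_*() and enable_everywhere() names are made up. */
#include <linux/cpumask.h>      /* cpu_online_mask */
#include <linux/smp.h>          /* on_each_cpu() */
#include <linux/stop_machine.h> /* stop_machine() */

/*
 * Before the patch: enable() hooks had this shape and ran in IPI context.
 * Any PSTATE bit the hook sets is written into the IPI handler's PSTATE;
 * on exception return PSTATE is restored from SPSR, so the change is
 * silently discarded.
 */
static void my_feature_enable_ipi(void *unused)
{
        /* e.g. asm(SET_PSTATE_PAN(1)); -- lost on return from the IPI */
}

/*
 * After the patch: enable() hooks return int (stop_machine()'s callback
 * type) and run from the stop_machine() threads, i.e. ordinary process
 * context on every online CPU, so a PSTATE update made here survives.
 */
static int my_feature_enable(void *unused)
{
        /* e.g. asm(SET_PSTATE_PAN(1)); -- persists for this context */
        return 0;
}

static void enable_everywhere(void)
{
        /* Old: on_each_cpu(my_feature_enable_ipi, NULL, true); */
        stop_machine(my_feature_enable, NULL, cpu_online_mask);
}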
diff --git a/queue-4.8/arm64-mm-set-pstate.pan-from-the-cpu_enable_pan-call.patch b/queue-4.8/arm64-mm-set-pstate.pan-from-the-cpu_enable_pan-call.patch
new file mode 100644 (file)
index 0000000..4535afd
--- /dev/null
@@ -0,0 +1,59 @@
+From 7209c868600bd8926e37c10b9aae83124ccc1dd8 Mon Sep 17 00:00:00 2001
+From: James Morse <james.morse@arm.com>
+Date: Tue, 18 Oct 2016 11:27:47 +0100
+Subject: arm64: mm: Set PSTATE.PAN from the cpu_enable_pan() call
+
+From: James Morse <james.morse@arm.com>
+
+commit 7209c868600bd8926e37c10b9aae83124ccc1dd8 upstream.
+
+Commit 338d4f49d6f7 ("arm64: kernel: Add support for Privileged Access
+Never") enabled PAN by enabling the 'SPAN' feature-bit in SCTLR_EL1.
+This means the PSTATE.PAN bit won't be set until the next return to the
+kernel from userspace. On a preemptible kernel we may schedule work that
+accesses userspace on a CPU before it has done this.
+
+Now that cpufeature enable() calls are scheduled via stop_machine(), we
+can set PSTATE.PAN from the cpu_enable_pan() call.
+
+Add WARN_ON_ONCE(in_interrupt()) to check the PSTATE value we updated
+is not immediately discarded.
+
+Reported-by: Tony Thompson <anthony.thompson@arm.com>
+Reported-by: Vladimir Murzin <vladimir.murzin@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+[will: fixed typo in comment]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/fault.c |    9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -29,7 +29,9 @@
+ #include <linux/sched.h>
+ #include <linux/highmem.h>
+ #include <linux/perf_event.h>
++#include <linux/preempt.h>
++#include <asm/bug.h>
+ #include <asm/cpufeature.h>
+ #include <asm/exception.h>
+ #include <asm/debug-monitors.h>
+@@ -673,7 +675,14 @@ NOKPROBE_SYMBOL(do_debug_exception);
+ #ifdef CONFIG_ARM64_PAN
+ int cpu_enable_pan(void *__unused)
+ {
++      /*
++       * We modify PSTATE. This won't work from irq context as the PSTATE
++       * is discarded once we return from the exception.
++       */
++      WARN_ON_ONCE(in_interrupt());
++
+       config_sctlr_el1(SCTLR_EL1_SPAN, 0);
++      asm(SET_PSTATE_PAN(1));
+       return 0;
+ }
+ #endif /* CONFIG_ARM64_PAN */
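
Restating the patched cpu_enable_pan() above with fuller comments may help explain the ordering; this is a sketch rather than the upstream code verbatim, and the _sketch suffix is only there to avoid clashing with the real symbol:

/* Commented restatement of cpu_enable_pan() as patched above. */
static int cpu_enable_pan_sketch(void *__unused)
{
        /*
         * PSTATE changes made in interrupt context evaporate on exception
         * return, so insist on process context; stop_machine() (previous
         * patch) guarantees this, and the WARN catches future misuse.
         */
        WARN_ON_ONCE(in_interrupt());

        /*
         * Clear SCTLR_EL1.SPAN so the hardware sets PSTATE.PAN on every
         * subsequent exception entry to the kernel...
         */
        config_sctlr_el1(SCTLR_EL1_SPAN, 0);

        /*
         * ...and set PSTATE.PAN by hand for the current context, which
         * would otherwise stay PAN-clear until the next exception entry --
         * long enough for scheduled work to touch user memory unchecked.
         */
        asm(SET_PSTATE_PAN(1));
        return 0;
}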
diff --git a/queue-4.8/arm64-suspend-reconfigure-pstate-after-resume-from-idle.patch b/queue-4.8/arm64-suspend-reconfigure-pstate-after-resume-from-idle.patch
new file mode 100644 (file)
index 0000000..85b83bc
--- /dev/null
@@ -0,0 +1,91 @@
+From d08544127d9fb4505635e3cb6871fd50a42947bd Mon Sep 17 00:00:00 2001
+From: James Morse <james.morse@arm.com>
+Date: Tue, 18 Oct 2016 11:27:48 +0100
+Subject: arm64: suspend: Reconfigure PSTATE after resume from idle
+
+From: James Morse <james.morse@arm.com>
+
+commit d08544127d9fb4505635e3cb6871fd50a42947bd upstream.
+
+The suspend/resume path in kernel/sleep.S, as used by cpu-idle, does not
+save/restore PSTATE. As a result, cpufeatures that were detected and
+have bits in PSTATE get lost when we resume from idle.
+
+UAO gets set appropriately on the next context switch. PAN will be
+re-enabled next time we return from user-space, but on a preemptible
+kernel we may run work accessing user space before this point.
+
+Add code to re-enable these two features in __cpu_suspend_exit().
+We re-use uao_thread_switch(), passing it current.
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/exec.h |    3 +++
+ arch/arm64/kernel/process.c   |    3 ++-
+ arch/arm64/kernel/suspend.c   |   11 +++++++++++
+ 3 files changed, 16 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/exec.h
++++ b/arch/arm64/include/asm/exec.h
+@@ -18,6 +18,9 @@
+ #ifndef __ASM_EXEC_H
+ #define __ASM_EXEC_H
++#include <linux/sched.h>
++
+ extern unsigned long arch_align_stack(unsigned long sp);
++void uao_thread_switch(struct task_struct *next);
+ #endif        /* __ASM_EXEC_H */
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -49,6 +49,7 @@
+ #include <asm/alternative.h>
+ #include <asm/compat.h>
+ #include <asm/cacheflush.h>
++#include <asm/exec.h>
+ #include <asm/fpsimd.h>
+ #include <asm/mmu_context.h>
+ #include <asm/processor.h>
+@@ -303,7 +304,7 @@ static void tls_thread_switch(struct task_struct *next)
+ }
+ /* Restore the UAO state depending on next's addr_limit */
+-static void uao_thread_switch(struct task_struct *next)
++void uao_thread_switch(struct task_struct *next)
+ {
+       if (IS_ENABLED(CONFIG_ARM64_UAO)) {
+               if (task_thread_info(next)->addr_limit == KERNEL_DS)
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -1,8 +1,11 @@
+ #include <linux/ftrace.h>
+ #include <linux/percpu.h>
+ #include <linux/slab.h>
++#include <asm/alternative.h>
+ #include <asm/cacheflush.h>
++#include <asm/cpufeature.h>
+ #include <asm/debug-monitors.h>
++#include <asm/exec.h>
+ #include <asm/pgtable.h>
+ #include <asm/memory.h>
+ #include <asm/mmu_context.h>
+@@ -48,6 +51,14 @@ void notrace __cpu_suspend_exit(void)
+       set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+       /*
++       * PSTATE was not saved over suspend/resume, re-enable any detected
++       * features that might not have been set correctly.
++       */
++      asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
++                      CONFIG_ARM64_PAN));
++      uao_thread_switch(current);
++
++      /*
+        * Restore HW breakpoint registers to sane values
+        * before debug exceptions are possibly reenabled
+        * through local_dbg_restore.
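
The resume-path hunk above, restated as a commented sketch (identifiers as in the patch; only the function name is hypothetical, to avoid clashing with the real __cpu_suspend_exit()):

/* Commented restatement of the __cpu_suspend_exit() addition above. */
static void cpu_resume_pstate_fixup_sketch(void)
{
        /*
         * PSTATE is not saved/restored by the suspend path in
         * kernel/sleep.S, so feature bits living in PSTATE are lost
         * across cpu-idle.
         */

        /*
         * PAN: ALTERNATIVE() patches the nop into SET_PSTATE_PAN(1) at
         * boot on systems where the ARM64_HAS_PAN capability was
         * detected; elsewhere it stays a nop.
         */
        asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
                        CONFIG_ARM64_PAN));

        /*
         * UAO: reuse the context-switch helper with the current task so
         * PSTATE.UAO is made consistent with current's addr_limit again.
         */
        uao_thread_switch(current);
}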
diff --git a/queue-4.8/series b/queue-4.8/series
index 566fbc8b4d3bafaf5f0ca77142000e98fa274fc5..e9e1c56d3fd3034142acec7a4da22747936d3219 100644 (file)
@@ -30,3 +30,6 @@ drm-mediatek-fix-null-pointer-dereference.patch
 perf-x86-restore-task_size-check-on-frame-pointer.patch
 clk-sunxi-fix-m-factor-computation-for-apb1.patch
 batman-adv-detect-missing-primaryif-during-tp_send-as-error.patch
+arm64-cpufeature-schedule-enable-calls-instead-of-calling-them-via-ipi.patch
+arm64-mm-set-pstate.pan-from-the-cpu_enable_pan-call.patch
+arm64-suspend-reconfigure-pstate-after-resume-from-idle.patch