git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 7 Mar 2018 17:36:56 +0000 (09:36 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 7 Mar 2018 17:36:56 +0000 (09:36 -0800)
added patches:
arm-dts-logicpd-som-lv-fix-i2c1-pinmux.patch
arm-dts-logicpd-torpedo-fix-i2c1-pinmux.patch
dm-io-fix-duplicate-bio-completion-due-to-missing-ref-count.patch
md-only-allow-remove_and_add_spares-when-no-sync_thread-running.patch
x86-mm-give-each-mm-tlb-flush-generation-a-unique-id.patch
x86-speculation-use-indirect-branch-prediction-barrier-in-context-switch.patch

queue-4.9/arm-dts-logicpd-som-lv-fix-i2c1-pinmux.patch [new file with mode: 0644]
queue-4.9/arm-dts-logicpd-torpedo-fix-i2c1-pinmux.patch [new file with mode: 0644]
queue-4.9/dm-io-fix-duplicate-bio-completion-due-to-missing-ref-count.patch [new file with mode: 0644]
queue-4.9/md-only-allow-remove_and_add_spares-when-no-sync_thread-running.patch [new file with mode: 0644]
queue-4.9/series
queue-4.9/x86-mm-give-each-mm-tlb-flush-generation-a-unique-id.patch [new file with mode: 0644]
queue-4.9/x86-speculation-use-indirect-branch-prediction-barrier-in-context-switch.patch [new file with mode: 0644]

diff --git a/queue-4.9/arm-dts-logicpd-som-lv-fix-i2c1-pinmux.patch b/queue-4.9/arm-dts-logicpd-som-lv-fix-i2c1-pinmux.patch
new file mode 100644
index 0000000..bff18d2
--- /dev/null
+++ b/queue-4.9/arm-dts-logicpd-som-lv-fix-i2c1-pinmux.patch
@@ -0,0 +1,52 @@
+From 84c7efd607e7fb6933920322086db64654f669b2 Mon Sep 17 00:00:00 2001
+From: Adam Ford <aford173@gmail.com>
+Date: Sat, 27 Jan 2018 15:27:05 -0600
+Subject: ARM: dts: LogicPD SOM-LV: Fix I2C1 pinmux
+
+From: Adam Ford <aford173@gmail.com>
+
+commit 84c7efd607e7fb6933920322086db64654f669b2 upstream.
+
+The pinmuxing was missing for I2C1 which was causing intermittent issues
+with the PMIC which is connected to I2C1.  The bootloader did not quite
+configure the I2C1 either, so when running at 2.6MHz, it was generating
+errors at times.
+
+This correctly sets the I2C1 pinmuxing so it can operate at 2.6MHz
+
+Fixes: ab8dd3aed011 ("ARM: DTS: Add minimal Support for Logic PD DM3730
+SOM-LV")
+
+Signed-off-by: Adam Ford <aford173@gmail.com>
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/logicpd-som-lv.dtsi |    9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
++++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
+@@ -97,6 +97,8 @@
+ };
+ &i2c1 {
++      pinctrl-names = "default";
++      pinctrl-0 = <&i2c1_pins>;
+       clock-frequency = <2600000>;
+       twl: twl@48 {
+@@ -215,7 +217,12 @@
+               >;
+       };
+-
++      i2c1_pins: pinmux_i2c1_pins {
++              pinctrl-single,pins = <
++                      OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0)        /* i2c1_scl.i2c1_scl */
++                      OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0)        /* i2c1_sda.i2c1_sda */
++              >;
++      };
+ };
+ &omap3_pmx_wkup {
diff --git a/queue-4.9/arm-dts-logicpd-torpedo-fix-i2c1-pinmux.patch b/queue-4.9/arm-dts-logicpd-torpedo-fix-i2c1-pinmux.patch
new file mode 100644
index 0000000..c86d235
--- /dev/null
+++ b/queue-4.9/arm-dts-logicpd-torpedo-fix-i2c1-pinmux.patch
@@ -0,0 +1,51 @@
+From 74402055a2d3ec998a1ded599e86185a27d9bbf4 Mon Sep 17 00:00:00 2001
+From: Adam Ford <aford173@gmail.com>
+Date: Thu, 25 Jan 2018 14:10:37 -0600
+Subject: ARM: dts: LogicPD Torpedo: Fix I2C1 pinmux
+
+From: Adam Ford <aford173@gmail.com>
+
+commit 74402055a2d3ec998a1ded599e86185a27d9bbf4 upstream.
+
+The pinmuxing was missing for I2C1 which was causing intermittent issues
+with the PMIC which is connected to I2C1.  The bootloader did not quite
+configure the I2C1 either, so when running at 2.6MHz, it was generating
+errors at times.
+
+This correctly sets the I2C1 pinmuxing so it can operate at 2.6MHz
+
+Fixes: 687c27676151 ("ARM: dts: Add minimal support for LogicPD Torpedo
+DM3730 devkit")
+
+Signed-off-by: Adam Ford <aford173@gmail.com>
+Signed-off-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/logicpd-torpedo-som.dtsi |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
++++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+@@ -100,6 +100,8 @@
+ };
+ &i2c1 {
++      pinctrl-names = "default";
++      pinctrl-0 = <&i2c1_pins>;
+       clock-frequency = <2600000>;
+       twl: twl@48 {
+@@ -207,6 +209,12 @@
+                       OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT | MUX_MODE0)        /* hsusb0_data7.hsusb0_data7 */
+               >;
+       };
++      i2c1_pins: pinmux_i2c1_pins {
++              pinctrl-single,pins = <
++                      OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0)        /* i2c1_scl.i2c1_scl */
++                      OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0)        /* i2c1_sda.i2c1_sda */
++              >;
++      };
+ };
+ &uart2 {
diff --git a/queue-4.9/dm-io-fix-duplicate-bio-completion-due-to-missing-ref-count.patch b/queue-4.9/dm-io-fix-duplicate-bio-completion-due-to-missing-ref-count.patch
new file mode 100644
index 0000000..4c48a52
--- /dev/null
+++ b/queue-4.9/dm-io-fix-duplicate-bio-completion-due-to-missing-ref-count.patch
@@ -0,0 +1,37 @@
+From feb7695fe9fb83084aa29de0094774f4c9d4c9fc Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Tue, 20 Jun 2017 19:14:30 -0400
+Subject: dm io: fix duplicate bio completion due to missing ref count
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit feb7695fe9fb83084aa29de0094774f4c9d4c9fc upstream.
+
+If only a subset of the devices associated with multiple regions support
+a given special operation (e.g. DISCARD) then the dec_count() that is
+used to set error for the region must increment the io->count.
+
+Otherwise, when dec_count() is called it can cause the dm-io caller's
+bio to be completed multiple times, as was reported against the
+dm-mirror target that had mirror legs with a mix of discard
+capabilities.
+
+Bug: https://bugzilla.kernel.org/show_bug.cgi?id=196077
+Reported-by: Zhang Yi <yizhan@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-io.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/dm-io.c
++++ b/drivers/md/dm-io.c
+@@ -302,6 +302,7 @@ static void do_region(int op, int op_fla
+               special_cmd_max_sectors = q->limits.max_write_same_sectors;
+       if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_SAME) &&
+           special_cmd_max_sectors == 0) {
++              atomic_inc(&io->count);
+               dec_count(io, region, -EOPNOTSUPP);
+               return;
+       }
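The one-line fix above only makes sense against dm-io's completion-counting contract, which the short hunk does not show. Below is a simplified, kernel-style sketch of that contract; it is not the actual drivers/md/dm-io.c code: complete_original_bio() and op_not_supported() are made-up placeholders, and the real struct io tracks error_bits rather than a single error value.

struct io {
	atomic_t count;		/* outstanding references on this request */
	int error;
};

static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		io->error = error;

	/* The caller's original bio must be completed exactly once, when
	 * the last reference is dropped. */
	if (atomic_dec_and_test(&io->count))
		complete_original_bio(io);
}

static void do_region(struct io *io, unsigned int region, int op)
{
	if (op_not_supported(op)) {
		/* Take a reference before reporting -EOPNOTSUPP; without
		 * this atomic_inc(), dec_count() would consume a reference
		 * owned by another region and the caller's bio could be
		 * completed more than once. */
		atomic_inc(&io->count);
		dec_count(io, region, -EOPNOTSUPP);
		return;
	}

	/* ... otherwise each submitted sub-bio takes its own reference and
	 * drops it from its endio callback via dec_count() ... */
}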
diff --git a/queue-4.9/md-only-allow-remove_and_add_spares-when-no-sync_thread-running.patch b/queue-4.9/md-only-allow-remove_and_add_spares-when-no-sync_thread-running.patch
new file mode 100644
index 0000000..47d0c89
--- /dev/null
+++ b/queue-4.9/md-only-allow-remove_and_add_spares-when-no-sync_thread-running.patch
@@ -0,0 +1,60 @@
+From 39772f0a7be3b3dc26c74ea13fe7847fd1522c8b Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.com>
+Date: Sat, 3 Feb 2018 09:19:30 +1100
+Subject: md: only allow remove_and_add_spares when no sync_thread running.
+
+From: NeilBrown <neilb@suse.com>
+
+commit 39772f0a7be3b3dc26c74ea13fe7847fd1522c8b upstream.
+
+The locking protocols in md assume that a device will
+never be removed from an array during resync/recovery/reshape.
+When that isn't happening, rcu or reconfig_mutex is needed
+to protect an rdev pointer while taking a refcount.  When
+it is happening, that protection isn't needed.
+
+Unfortunately there are cases where remove_and_add_spares() is
+called when recovery might be happening: in state_store(),
+slot_store() and hot_remove_disk().
+In each case, this is just an optimization, to try to expedite
+removal from the personality so the device can be removed from
+the array.  If resync etc. is happening, we just have to wait
+for md_check_recovery() to find a suitable time to call
+remove_and_add_spares().
+
+This optimization is not essential, so it doesn't
+matter if it fails.
+So change remove_and_add_spares() to abort early if
+resync/recovery/reshape is happening, unless it is called
+from md_check_recovery() as part of a newly started recovery.
+The parameter "this" is only NULL when called from
+md_check_recovery() so when it is NULL, there is no need to abort.
+
+As this can result in a NULL dereference, the fix is suitable
+for -stable.
+
+cc: yuyufen <yuyufen@huawei.com>
+Cc: Tomasz Majchrzak <tomasz.majchrzak@intel.com>
+Fixes: 8430e7e0af9a ("md: disconnect device from personality before trying to remove it.")
+Cc: stable@vger.kernel.org (v4.8+)
+Signed-off-by: NeilBrown <neilb@suse.com>
+Signed-off-by: Shaohua Li <sh.li@alibaba-inc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/md.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8224,6 +8224,10 @@ static int remove_and_add_spares(struct
+       int removed = 0;
+       bool remove_some = false;
++      if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
++              /* Mustn't remove devices when resync thread is running */
++              return 0;
++
+       rdev_for_each(rdev, mddev) {
+               if ((this == NULL || rdev == this) &&
+                   rdev->raid_disk >= 0 &&
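As a purely illustrative sketch of the two classes of callers the commit message distinguishes (the wrapper functions below are made up and are not the real call sites in drivers/md/md.c; only remove_and_add_spares(), md_check_recovery(), state_store(), slot_store() and hot_remove_disk() are real names):

static void expedite_removal_sketch(struct mddev *mddev, struct md_rdev *rdev)
{
	/* state_store(), slot_store() and hot_remove_disk() pass a specific
	 * rdev; with the fix this call returns 0 immediately while
	 * MD_RECOVERY_RUNNING is set, and a later md_check_recovery() pass
	 * does the removal instead. */
	remove_and_add_spares(mddev, rdev);
}

static void recovery_scan_sketch(struct mddev *mddev)
{
	/* md_check_recovery() owns the sync-thread lifecycle and passes
	 * this == NULL, so the new early return deliberately does not
	 * apply to it. */
	remove_and_add_spares(mddev, NULL);
}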
diff --git a/queue-4.9/series b/queue-4.9/series
index 3d2b87ca92c467459b9859f4fde3eea09118ffef..585230a391f6e4480323b130e61e8e7ab92fa3cd 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -22,3 +22,9 @@ kvm-mmu-fix-overlap-between-public-and-private-memslots.patch
 kvm-x86-remove-indirect-msr-op-calls-from-spec_ctrl.patch
 kvm-vmx-optimize-vmx_vcpu_run-and-svm_vcpu_run-by-marking-the-rdmsr-path-as-unlikely.patch
 pci-aspm-deal-with-missing-root-ports-in-link-state-handling.patch
+dm-io-fix-duplicate-bio-completion-due-to-missing-ref-count.patch
+arm-dts-logicpd-som-lv-fix-i2c1-pinmux.patch
+arm-dts-logicpd-torpedo-fix-i2c1-pinmux.patch
+x86-mm-give-each-mm-tlb-flush-generation-a-unique-id.patch
+x86-speculation-use-indirect-branch-prediction-barrier-in-context-switch.patch
+md-only-allow-remove_and_add_spares-when-no-sync_thread-running.patch
diff --git a/queue-4.9/x86-mm-give-each-mm-tlb-flush-generation-a-unique-id.patch b/queue-4.9/x86-mm-give-each-mm-tlb-flush-generation-a-unique-id.patch
new file mode 100644
index 0000000..68472a7
--- /dev/null
+++ b/queue-4.9/x86-mm-give-each-mm-tlb-flush-generation-a-unique-id.patch
@@ -0,0 +1,111 @@
+From f39681ed0f48498b80455095376f11535feea332 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Thu, 29 Jun 2017 08:53:15 -0700
+Subject: x86/mm: Give each mm TLB flush generation a unique ID
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit f39681ed0f48498b80455095376f11535feea332 upstream.
+
+This adds two new variables to mmu_context_t: ctx_id and tlb_gen.
+ctx_id uniquely identifies the mm_struct and will never be reused.
+For a given mm_struct (and hence ctx_id), tlb_gen is a monotonic
+count of the number of times that a TLB flush has been requested.
+The pair (ctx_id, tlb_gen) can be used as an identifier for TLB
+flush actions and will be used in subsequent patches to reliably
+determine whether all needed TLB flushes have occurred on a given
+CPU.
+
+This patch is split out for ease of review.  By itself, it has no
+real effect other than creating and updating the new variables.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Reviewed-by: Nadav Amit <nadav.amit@gmail.com>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: linux-mm@kvack.org
+Link: http://lkml.kernel.org/r/413a91c24dab3ed0caa5f4e4d017d87b0857f920.1498751203.git.luto@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/mmu.h         |   15 +++++++++++++--
+ arch/x86/include/asm/mmu_context.h |    5 +++++
+ arch/x86/mm/tlb.c                  |    2 ++
+ 3 files changed, 20 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/mmu.h
++++ b/arch/x86/include/asm/mmu.h
+@@ -3,12 +3,18 @@
+ #include <linux/spinlock.h>
+ #include <linux/mutex.h>
++#include <linux/atomic.h>
+ /*
+- * The x86 doesn't have a mmu context, but
+- * we put the segment information here.
++ * x86 has arch-specific MMU state beyond what lives in mm_struct.
+  */
+ typedef struct {
++      /*
++       * ctx_id uniquely identifies this mm_struct.  A ctx_id will never
++       * be reused, and zero is not a valid ctx_id.
++       */
++      u64 ctx_id;
++
+ #ifdef CONFIG_MODIFY_LDT_SYSCALL
+       struct ldt_struct *ldt;
+ #endif
+@@ -33,6 +39,11 @@ typedef struct {
+ #endif
+ } mm_context_t;
++#define INIT_MM_CONTEXT(mm)                                           \
++      .context = {                                                    \
++              .ctx_id = 1,                                            \
++      }
++
+ void leave_mm(int cpu);
+ #endif /* _ASM_X86_MMU_H */
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -12,6 +12,9 @@
+ #include <asm/tlbflush.h>
+ #include <asm/paravirt.h>
+ #include <asm/mpx.h>
++
++extern atomic64_t last_mm_ctx_id;
++
+ #ifndef CONFIG_PARAVIRT
+ static inline void paravirt_activate_mm(struct mm_struct *prev,
+                                       struct mm_struct *next)
+@@ -106,6 +109,8 @@ static inline void enter_lazy_tlb(struct
+ static inline int init_new_context(struct task_struct *tsk,
+                                  struct mm_struct *mm)
+ {
++      mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
++
+       #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+       if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
+               /* pkey 0 is the default and always allocated */
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -29,6 +29,8 @@
+  *    Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
+  */
++atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
++
+ struct flush_tlb_info {
+       struct mm_struct *flush_mm;
+       unsigned long flush_start;
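To illustrate why the later IBPB patch in this series keys off ctx_id rather than the mm_struct pointer, here is a small sketch; it is not kernel code, new_ctx_id() and same_address_space() are made-up helpers, while atomic64_inc_return() and mm->context.ctx_id match the patch above.

static atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);

static u64 new_ctx_id(void)
{
	/* Monotonic and never reused: 2, 3, 4, ...  Zero stays invalid and
	 * 1 is taken by init_mm via INIT_MM_CONTEXT(). */
	return atomic64_inc_return(&last_mm_ctx_id);
}

static bool same_address_space(u64 last_ctx_id, struct mm_struct *next)
{
	/* An mm_struct can be freed and reallocated at the same address,
	 * so a pointer comparison can mistake two different address spaces
	 * for one another; the never-reused ctx_id cannot be fooled that
	 * way. */
	return next->context.ctx_id == last_ctx_id;
}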
diff --git a/queue-4.9/x86-speculation-use-indirect-branch-prediction-barrier-in-context-switch.patch b/queue-4.9/x86-speculation-use-indirect-branch-prediction-barrier-in-context-switch.patch
new file mode 100644
index 0000000..3e7f534
--- /dev/null
+++ b/queue-4.9/x86-speculation-use-indirect-branch-prediction-barrier-in-context-switch.patch
@@ -0,0 +1,124 @@
+From 18bf3c3ea8ece8f03b6fc58508f2dfd23c7711c7 Mon Sep 17 00:00:00 2001
+From: Tim Chen <tim.c.chen@linux.intel.com>
+Date: Mon, 29 Jan 2018 22:04:47 +0000
+Subject: x86/speculation: Use Indirect Branch Prediction Barrier in context switch
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tim Chen <tim.c.chen@linux.intel.com>
+
+commit 18bf3c3ea8ece8f03b6fc58508f2dfd23c7711c7 upstream.
+
+Flush indirect branches when switching into a process that marked itself
+non dumpable. This protects high value processes like gpg better,
+without having too high performance overhead.
+
+If done naïvely, we could switch to a kernel idle thread and then back
+to the original process, such as:
+
+    process A -> idle -> process A
+
+In such scenario, we do not have to do IBPB here even though the process
+is non-dumpable, as we are switching back to the same process after a
+hiatus.
+
+To avoid the redundant IBPB, which is expensive, we track the last mm
+user context ID. The cost is to have an extra u64 mm context id to track
+the last mm we were using before switching to the init_mm used by idle.
+Avoiding the extra IBPB is probably worth the extra memory for this
+common scenario.
+
+For those cases where tlb_defer_switch_to_init_mm() returns true (non
+PCID), lazy tlb will defer switch to init_mm, so we will not be changing
+the mm for the process A -> idle -> process A switch. So IBPB will be
+skipped for this case.
+
+Thanks to the reviewers and Andy Lutomirski for the suggestion of
+using ctx_id which got rid of the problem of mm pointer recycling.
+
+Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: ak@linux.intel.com
+Cc: karahmed@amazon.de
+Cc: arjan@linux.intel.com
+Cc: torvalds@linux-foundation.org
+Cc: linux@dominikbrodowski.net
+Cc: peterz@infradead.org
+Cc: bp@alien8.de
+Cc: luto@kernel.org
+Cc: pbonzini@redhat.com
+Link: https://lkml.kernel.org/r/1517263487-3708-1-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/tlbflush.h |    2 ++
+ arch/x86/mm/tlb.c               |   31 +++++++++++++++++++++++++++++++
+ 2 files changed, 33 insertions(+)
+
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -68,6 +68,8 @@ static inline void invpcid_flush_all_non
+ struct tlb_state {
+       struct mm_struct *active_mm;
+       int state;
++      /* last user mm's ctx id */
++      u64 last_ctx_id;
+       /*
+        * Access to this CR4 shadow and to H/W CR4 is protected by
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -10,6 +10,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/mmu_context.h>
++#include <asm/nospec-branch.h>
+ #include <asm/cache.h>
+ #include <asm/apic.h>
+ #include <asm/uv/uv.h>
+@@ -106,6 +107,28 @@ void switch_mm_irqs_off(struct mm_struct
+       unsigned cpu = smp_processor_id();
+       if (likely(prev != next)) {
++              u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
++
++              /*
++               * Avoid user/user BTB poisoning by flushing the branch
++               * predictor when switching between processes. This stops
++               * one process from doing Spectre-v2 attacks on another.
++               *
++               * As an optimization, flush indirect branches only when
++               * switching into processes that disable dumping. This
++               * protects high value processes like gpg, without having
++               * too high performance overhead. IBPB is *expensive*!
++               *
++               * This will not flush branches when switching into kernel
++               * threads. It will also not flush if we switch to idle
++               * thread and back to the same process. It will flush if we
++               * switch to a different non-dumpable process.
++               */
++              if (tsk && tsk->mm &&
++                  tsk->mm->context.ctx_id != last_ctx_id &&
++                  get_dumpable(tsk->mm) != SUID_DUMP_USER)
++                      indirect_branch_prediction_barrier();
++
+               if (IS_ENABLED(CONFIG_VMAP_STACK)) {
+                       /*
+                        * If our current stack is in vmalloc space and isn't
+@@ -120,6 +143,14 @@ void switch_mm_irqs_off(struct mm_struct
+                               set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
+               }
++              /*
++               * Record last user mm's context id, so we can avoid
++               * flushing branch buffer with IBPB if we switch back
++               * to the same user.
++               */
++              if (next != &init_mm)
++                      this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
++
+               this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+               this_cpu_write(cpu_tlbstate.active_mm, next);
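The condition added to switch_mm_irqs_off() above can be read as the following stand-alone predicate; this is purely illustrative, ibpb_needed() is a made-up name and the real check is open-coded in the hunk above.

static bool ibpb_needed(struct task_struct *tsk, u64 last_ctx_id)
{
	/* Kernel threads have no user mm, so there is no user BTB state
	 * worth isolating. */
	if (!tsk || !tsk->mm)
		return false;

	/* process A -> idle -> process A: the ctx_id is unchanged, so the
	 * expensive barrier is skipped. */
	if (tsk->mm->context.ctx_id == last_ctx_id)
		return false;

	/* Only pay the IBPB cost when switching into a process that has
	 * disabled dumping (e.g. gpg); SUID_DUMP_USER means "normally
	 * dumpable". */
	return get_dumpable(tsk->mm) != SUID_DUMP_USER;
}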