git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 13 Jun 2018 16:47:28 +0000 (18:47 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 13 Jun 2018 16:47:28 +0000 (18:47 +0200)
added patches:
bonding-correctly-update-link-status-during-mii-commit-phase.patch
bonding-fix-active-backup-transition.patch
bonding-require-speed-duplex-only-for-802.3ad-alb-and-tlb.patch
x86-fpu-hard-disable-lazy-fpu-mode.patch

queue-4.9/bonding-correctly-update-link-status-during-mii-commit-phase.patch [new file with mode: 0644]
queue-4.9/bonding-fix-active-backup-transition.patch [new file with mode: 0644]
queue-4.9/bonding-require-speed-duplex-only-for-802.3ad-alb-and-tlb.patch [new file with mode: 0644]
queue-4.9/series
queue-4.9/x86-fpu-hard-disable-lazy-fpu-mode.patch [new file with mode: 0644]

diff --git a/queue-4.9/bonding-correctly-update-link-status-during-mii-commit-phase.patch b/queue-4.9/bonding-correctly-update-link-status-during-mii-commit-phase.patch
new file mode 100644 (file)
index 0000000..c815347
--- /dev/null
@@ -0,0 +1,42 @@
+From b5bf0f5b16b9c316c34df9f31d4be8729eb86845 Mon Sep 17 00:00:00 2001
+From: Mahesh Bandewar <maheshb@google.com>
+Date: Mon, 27 Mar 2017 11:37:37 -0700
+Subject: bonding: correctly update link status during mii-commit phase
+
+From: Mahesh Bandewar <maheshb@google.com>
+
+commit b5bf0f5b16b9c316c34df9f31d4be8729eb86845 upstream.
+
+bond_miimon_commit() marks the link UP after attempting to get the speed
+and duplex settings for the link. There is a possibility that
+bond_update_speed_duplex() could fail. This is another place where it
+could result in an inconsistent bonding link state.
+
+With this patch the link will be marked UP only if the retrieved speed
+and duplex values are sane and have been successfully processed.
+
+Signed-off-by: Mahesh Bandewar <maheshb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Cc: Nate Clark <nate@neworld.us>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/bonding/bond_main.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2140,7 +2140,12 @@ static void bond_miimon_commit(struct bo
+                       continue;
+               case BOND_LINK_UP:
+-                      bond_update_speed_duplex(slave);
++                      if (bond_update_speed_duplex(slave)) {
++                              netdev_warn(bond->dev,
++                                          "failed to get link speed/duplex for %s\n",
++                                          slave->dev->name);
++                              continue;
++                      }
+                       bond_set_slave_link_state(slave, BOND_LINK_UP,
+                                                 BOND_SLAVE_NOTIFY_NOW);
+                       slave->last_link_up = jiffies;
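For reference, the BOND_LINK_UP branch of bond_miimon_commit() reads roughly as follows once this patch is applied (a sketch reconstructed from the hunk above, not verbatim 4.9 source; the two follow-up patches below refine it further):

        case BOND_LINK_UP:
                if (bond_update_speed_duplex(slave)) {
                        /* speed/duplex could not be read: warn and skip
                         * the UP transition for this slave
                         */
                        netdev_warn(bond->dev,
                                    "failed to get link speed/duplex for %s\n",
                                    slave->dev->name);
                        continue;
                }
                bond_set_slave_link_state(slave, BOND_LINK_UP,
                                          BOND_SLAVE_NOTIFY_NOW);
                slave->last_link_up = jiffies;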
diff --git a/queue-4.9/bonding-fix-active-backup-transition.patch b/queue-4.9/bonding-fix-active-backup-transition.patch
new file mode 100644 (file)
index 0000000..161510f
--- /dev/null
@@ -0,0 +1,77 @@
+From 3f3c278c94dd994fe0d9f21679ae19b9c0a55292 Mon Sep 17 00:00:00 2001
+From: Mahesh Bandewar <maheshb@google.com>
+Date: Mon, 3 Apr 2017 18:38:39 -0700
+Subject: bonding: fix active-backup transition
+
+From: Mahesh Bandewar <maheshb@google.com>
+
+commit 3f3c278c94dd994fe0d9f21679ae19b9c0a55292 upstream.
+
+The earlier patch c4adfc822bf5 ("bonding: make speed, duplex setting
+consistent with link state") attempted to keep the slave state
+consistent with the speed and duplex settings. Unfortunately the
+link-state transition is used to change the active link, especially
+when used in conjunction with mii-mon. The above-mentioned patch broke
+that logic. Also, when the speed and duplex settings for a link are
+updated during a link event, the link status should not be changed, so
+that the correct transition logic is invoked.
+
+This patch fixes the issue by moving the link-state update out of
+bond_update_speed_duplex() and into its callers, which now update the
+link state selectively.
+
+Fixes: c4adfc822bf5 ("bonding: make speed, duplex setting consistent with link state")
+Signed-off-by: Mahesh Bandewar <maheshb@google.com>
+Reviewed-by: Andy Gospodarek <andy@greyhouse.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Cc: Nate Clark <nate@neworld.us>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/bonding/bond_main.c |   13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -384,20 +384,15 @@ static int bond_update_speed_duplex(stru
+       slave->duplex = DUPLEX_UNKNOWN;
+       res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
+-      if (res < 0) {
+-              slave->link = BOND_LINK_DOWN;
++      if (res < 0)
+               return 1;
+-      }
+-      if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1)) {
+-              slave->link = BOND_LINK_DOWN;
++      if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
+               return 1;
+-      }
+       switch (ecmd.base.duplex) {
+       case DUPLEX_FULL:
+       case DUPLEX_HALF:
+               break;
+       default:
+-              slave->link = BOND_LINK_DOWN;
+               return 1;
+       }
+@@ -1536,7 +1531,8 @@ int bond_enslave(struct net_device *bond
+       new_slave->delay = 0;
+       new_slave->link_failure_count = 0;
+-      bond_update_speed_duplex(new_slave);
++      if (bond_update_speed_duplex(new_slave))
++              new_slave->link = BOND_LINK_DOWN;
+       new_slave->last_rx = jiffies -
+               (msecs_to_jiffies(bond->params.arp_interval) + 1);
+@@ -2141,6 +2137,7 @@ static void bond_miimon_commit(struct bo
+               case BOND_LINK_UP:
+                       if (bond_update_speed_duplex(slave)) {
++                              slave->link = BOND_LINK_DOWN;
+                               netdev_warn(bond->dev,
+                                           "failed to get link speed/duplex for %s\n",
+                                           slave->dev->name);
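Taken together with the previous patch in this series, bond_update_speed_duplex() now only reports failure and never writes slave->link itself; each caller decides what a failure means. A sketch of the resulting function, reconstructed from the hunks above (the unchanged tail that stores the values is taken from the surrounding 4.9 context and may differ in detail):

static int bond_update_speed_duplex(struct slave *slave)
{
        struct net_device *slave_dev = slave->dev;
        struct ethtool_link_ksettings ecmd;
        int res;

        slave->speed = SPEED_UNKNOWN;
        slave->duplex = DUPLEX_UNKNOWN;

        res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
        if (res < 0)
                return 1;
        if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
                return 1;
        switch (ecmd.base.duplex) {
        case DUPLEX_FULL:
        case DUPLEX_HALF:
                break;
        default:
                return 1;
        }

        /* unchanged tail (context not shown in the hunks): record values */
        slave->speed = ecmd.base.speed;
        slave->duplex = ecmd.base.duplex;

        return 0;
}

/* ...while callers set the link state explicitly, e.g. in bond_enslave(): */
if (bond_update_speed_duplex(new_slave))
        new_slave->link = BOND_LINK_DOWN;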
diff --git a/queue-4.9/bonding-require-speed-duplex-only-for-802.3ad-alb-and-tlb.patch b/queue-4.9/bonding-require-speed-duplex-only-for-802.3ad-alb-and-tlb.patch
new file mode 100644 (file)
index 0000000..c7d802b
--- /dev/null
@@ -0,0 +1,68 @@
+From ad729bc9acfb7c47112964b4877ef5404578ed13 Mon Sep 17 00:00:00 2001
+From: Andreas Born <futur.andy@googlemail.com>
+Date: Thu, 10 Aug 2017 06:41:44 +0200
+Subject: bonding: require speed/duplex only for 802.3ad, alb and tlb
+
+From: Andreas Born <futur.andy@googlemail.com>
+
+commit ad729bc9acfb7c47112964b4877ef5404578ed13 upstream.
+
+The patch c4adfc822bf5 ("bonding: make speed, duplex setting consistent
+with link state") sets the link state to down if
+bond_update_speed_duplex() cannot retrieve the speed and duplex settings.
+Presumably the patch was written with 802.3ad mode in mind, which relies
+on link speed/duplex settings. For other modes, such as active-backup,
+these settings are not required. Thus, only for these other modes, this
+patch reintroduces support for slaves that cannot report speed or
+duplex, such as wireless devices. This fixes the regression reported in
+bug 196547 (https://bugzilla.kernel.org/show_bug.cgi?id=196547).
+
+Fixes: c4adfc822bf5 ("bonding: make speed, duplex setting consistent with link state")
+Signed-off-by: Andreas Born <futur.andy@googlemail.com>
+Acked-by: Mahesh Bandewar <maheshb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Cc: Nate Clark <nate@neworld.us>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/bonding/bond_main.c |    6 ++++--
+ include/net/bonding.h           |    5 +++++
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1531,7 +1531,8 @@ int bond_enslave(struct net_device *bond
+       new_slave->delay = 0;
+       new_slave->link_failure_count = 0;
+-      if (bond_update_speed_duplex(new_slave))
++      if (bond_update_speed_duplex(new_slave) &&
++          bond_needs_speed_duplex(bond))
+               new_slave->link = BOND_LINK_DOWN;
+       new_slave->last_rx = jiffies -
+@@ -2136,7 +2137,8 @@ static void bond_miimon_commit(struct bo
+                       continue;
+               case BOND_LINK_UP:
+-                      if (bond_update_speed_duplex(slave)) {
++                      if (bond_update_speed_duplex(slave) &&
++                          bond_needs_speed_duplex(bond)) {
+                               slave->link = BOND_LINK_DOWN;
+                               netdev_warn(bond->dev,
+                                           "failed to get link speed/duplex for %s\n",
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -277,6 +277,11 @@ static inline bool bond_is_lb(const stru
+              BOND_MODE(bond) == BOND_MODE_ALB;
+ }
++static inline bool bond_needs_speed_duplex(const struct bonding *bond)
++{
++      return BOND_MODE(bond) == BOND_MODE_8023AD || bond_is_lb(bond);
++}
++
+ static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
+ {
+       return (BOND_MODE(bond) == BOND_MODE_TLB)  &&
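Combined, the three bonding patches converge on a single pattern: a failed speed/duplex read only forces a slave's link down when the bonding mode actually depends on those values (802.3ad, TLB, ALB). A sketch of the end result in bond_miimon_commit(), reconstructed from the hunks above:

        case BOND_LINK_UP:
                if (bond_update_speed_duplex(slave) &&
                    bond_needs_speed_duplex(bond)) {
                        /* only 802.3ad/tlb/alb insist on valid speed/duplex */
                        slave->link = BOND_LINK_DOWN;
                        netdev_warn(bond->dev,
                                    "failed to get link speed/duplex for %s\n",
                                    slave->dev->name);
                        continue;
                }
                bond_set_slave_link_state(slave, BOND_LINK_UP,
                                          BOND_SLAVE_NOTIFY_NOW);
                slave->last_link_up = jiffies;

Other modes, such as active-backup, keep working with slaves that cannot report speed or duplex (e.g. wireless devices), which is the regression the last patch addresses.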
diff --git a/queue-4.9/series b/queue-4.9/series
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..f65f13a373f94a168986ba18fde920af687e1f0a 100644 (file)
--- a/queue-4.9/series
@@ -0,0 +1,4 @@
+x86-fpu-hard-disable-lazy-fpu-mode.patch
+bonding-correctly-update-link-status-during-mii-commit-phase.patch
+bonding-fix-active-backup-transition.patch
+bonding-require-speed-duplex-only-for-802.3ad-alb-and-tlb.patch
diff --git a/queue-4.9/x86-fpu-hard-disable-lazy-fpu-mode.patch b/queue-4.9/x86-fpu-hard-disable-lazy-fpu-mode.patch
new file mode 100644 (file)
index 0000000..16c820f
--- /dev/null
@@ -0,0 +1,196 @@
+From ca6938a1cd8a1c5e861a99b67f84ac166fc2b9e7 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@kernel.org>
+Date: Tue, 4 Oct 2016 20:34:31 -0400
+Subject: x86/fpu: Hard-disable lazy FPU mode
+
+From: Andy Lutomirski <luto@kernel.org>
+
+commit ca6938a1cd8a1c5e861a99b67f84ac166fc2b9e7 upstream.
+
+Since commit:
+
+  58122bf1d856 ("x86/fpu: Default eagerfpu=on on all CPUs")
+
+... in Linux 4.6, eager FPU mode has been the default on all x86
+systems, and no one has reported any regressions.
+
+This patch removes the ability to enable lazy mode: use_eager_fpu()
+becomes "return true" and all of the FPU mode selection machinery is
+removed.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Rik van Riel <riel@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: pbonzini@redhat.com
+Link: http://lkml.kernel.org/r/1475627678-20788-3-git-send-email-riel@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/cpufeatures.h  |    2 
+ arch/x86/include/asm/fpu/internal.h |    2 
+ arch/x86/kernel/fpu/init.c          |   91 +-----------------------------------
+ 3 files changed, 5 insertions(+), 90 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -104,7 +104,7 @@
+ #define X86_FEATURE_EXTD_APICID       ( 3*32+26) /* has extended APICID (8 bits) */
+ #define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
+ #define X86_FEATURE_APERFMPERF        ( 3*32+28) /* APERFMPERF */
+-#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
++/* free, was #define X86_FEATURE_EAGER_FPU    ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
+ #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -62,7 +62,7 @@ extern u64 fpu__get_supported_xfeatures_
+  */
+ static __always_inline __pure bool use_eager_fpu(void)
+ {
+-      return static_cpu_has(X86_FEATURE_EAGER_FPU);
++      return true;
+ }
+ static __always_inline __pure bool use_xsaveopt(void)
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -15,10 +15,7 @@
+  */
+ static void fpu__init_cpu_ctx_switch(void)
+ {
+-      if (!boot_cpu_has(X86_FEATURE_EAGER_FPU))
+-              stts();
+-      else
+-              clts();
++      clts();
+ }
+ /*
+@@ -234,82 +231,16 @@ static void __init fpu__init_system_xsta
+ }
+ /*
+- * FPU context switching strategies:
+- *
+- * Against popular belief, we don't do lazy FPU saves, due to the
+- * task migration complications it brings on SMP - we only do
+- * lazy FPU restores.
+- *
+- * 'lazy' is the traditional strategy, which is based on setting
+- * CR0::TS to 1 during context-switch (instead of doing a full
+- * restore of the FPU state), which causes the first FPU instruction
+- * after the context switch (whenever it is executed) to fault - at
+- * which point we lazily restore the FPU state into FPU registers.
+- *
+- * Tasks are of course under no obligation to execute FPU instructions,
+- * so it can easily happen that another context-switch occurs without
+- * a single FPU instruction being executed. If we eventually switch
+- * back to the original task (that still owns the FPU) then we have
+- * not only saved the restores along the way, but we also have the
+- * FPU ready to be used for the original task.
+- *
+- * 'lazy' is deprecated because it's almost never a performance win
+- * and it's much more complicated than 'eager'.
+- *
+- * 'eager' switching is by default on all CPUs, there we switch the FPU
+- * state during every context switch, regardless of whether the task
+- * has used FPU instructions in that time slice or not. This is done
+- * because modern FPU context saving instructions are able to optimize
+- * state saving and restoration in hardware: they can detect both
+- * unused and untouched FPU state and optimize accordingly.
+- *
+- * [ Note that even in 'lazy' mode we might optimize context switches
+- *   to use 'eager' restores, if we detect that a task is using the FPU
+- *   frequently. See the fpu->counter logic in fpu/internal.h for that. ]
+- */
+-static enum { ENABLE, DISABLE } eagerfpu = ENABLE;
+-
+-/*
+  * Find supported xfeatures based on cpu features and command-line input.
+  * This must be called after fpu__init_parse_early_param() is called and
+  * xfeatures_mask is enumerated.
+  */
+ u64 __init fpu__get_supported_xfeatures_mask(void)
+ {
+-      /* Support all xfeatures known to us */
+-      if (eagerfpu != DISABLE)
+-              return XCNTXT_MASK;
+-
+-      /* Warning of xfeatures being disabled for no eagerfpu mode */
+-      if (xfeatures_mask & XFEATURE_MASK_EAGER) {
+-              pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
+-                      xfeatures_mask & XFEATURE_MASK_EAGER);
+-      }
+-
+-      /* Return a mask that masks out all features requiring eagerfpu mode */
+-      return ~XFEATURE_MASK_EAGER;
++      return XCNTXT_MASK;
+ }
+-/*
+- * Disable features dependent on eagerfpu.
+- */
+-static void __init fpu__clear_eager_fpu_features(void)
+-{
+-      setup_clear_cpu_cap(X86_FEATURE_MPX);
+-}
+-
+-/*
+- * Pick the FPU context switching strategy:
+- *
+- * When eagerfpu is AUTO or ENABLE, we ensure it is ENABLE if either of
+- * the following is true:
+- *
+- * (1) the cpu has xsaveopt, as it has the optimization and doing eager
+- *     FPU switching has a relatively low cost compared to a plain xsave;
+- * (2) the cpu has xsave features (e.g. MPX) that depend on eager FPU
+- *     switching. Should the kernel boot with noxsaveopt, we support MPX
+- *     with eager FPU switching at a higher cost.
+- */
++/* Legacy code to initialize eager fpu mode. */
+ static void __init fpu__init_system_ctx_switch(void)
+ {
+       static bool on_boot_cpu __initdata = 1;
+@@ -318,17 +249,6 @@ static void __init fpu__init_system_ctx_
+       on_boot_cpu = 0;
+       WARN_ON_FPU(current->thread.fpu.fpstate_active);
+-
+-      if (boot_cpu_has(X86_FEATURE_XSAVEOPT) && eagerfpu != DISABLE)
+-              eagerfpu = ENABLE;
+-
+-      if (xfeatures_mask & XFEATURE_MASK_EAGER)
+-              eagerfpu = ENABLE;
+-
+-      if (eagerfpu == ENABLE)
+-              setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
+-
+-      printk(KERN_INFO "x86/fpu: Using '%s' FPU context switches.\n", eagerfpu == ENABLE ? "eager" : "lazy");
+ }
+ /*
+@@ -337,11 +257,6 @@ static void __init fpu__init_system_ctx_
+  */
+ static void __init fpu__init_parse_early_param(void)
+ {
+-      if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off")) {
+-              eagerfpu = DISABLE;
+-              fpu__clear_eager_fpu_features();
+-      }
+-
+       if (cmdline_find_option_bool(boot_command_line, "no387"))
+               setup_clear_cpu_cap(X86_FEATURE_FPU);
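With this patch applied, the lazy-FPU machinery is gone and the eager path is unconditional; the key pieces end up roughly as follows (a sketch reconstructed from the hunks above, not verbatim source):

/* arch/x86/include/asm/fpu/internal.h */
static __always_inline __pure bool use_eager_fpu(void)
{
        return true;            /* lazy mode can no longer be selected */
}

/* arch/x86/kernel/fpu/init.c */
static void fpu__init_cpu_ctx_switch(void)
{
        clts();                 /* always clear CR0.TS; no lazy stts() path */
}

u64 __init fpu__get_supported_xfeatures_mask(void)
{
        return XCNTXT_MASK;     /* all known xfeatures stay supported */
}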