git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.19-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 3 Mar 2020 13:06:42 +0000 (14:06 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 3 Mar 2020 13:06:42 +0000 (14:06 +0100)
added patches:
kvm-check-for-a-bad-hva-before-dropping-into-the-ghc-slow-path.patch
kvm-svm-override-default-mmio-mask-if-memory-encryption-is-enabled.patch
mwifiex-delete-unused-mwifiex_get_intf_num.patch
mwifiex-drop-most-magic-numbers-from-mwifiex_process_tdls_action_frame.patch
namei-only-return-echild-from-follow_dotdot_rcu.patch
sched-fair-optimize-select_idle_cpu.patch

queue-4.19/kvm-check-for-a-bad-hva-before-dropping-into-the-ghc-slow-path.patch [new file with mode: 0644]
queue-4.19/kvm-svm-override-default-mmio-mask-if-memory-encryption-is-enabled.patch [new file with mode: 0644]
queue-4.19/mwifiex-delete-unused-mwifiex_get_intf_num.patch [new file with mode: 0644]
queue-4.19/mwifiex-drop-most-magic-numbers-from-mwifiex_process_tdls_action_frame.patch [new file with mode: 0644]
queue-4.19/namei-only-return-echild-from-follow_dotdot_rcu.patch [new file with mode: 0644]
queue-4.19/sched-fair-optimize-select_idle_cpu.patch [new file with mode: 0644]
queue-4.19/series

diff --git a/queue-4.19/kvm-check-for-a-bad-hva-before-dropping-into-the-ghc-slow-path.patch b/queue-4.19/kvm-check-for-a-bad-hva-before-dropping-into-the-ghc-slow-path.patch
new file mode 100644 (file)
index 0000000..c2edc77
--- /dev/null
@@ -0,0 +1,77 @@
+From fcfbc617547fc6d9552cb6c1c563b6a90ee98085 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+Date: Thu, 9 Jan 2020 15:56:18 -0800
+Subject: KVM: Check for a bad hva before dropping into the ghc slow path
+
+From: Sean Christopherson <sean.j.christopherson@intel.com>
+
+commit fcfbc617547fc6d9552cb6c1c563b6a90ee98085 upstream.
+
+When reading/writing using the guest/host cache, check for a bad hva
+before checking for a NULL memslot, which triggers the slow path for
+handling cross-page accesses.  Because the memslot is nullified on error
+by __kvm_gfn_to_hva_cache_init(), if the bad hva is encountered after
+crossing into a new page, then the kvm_{read,write}_guest() slow path
+could potentially write/access the first chunk prior to detecting the
+bad hva.
+
+Arguably, performing a partial access is semantically correct from an
+architectural perspective, but that behavior is certainly not intended.
+In the original implementation, memslot was not explicitly nullified
+and therefore the partial access behavior varied based on whether the
+memslot itself was null, or if the hva was simply bad.  The current
+behavior was introduced as a seemingly unintentional side effect in
+commit f1b9dd5eb86c ("kvm: Disallow wraparound in
+kvm_gfn_to_hva_cache_init"), which justified the change with "since some
+callers don't check the return code from this function, it sit seems
+prudent to clear ghc->memslot in the event of an error".
+
+Regardless of intent, the partial access is dependent on _not_ checking
+the result of the cache initialization, which is arguably a bug in its
+own right, at best simply weird.
+
+Fixes: 8f964525a121 ("KVM: Allow cross page reads and writes from cached translations.")
+Cc: Jim Mattson <jmattson@google.com>
+Cc: Andrew Honig <ahonig@google.com>
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/kvm_main.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2024,12 +2024,12 @@ int kvm_write_guest_offset_cached(struct
+       if (slots->generation != ghc->generation)
+               __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
+-      if (unlikely(!ghc->memslot))
+-              return kvm_write_guest(kvm, gpa, data, len);
+-
+       if (kvm_is_error_hva(ghc->hva))
+               return -EFAULT;
++      if (unlikely(!ghc->memslot))
++              return kvm_write_guest(kvm, gpa, data, len);
++
+       r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
+       if (r)
+               return -EFAULT;
+@@ -2057,12 +2057,12 @@ int kvm_read_guest_cached(struct kvm *kv
+       if (slots->generation != ghc->generation)
+               __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
+-      if (unlikely(!ghc->memslot))
+-              return kvm_read_guest(kvm, ghc->gpa, data, len);
+-
+       if (kvm_is_error_hva(ghc->hva))
+               return -EFAULT;
++      if (unlikely(!ghc->memslot))
++              return kvm_read_guest(kvm, ghc->gpa, data, len);
++
+       r = __copy_from_user(data, (void __user *)ghc->hva, len);
+       if (r)
+               return -EFAULT;
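
For context, a minimal user-space sketch of the ordering rule this patch enforces; the struct and helpers below are hypothetical stand-ins, not KVM's API. The point is that a bad cached address is rejected before the cross-page slow path ever runs, so no partial chunk can be written first.

        #include <errno.h>
        #include <string.h>

        struct hva_cache {
                void *hva;         /* cached host address; NULL when init failed       */
                int   spans_pages; /* non-zero when the access crosses a page boundary */
        };

        static int cached_write(struct hva_cache *c, const void *data, size_t len)
        {
                if (c->hva == NULL)          /* bad hva: fail before touching anything */
                        return -EFAULT;
                if (c->spans_pages)          /* only now fall back to the slow path    */
                        return -EAGAIN;      /* stand-in for kvm_write_guest()         */
                memcpy(c->hva, data, len);   /* fast path: single-page cached write    */
                return 0;
        }
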
diff --git a/queue-4.19/kvm-svm-override-default-mmio-mask-if-memory-encryption-is-enabled.patch b/queue-4.19/kvm-svm-override-default-mmio-mask-if-memory-encryption-is-enabled.patch
new file mode 100644 (file)
index 0000000..63808c8
--- /dev/null
@@ -0,0 +1,89 @@
+From 52918ed5fcf05d97d257f4131e19479da18f5d16 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Thu, 9 Jan 2020 17:42:16 -0600
+Subject: KVM: SVM: Override default MMIO mask if memory encryption is enabled
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+commit 52918ed5fcf05d97d257f4131e19479da18f5d16 upstream.
+
+The KVM MMIO support uses bit 51 as the reserved bit to cause nested page
+faults when a guest performs MMIO. The AMD memory encryption support uses
+a CPUID function to define the encryption bit position. Given this, it is
+possible that these bits can conflict.
+
+Use svm_hardware_setup() to override the MMIO mask if memory encryption
+support is enabled. Various checks are performed to ensure that the mask
+is properly defined and rsvd_bits() is used to generate the new mask (as
+was done prior to the change that necessitated this patch).
+
+Fixes: 28a1f3ac1d0c ("kvm: x86: Set highest physical address bits in non-present/reserved SPTEs")
+Suggested-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/svm.c |   43 +++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 43 insertions(+)
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1298,6 +1298,47 @@ static void shrink_ple_window(struct kvm
+                                   control->pause_filter_count, old);
+ }
++/*
++ * The default MMIO mask is a single bit (excluding the present bit),
++ * which could conflict with the memory encryption bit. Check for
++ * memory encryption support and override the default MMIO mask if
++ * memory encryption is enabled.
++ */
++static __init void svm_adjust_mmio_mask(void)
++{
++      unsigned int enc_bit, mask_bit;
++      u64 msr, mask;
++
++      /* If there is no memory encryption support, use existing mask */
++      if (cpuid_eax(0x80000000) < 0x8000001f)
++              return;
++
++      /* If memory encryption is not enabled, use existing mask */
++      rdmsrl(MSR_K8_SYSCFG, msr);
++      if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
++              return;
++
++      enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
++      mask_bit = boot_cpu_data.x86_phys_bits;
++
++      /* Increment the mask bit if it is the same as the encryption bit */
++      if (enc_bit == mask_bit)
++              mask_bit++;
++
++      /*
++       * If the mask bit location is below 52, then some bits above the
++       * physical addressing limit will always be reserved, so use the
++       * rsvd_bits() function to generate the mask. This mask, along with
++       * the present bit, will be used to generate a page fault with
++       * PFER.RSV = 1.
++       *
++       * If the mask bit location is 52 (or above), then clear the mask.
++       */
++      mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
++
++      kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
++}
++
+ static __init int svm_hardware_setup(void)
+ {
+       int cpu;
+@@ -1352,6 +1393,8 @@ static __init int svm_hardware_setup(voi
+               }
+       }
++      svm_adjust_mmio_mask();
++
+       for_each_possible_cpu(cpu) {
+               r = svm_cpu_init(cpu);
+               if (r)
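
As a worked example of the arithmetic above (values assumed for illustration, not taken from any particular CPU): with 47 physical address bits and the encryption bit also at 47, mask_bit is bumped to 48 and the reserved-bit mask covers bits 48-51. A small standalone sketch, with rsvd_bits() re-implemented locally and a stand-in for PT_PRESENT_MASK:

        #include <stdint.h>
        #include <stdio.h>

        static uint64_t rsvd_bits(int s, int e)        /* bits s..e set, inclusive */
        {
                return ((1ULL << (e - s + 1)) - 1) << s;
        }

        int main(void)
        {
                unsigned int enc_bit = 47, phys_bits = 47;  /* assumed example values */
                unsigned int mask_bit = phys_bits;
                uint64_t present = 1ULL;                    /* stand-in for PT_PRESENT_MASK */

                if (enc_bit == mask_bit)    /* don't let the mask land on the C-bit */
                        mask_bit++;

                uint64_t mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | present : 0;
                printf("MMIO SPTE mask: %#llx\n", (unsigned long long)mask);
                /* prints 0xf000000000001: bits 48-51 reserved plus the present bit */
                return 0;
        }
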
diff --git a/queue-4.19/mwifiex-delete-unused-mwifiex_get_intf_num.patch b/queue-4.19/mwifiex-delete-unused-mwifiex_get_intf_num.patch
new file mode 100644 (file)
index 0000000..162f3f4
--- /dev/null
@@ -0,0 +1,47 @@
+From 1c9f329b084b7b8ea6d60d91a202e884cdcf6aae Mon Sep 17 00:00:00 2001
+From: Brian Norris <briannorris@chromium.org>
+Date: Mon, 9 Dec 2019 16:39:11 -0800
+Subject: mwifiex: delete unused mwifiex_get_intf_num()
+
+From: Brian Norris <briannorris@chromium.org>
+
+commit 1c9f329b084b7b8ea6d60d91a202e884cdcf6aae upstream.
+
+Commit 7afb94da3cd8 ("mwifiex: update set_mac_address logic") fixed the
+only user of this function, partly because the author seems to have
+noticed that, as written, it's on the borderline between highly
+misleading and buggy.
+
+Anyway, no sense in keeping dead code around: let's drop it.
+
+Fixes: 7afb94da3cd8 ("mwifiex: update set_mac_address logic")
+Signed-off-by: Brian Norris <briannorris@chromium.org>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/marvell/mwifiex/main.h |   13 -------------
+ 1 file changed, 13 deletions(-)
+
+--- a/drivers/net/wireless/marvell/mwifiex/main.h
++++ b/drivers/net/wireless/marvell/mwifiex/main.h
+@@ -1294,19 +1294,6 @@ mwifiex_copy_rates(u8 *dest, u32 pos, u8
+       return pos;
+ }
+-/* This function return interface number with the same bss_type.
+- */
+-static inline u8
+-mwifiex_get_intf_num(struct mwifiex_adapter *adapter, u8 bss_type)
+-{
+-      u8 i, num = 0;
+-
+-      for (i = 0; i < adapter->priv_num; i++)
+-              if (adapter->priv[i] && adapter->priv[i]->bss_type == bss_type)
+-                      num++;
+-      return num;
+-}
+-
+ /*
+  * This function returns the correct private structure pointer based
+  * upon the BSS type and BSS number.
diff --git a/queue-4.19/mwifiex-drop-most-magic-numbers-from-mwifiex_process_tdls_action_frame.patch b/queue-4.19/mwifiex-drop-most-magic-numbers-from-mwifiex_process_tdls_action_frame.patch
new file mode 100644 (file)
index 0000000..7e9ebb2
--- /dev/null
@@ -0,0 +1,225 @@
+From 70e5b8f445fd27fde0c5583460e82539a7242424 Mon Sep 17 00:00:00 2001
+From: Brian Norris <briannorris@chromium.org>
+Date: Fri, 6 Dec 2019 11:45:35 -0800
+Subject: mwifiex: drop most magic numbers from mwifiex_process_tdls_action_frame()
+
+From: Brian Norris <briannorris@chromium.org>
+
+commit 70e5b8f445fd27fde0c5583460e82539a7242424 upstream.
+
+Before commit 1e58252e334d ("mwifiex: Fix heap overflow in
+mmwifiex_process_tdls_action_frame()"),
+mwifiex_process_tdls_action_frame() already had too many magic numbers.
+But this commit just added a ton more, in the name of checking for
+buffer overflows. That seems like a really bad idea.
+
+Let's make these magic numbers a little less magic, by
+(a) factoring out 'pos[1]' as 'ie_len'
+(b) using 'sizeof' on the appropriate source or destination fields where
+    possible, instead of bare numbers
+(c) dropping redundant checks, per below.
+
+Regarding redundant checks: the beginning of the loop has this:
+
+                if (pos + 2 + pos[1] > end)
+                        break;
+
+but then individual 'case's include stuff like this:
+
+                       if (pos > end - 3)
+                               return;
+                       if (pos[1] != 1)
+                               return;
+
+Note that the second 'return' (validating the length, pos[1]) combined
+with the above condition (ensuring 'pos + 2 + length' doesn't exceed
+'end'), makes the first 'return' (whose 'if' can be reworded as 'pos >
+end - pos[1] - 2') redundant. Rather than unwind the magic numbers
+there, just drop those conditions.
+
+Fixes: 1e58252e334d ("mwifiex: Fix heap overflow in mmwifiex_process_tdls_action_frame()")
+Signed-off-by: Brian Norris <briannorris@chromium.org>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/marvell/mwifiex/tdls.c |   75 ++++++++++------------------
+ 1 file changed, 28 insertions(+), 47 deletions(-)
+
+--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
++++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
+@@ -897,7 +897,7 @@ void mwifiex_process_tdls_action_frame(s
+       u8 *peer, *pos, *end;
+       u8 i, action, basic;
+       u16 cap = 0;
+-      int ie_len = 0;
++      int ies_len = 0;
+       if (len < (sizeof(struct ethhdr) + 3))
+               return;
+@@ -919,7 +919,7 @@ void mwifiex_process_tdls_action_frame(s
+               pos = buf + sizeof(struct ethhdr) + 4;
+               /* payload 1+ category 1 + action 1 + dialog 1 */
+               cap = get_unaligned_le16(pos);
+-              ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
++              ies_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
+               pos += 2;
+               break;
+@@ -929,7 +929,7 @@ void mwifiex_process_tdls_action_frame(s
+               /* payload 1+ category 1 + action 1 + dialog 1 + status code 2*/
+               pos = buf + sizeof(struct ethhdr) + 6;
+               cap = get_unaligned_le16(pos);
+-              ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
++              ies_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
+               pos += 2;
+               break;
+@@ -937,7 +937,7 @@ void mwifiex_process_tdls_action_frame(s
+               if (len < (sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN))
+                       return;
+               pos = buf + sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN;
+-              ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
++              ies_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
+               break;
+       default:
+               mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n");
+@@ -950,33 +950,33 @@ void mwifiex_process_tdls_action_frame(s
+       sta_ptr->tdls_cap.capab = cpu_to_le16(cap);
+-      for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) {
+-              if (pos + 2 + pos[1] > end)
++      for (end = pos + ies_len; pos + 1 < end; pos += 2 + pos[1]) {
++              u8 ie_len = pos[1];
++
++              if (pos + 2 + ie_len > end)
+                       break;
+               switch (*pos) {
+               case WLAN_EID_SUPP_RATES:
+-                      if (pos[1] > 32)
++                      if (ie_len > sizeof(sta_ptr->tdls_cap.rates))
+                               return;
+-                      sta_ptr->tdls_cap.rates_len = pos[1];
+-                      for (i = 0; i < pos[1]; i++)
++                      sta_ptr->tdls_cap.rates_len = ie_len;
++                      for (i = 0; i < ie_len; i++)
+                               sta_ptr->tdls_cap.rates[i] = pos[i + 2];
+                       break;
+               case WLAN_EID_EXT_SUPP_RATES:
+-                      if (pos[1] > 32)
++                      if (ie_len > sizeof(sta_ptr->tdls_cap.rates))
+                               return;
+                       basic = sta_ptr->tdls_cap.rates_len;
+-                      if (pos[1] > 32 - basic)
++                      if (ie_len > sizeof(sta_ptr->tdls_cap.rates) - basic)
+                               return;
+-                      for (i = 0; i < pos[1]; i++)
++                      for (i = 0; i < ie_len; i++)
+                               sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
+-                      sta_ptr->tdls_cap.rates_len += pos[1];
++                      sta_ptr->tdls_cap.rates_len += ie_len;
+                       break;
+               case WLAN_EID_HT_CAPABILITY:
+-                      if (pos > end - sizeof(struct ieee80211_ht_cap) - 2)
+-                              return;
+-                      if (pos[1] != sizeof(struct ieee80211_ht_cap))
++                      if (ie_len != sizeof(struct ieee80211_ht_cap))
+                               return;
+                       /* copy the ie's value into ht_capb*/
+                       memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2,
+@@ -984,59 +984,45 @@ void mwifiex_process_tdls_action_frame(s
+                       sta_ptr->is_11n_enabled = 1;
+                       break;
+               case WLAN_EID_HT_OPERATION:
+-                      if (pos > end -
+-                          sizeof(struct ieee80211_ht_operation) - 2)
+-                              return;
+-                      if (pos[1] != sizeof(struct ieee80211_ht_operation))
++                      if (ie_len != sizeof(struct ieee80211_ht_operation))
+                               return;
+                       /* copy the ie's value into ht_oper*/
+                       memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2,
+                              sizeof(struct ieee80211_ht_operation));
+                       break;
+               case WLAN_EID_BSS_COEX_2040:
+-                      if (pos > end - 3)
+-                              return;
+-                      if (pos[1] != 1)
++                      if (ie_len != sizeof(pos[2]))
+                               return;
+                       sta_ptr->tdls_cap.coex_2040 = pos[2];
+                       break;
+               case WLAN_EID_EXT_CAPABILITY:
+-                      if (pos > end - sizeof(struct ieee_types_header))
+-                              return;
+-                      if (pos[1] < sizeof(struct ieee_types_header))
++                      if (ie_len < sizeof(struct ieee_types_header))
+                               return;
+-                      if (pos[1] > 8)
++                      if (ie_len > 8)
+                               return;
+                       memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
+                              sizeof(struct ieee_types_header) +
+-                             min_t(u8, pos[1], 8));
++                             min_t(u8, ie_len, 8));
+                       break;
+               case WLAN_EID_RSN:
+-                      if (pos > end - sizeof(struct ieee_types_header))
++                      if (ie_len < sizeof(struct ieee_types_header))
+                               return;
+-                      if (pos[1] < sizeof(struct ieee_types_header))
+-                              return;
+-                      if (pos[1] > IEEE_MAX_IE_SIZE -
++                      if (ie_len > IEEE_MAX_IE_SIZE -
+                           sizeof(struct ieee_types_header))
+                               return;
+                       memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
+                              sizeof(struct ieee_types_header) +
+-                             min_t(u8, pos[1], IEEE_MAX_IE_SIZE -
++                             min_t(u8, ie_len, IEEE_MAX_IE_SIZE -
+                                    sizeof(struct ieee_types_header)));
+                       break;
+               case WLAN_EID_QOS_CAPA:
+-                      if (pos > end - 3)
+-                              return;
+-                      if (pos[1] != 1)
++                      if (ie_len != sizeof(pos[2]))
+                               return;
+                       sta_ptr->tdls_cap.qos_info = pos[2];
+                       break;
+               case WLAN_EID_VHT_OPERATION:
+                       if (priv->adapter->is_hw_11ac_capable) {
+-                              if (pos > end -
+-                                  sizeof(struct ieee80211_vht_operation) - 2)
+-                                      return;
+-                              if (pos[1] !=
++                              if (ie_len !=
+                                   sizeof(struct ieee80211_vht_operation))
+                                       return;
+                               /* copy the ie's value into vhtoper*/
+@@ -1046,10 +1032,7 @@ void mwifiex_process_tdls_action_frame(s
+                       break;
+               case WLAN_EID_VHT_CAPABILITY:
+                       if (priv->adapter->is_hw_11ac_capable) {
+-                              if (pos > end -
+-                                  sizeof(struct ieee80211_vht_cap) - 2)
+-                                      return;
+-                              if (pos[1] != sizeof(struct ieee80211_vht_cap))
++                              if (ie_len != sizeof(struct ieee80211_vht_cap))
+                                       return;
+                               /* copy the ie's value into vhtcap*/
+                               memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2,
+@@ -1059,9 +1042,7 @@ void mwifiex_process_tdls_action_frame(s
+                       break;
+               case WLAN_EID_AID:
+                       if (priv->adapter->is_hw_11ac_capable) {
+-                              if (pos > end - 4)
+-                                      return;
+-                              if (pos[1] != 2)
++                              if (ie_len != sizeof(u16))
+                                       return;
+                               sta_ptr->tdls_cap.aid =
+                                       get_unaligned_le16((pos + 2));
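
To illustrate the redundancy argument from the commit message with a generic TLV walk (a simplified sketch, not the mwifiex code itself): once the loop guarantees pos + 2 + ie_len <= end, an element whose length has been validated cannot run past the buffer, so the separate "pos > end - N" tests add nothing.

        #include <stdint.h>

        static unsigned int walk_ies(const uint8_t *pos, const uint8_t *end)
        {
                unsigned int sum = 0;

                for (; pos + 1 < end; pos += 2 + pos[1]) {
                        uint8_t ie_len = pos[1];

                        if (pos + 2 + ie_len > end)   /* whole element must fit          */
                                break;
                        if (ie_len == 1)              /* one-byte element, e.g. QoS info */
                                sum += pos[2];        /* in bounds: pos + 3 <= end       */
                }
                return sum;
        }
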
diff --git a/queue-4.19/namei-only-return-echild-from-follow_dotdot_rcu.patch b/queue-4.19/namei-only-return-echild-from-follow_dotdot_rcu.patch
new file mode 100644 (file)
index 0000000..527041e
--- /dev/null
@@ -0,0 +1,41 @@
+From 2b98149c2377bff12be5dd3ce02ae0506e2dd613 Mon Sep 17 00:00:00 2001
+From: Aleksa Sarai <cyphar@cyphar.com>
+Date: Sat, 7 Dec 2019 01:13:26 +1100
+Subject: namei: only return -ECHILD from follow_dotdot_rcu()
+
+From: Aleksa Sarai <cyphar@cyphar.com>
+
+commit 2b98149c2377bff12be5dd3ce02ae0506e2dd613 upstream.
+
+It's over-zealous to return hard errors under RCU-walk here, given that
+a REF-walk will be triggered for all other cases handling ".." under
+RCU.
+
+The original purpose of this check was to ensure that if a rename occurs
+such that a directory is moved outside of the bind-mount which the
+resolution started in, it would be detected and blocked to avoid being
+able to mess with paths outside of the bind-mount. However, triggering a
+new REF-walk is just as effective a solution.
+
+Cc: "Eric W. Biederman" <ebiederm@xmission.com>
+Fixes: 397d425dc26d ("vfs: Test for and handle paths that are unreachable from their mnt_root")
+Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/namei.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1368,7 +1368,7 @@ static int follow_dotdot_rcu(struct name
+                       nd->path.dentry = parent;
+                       nd->seq = seq;
+                       if (unlikely(!path_connected(&nd->path)))
+-                              return -ENOENT;
++                              return -ECHILD;
+                       break;
+               } else {
+                       struct mount *mnt = real_mount(nd->path.mnt);
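
For background on why -ECHILD is the softer error: the VFS convention (sketched below in user-space form with stand-in helpers and an assumed flag value, simplified from how fs/namei.c drives lookups) is to try the lockless RCU-walk first and transparently restart the whole lookup in reference-counted REF-walk mode when it returns -ECHILD, whereas -ENOENT is handed straight back to the caller.

        #include <errno.h>

        #define LOOKUP_RCU 0x1                  /* assumed flag value for this sketch */

        /* hypothetical: one lookup attempt; pretend RCU-walk always bails out */
        static int lookup_pass(unsigned int flags)
        {
                return (flags & LOOKUP_RCU) ? -ECHILD : 0;
        }

        static int filename_lookup_sketch(unsigned int flags)
        {
                int err = lookup_pass(flags | LOOKUP_RCU);   /* lockless attempt first  */

                if (err == -ECHILD)                          /* soft failure: retry     */
                        err = lookup_pass(flags);            /* under REF-walk          */
                return err;                                  /* -ENOENT would not retry */
        }
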
diff --git a/queue-4.19/sched-fair-optimize-select_idle_cpu.patch b/queue-4.19/sched-fair-optimize-select_idle_cpu.patch
new file mode 100644 (file)
index 0000000..cd3a290
--- /dev/null
@@ -0,0 +1,62 @@
+From 60588bfa223ff675b95f866249f90616613fbe31 Mon Sep 17 00:00:00 2001
+From: Cheng Jian <cj.chengjian@huawei.com>
+Date: Fri, 13 Dec 2019 10:45:30 +0800
+Subject: sched/fair: Optimize select_idle_cpu
+
+From: Cheng Jian <cj.chengjian@huawei.com>
+
+commit 60588bfa223ff675b95f866249f90616613fbe31 upstream.
+
+select_idle_cpu() will scan the LLC domain for idle CPUs;
+it is always expensive, so the next commit:
+
+       1ad3aaf3fcd2 ("sched/core: Implement new approach to scale select_idle_cpu()")
+
+introduces a way to limit how many CPUs we scan.
+
+But it consumes some of the 'nr' attempts on CPUs that are not
+allowed for the task, and thus wastes them. The function then
+always returns nr_cpumask_bits, and we can't find a CPU on which
+our task is allowed to run.
+
+The cpumask may be too big for the stack, so, as in
+select_idle_core(), use the per-CPU 'select_idle_mask' to prevent
+stack overflow.
+
+Fixes: 1ad3aaf3fcd2 ("sched/core: Implement new approach to scale select_idle_cpu()")
+Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
+Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
+Link: https://lkml.kernel.org/r/20191213024530.28052-1-cj.chengjian@huawei.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/fair.c |    7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -6133,6 +6133,7 @@ static inline int select_idle_smt(struct
+  */
+ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
+ {
++      struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
+       struct sched_domain *this_sd;
+       u64 avg_cost, avg_idle;
+       u64 time, cost;
+@@ -6163,11 +6164,11 @@ static int select_idle_cpu(struct task_s
+       time = local_clock();
+-      for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
++      cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
++
++      for_each_cpu_wrap(cpu, cpus, target) {
+               if (!--nr)
+                       return -1;
+-              if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+-                      continue;
+               if (available_idle_cpu(cpu))
+                       break;
+       }
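
A user-space sketch of the behavioural change (plain 64-bit masks instead of the kernel cpumask API, helper names hypothetical): intersecting the scan set with the task's allowed CPUs up front means the 'nr' budget is only spent on CPUs the task could actually run on, instead of being burned on disallowed ones.

        #include <stdint.h>

        static int pick_idle_cpu(uint64_t domain, uint64_t allowed, uint64_t idle, int nr)
        {
                uint64_t cpus = domain & allowed;       /* the cpumask_and() step      */

                for (int cpu = 0; cpu < 64; cpu++) {
                        if (!(cpus & (1ULL << cpu)))
                                continue;               /* skipped for free, no budget */
                        if (!--nr)
                                return -1;              /* scan budget exhausted       */
                        if (idle & (1ULL << cpu))
                                return cpu;             /* allowed and idle: done      */
                }
                return -1;
        }
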
diff --git a/queue-4.19/series b/queue-4.19/series
index 1f7e5643ea3a318d91910596b24acdcebe0a1774..01463020bca58a78e19490d0141869b301e234cd 100644 (file)
@@ -64,3 +64,9 @@ net-atlantic-fix-use-after-free-kasan-warn.patch
 net-atlantic-fix-potential-error-handling.patch
 net-smc-no-peer-id-in-clc-decline-for-smcd.patch
 net-ena-make-ena-rxfh-support-eth_rss_hash_no_change.patch
+namei-only-return-echild-from-follow_dotdot_rcu.patch
+mwifiex-drop-most-magic-numbers-from-mwifiex_process_tdls_action_frame.patch
+mwifiex-delete-unused-mwifiex_get_intf_num.patch
+kvm-svm-override-default-mmio-mask-if-memory-encryption-is-enabled.patch
+kvm-check-for-a-bad-hva-before-dropping-into-the-ghc-slow-path.patch
+sched-fair-optimize-select_idle_cpu.patch