git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.10-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 25 Feb 2021 09:11:50 +0000 (10:11 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 25 Feb 2021 09:11:50 +0000 (10:11 +0100)
added patches:
arm64-tegra-add-power-domain-for-tegra210-hda.patch
bluetooth-btusb-some-qualcomm-bluetooth-adapters-stop-working.patch
hwmon-dell-smm-add-xps-15-l502x-to-fan-control-blacklist.patch
kvm-do-not-assume-pte-is-writable-after-follow_pfn.patch
kvm-use-kvm_pfn_t-for-local-pfn-variable-in-hva_to_pfn_remapped.patch
kvm-x86-zap-the-oldest-mmu-pages-not-the-newest.patch
mm-provide-a-saner-pte-walking-api-for-modules.patch
mm-simplify-follow_pte-pmd.patch
mm-unexport-follow_pte_pmd.patch
ntfs-check-for-valid-standard-information-attribute.patch

queue-5.10/arm64-tegra-add-power-domain-for-tegra210-hda.patch [new file with mode: 0644]
queue-5.10/bluetooth-btusb-some-qualcomm-bluetooth-adapters-stop-working.patch [new file with mode: 0644]
queue-5.10/hwmon-dell-smm-add-xps-15-l502x-to-fan-control-blacklist.patch [new file with mode: 0644]
queue-5.10/kvm-do-not-assume-pte-is-writable-after-follow_pfn.patch [new file with mode: 0644]
queue-5.10/kvm-use-kvm_pfn_t-for-local-pfn-variable-in-hva_to_pfn_remapped.patch [new file with mode: 0644]
queue-5.10/kvm-x86-zap-the-oldest-mmu-pages-not-the-newest.patch [new file with mode: 0644]
queue-5.10/mm-provide-a-saner-pte-walking-api-for-modules.patch [new file with mode: 0644]
queue-5.10/mm-simplify-follow_pte-pmd.patch [new file with mode: 0644]
queue-5.10/mm-unexport-follow_pte_pmd.patch [new file with mode: 0644]
queue-5.10/ntfs-check-for-valid-standard-information-attribute.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/arm64-tegra-add-power-domain-for-tegra210-hda.patch b/queue-5.10/arm64-tegra-add-power-domain-for-tegra210-hda.patch
new file mode 100644 (file)
index 0000000..bb27566
--- /dev/null
@@ -0,0 +1,46 @@
+From 1e0ca5467445bc1f41a9e403d6161a22f313dae7 Mon Sep 17 00:00:00 2001
+From: Sameer Pujar <spujar@nvidia.com>
+Date: Thu, 7 Jan 2021 10:36:10 +0530
+Subject: arm64: tegra: Add power-domain for Tegra210 HDA
+
+From: Sameer Pujar <spujar@nvidia.com>
+
+commit 1e0ca5467445bc1f41a9e403d6161a22f313dae7 upstream.
+
+HDA initialization is failing occasionally on Tegra210 and following
+print is observed in the boot log. Because of this probe() fails and
+no sound card is registered.
+
+  [16.800802] tegra-hda 70030000.hda: no codecs found!
+
+Codecs request a state change and enumeration by the controller. In
+failure cases this does not seem to happen as STATETS register reads 0.
+
+The problem seems to be related to the HDA codec dependency on SOR
+power domain. If it is gated during HDA probe then the failure is
+observed. Building Tegra HDA driver into kernel image avoids this
+failure but does not completely address the dependency part. Fix this
+problem by adding 'power-domains' DT property for Tegra210 HDA. Note
+that Tegra186 and Tegra194 HDA do this already.
+
+Fixes: 742af7e7a0a1 ("arm64: tegra: Add Tegra210 support")
+Depends-on: 96d1f078ff0 ("arm64: tegra: Add SOR power-domain for Tegra210")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Sameer Pujar <spujar@nvidia.com>
+Acked-by: Jon Hunter <jonathanh@nvidia.com>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/nvidia/tegra210.dtsi |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+@@ -997,6 +997,7 @@
+                        <&tegra_car 128>, /* hda2hdmi */
+                        <&tegra_car 111>; /* hda2codec_2x */
+               reset-names = "hda", "hda2hdmi", "hda2codec_2x";
++              power-domains = <&pd_sor>;
+               status = "disabled";
+       };
diff --git a/queue-5.10/bluetooth-btusb-some-qualcomm-bluetooth-adapters-stop-working.patch b/queue-5.10/bluetooth-btusb-some-qualcomm-bluetooth-adapters-stop-working.patch
new file mode 100644 (file)
index 0000000..011aebf
--- /dev/null
@@ -0,0 +1,51 @@
+From 234f414efd1164786269849b4fbb533d6c9cdbbf Mon Sep 17 00:00:00 2001
+From: Hui Wang <hui.wang@canonical.com>
+Date: Mon, 8 Feb 2021 13:02:37 +0800
+Subject: Bluetooth: btusb: Some Qualcomm Bluetooth adapters stop working
+
+From: Hui Wang <hui.wang@canonical.com>
+
+commit 234f414efd1164786269849b4fbb533d6c9cdbbf upstream.
+
+This issue starts from linux-5.10-rc1, I reproduced this issue on my
+Dell Inspiron 7447 with BT adapter 0cf3:e005, the kernel will print
+out: "Bluetooth: hci0: don't support firmware rome 0x31010000", and
+someone else also reported the similar issue to bugzilla #211571.
+
+I found this is a regression introduced by 'commit b40f58b97386
+("Bluetooth: btusb: Add Qualcomm Bluetooth SoC WCN6855 support"), the
+patch assumed that if high ROM version is not zero, it is an adapter
+on WCN6855, but many old adapters don't need to load rampatch or nvm,
+and they have non-zero high ROM version.
+
+To fix it, let the driver match the rom_version in the
+qca_devices_table first, if there is no entry matched, check the
+high ROM version, if it is not zero, we assume this adapter is ready
+to work and no need to load rampatch and nvm like previously.
+
+BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=211571
+Fixes: b40f58b97386 ("Bluetooth: btusb: Add Qualcomm Bluetooth SoC WCN6855 support")
+Signed-off-by: Hui Wang <hui.wang@canonical.com>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Cc: Salvatore Bonaccorso <carnil@debian.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/bluetooth/btusb.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -3689,6 +3689,13 @@ static int btusb_setup_qca(struct hci_de
+                       info = &qca_devices_table[i];
+       }
+       if (!info) {
++              /* If the rom_version is not matched in the qca_devices_table
++               * and the high ROM version is not zero, we assume this chip no
++               * need to load the rampatch and nvm.
++               */
++              if (ver_rom & ~0xffffU)
++                      return 0;
++
+               bt_dev_err(hdev, "don't support firmware rome 0x%x", ver_rom);
+               return -ENODEV;
+       }
diff --git a/queue-5.10/hwmon-dell-smm-add-xps-15-l502x-to-fan-control-blacklist.patch b/queue-5.10/hwmon-dell-smm-add-xps-15-l502x-to-fan-control-blacklist.patch
new file mode 100644 (file)
index 0000000..c64f24b
--- /dev/null
@@ -0,0 +1,50 @@
+From 4008bc7d39537bb3be166d8a3129c4980e1dd7dc Mon Sep 17 00:00:00 2001
+From: Thomas Hebb <tommyhebb@gmail.com>
+Date: Sat, 23 Jan 2021 18:46:08 -0800
+Subject: hwmon: (dell-smm) Add XPS 15 L502X to fan control blacklist
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hebb <tommyhebb@gmail.com>
+
+commit 4008bc7d39537bb3be166d8a3129c4980e1dd7dc upstream.
+
+It has been reported[0] that the Dell XPS 15 L502X exhibits similar
+freezing behavior to the other systems[1] on this blacklist. The issue
+was exposed by a prior change of mine to automatically load
+dell_smm_hwmon on a wider set of XPS models. To fix the regression, add
+this model to the blacklist.
+
+[0] https://bugzilla.kernel.org/show_bug.cgi?id=211081
+[1] https://bugzilla.kernel.org/show_bug.cgi?id=195751
+
+Fixes: b8a13e5e8f37 ("hwmon: (dell-smm) Use one DMI match for all XPS models")
+Cc: stable@vger.kernel.org
+Reported-by: Bob Hepple <bob.hepple@gmail.com>
+Tested-by: Bob Hepple <bob.hepple@gmail.com>
+Signed-off-by: Thomas Hebb <tommyhebb@gmail.com>
+Reviewed-by: Pali Rohár <pali@kernel.org>
+Link: https://lore.kernel.org/r/a09eea7616881d40d2db2fb5fa2770dc6166bdae.1611456351.git.tommyhebb@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hwmon/dell-smm-hwmon.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/hwmon/dell-smm-hwmon.c
++++ b/drivers/hwmon/dell-smm-hwmon.c
+@@ -1159,6 +1159,13 @@ static struct dmi_system_id i8k_blacklis
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
+               },
+       },
++      {
++              .ident = "Dell XPS 15 L502X",
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++                      DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L502X"),
++              },
++      },
+       { }
+ };
diff --git a/queue-5.10/kvm-do-not-assume-pte-is-writable-after-follow_pfn.patch b/queue-5.10/kvm-do-not-assume-pte-is-writable-after-follow_pfn.patch
new file mode 100644 (file)
index 0000000..98d0de1
--- /dev/null
@@ -0,0 +1,85 @@
+From bd2fae8da794b55bf2ac02632da3a151b10e664c Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 1 Feb 2021 05:12:11 -0500
+Subject: KVM: do not assume PTE is writable after follow_pfn
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit bd2fae8da794b55bf2ac02632da3a151b10e664c upstream.
+
+In order to convert an HVA to a PFN, KVM usually tries to use
+the get_user_pages family of functions.  This however is not
+possible for VM_IO vmas; in that case, KVM instead uses follow_pfn.
+
+In doing this however KVM loses the information on whether the
+PFN is writable.  That is usually not a problem because the main
+use of VM_IO vmas with KVM is for BARs in PCI device assignment,
+however it is a bug.  To fix it, use follow_pte and check pte_write
+while under the protection of the PTE lock.  The information can
+be used to fail hva_to_pfn_remapped or passed back to the
+caller via *writable.
+
+Usage of follow_pfn was introduced in commit add6a0cd1c5b ("KVM: MMU: try to fix
+up page faults before giving up", 2016-07-05); however, even older version
+have the same issue, all the way back to commit 2e2e3738af33 ("KVM:
+Handle vma regions with no backing page", 2008-07-20), as they also did
+not check whether the PFN was writable.
+
+Fixes: 2e2e3738af33 ("KVM: Handle vma regions with no backing page")
+Reported-by: David Stevens <stevensd@google.com>
+Cc: 3pvd@google.com
+Cc: Jann Horn <jannh@google.com>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ virt/kvm/kvm_main.c |   15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1889,9 +1889,11 @@ static int hva_to_pfn_remapped(struct vm
+                              kvm_pfn_t *p_pfn)
+ {
+       unsigned long pfn;
++      pte_t *ptep;
++      spinlock_t *ptl;
+       int r;
+-      r = follow_pfn(vma, addr, &pfn);
++      r = follow_pte(vma->vm_mm, addr, NULL, &ptep, NULL, &ptl);
+       if (r) {
+               /*
+                * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
+@@ -1906,14 +1908,19 @@ static int hva_to_pfn_remapped(struct vm
+               if (r)
+                       return r;
+-              r = follow_pfn(vma, addr, &pfn);
++              r = follow_pte(vma->vm_mm, addr, NULL, &ptep, NULL, &ptl);
+               if (r)
+                       return r;
++      }
++      if (write_fault && !pte_write(*ptep)) {
++              pfn = KVM_PFN_ERR_RO_FAULT;
++              goto out;
+       }
+       if (writable)
+-              *writable = true;
++              *writable = pte_write(*ptep);
++      pfn = pte_pfn(*ptep);
+       /*
+        * Get a reference here because callers of *hva_to_pfn* and
+@@ -1928,6 +1935,8 @@ static int hva_to_pfn_remapped(struct vm
+        */ 
+       kvm_get_pfn(pfn);
++out:
++      pte_unmap_unlock(ptep, ptl);
+       *p_pfn = pfn;
+       return 0;
+ }
diff --git a/queue-5.10/kvm-use-kvm_pfn_t-for-local-pfn-variable-in-hva_to_pfn_remapped.patch b/queue-5.10/kvm-use-kvm_pfn_t-for-local-pfn-variable-in-hva_to_pfn_remapped.patch
new file mode 100644 (file)
index 0000000..1c206e1
--- /dev/null
@@ -0,0 +1,49 @@
+From a9545779ee9e9e103648f6f2552e73cfe808d0f4 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Mon, 8 Feb 2021 12:19:40 -0800
+Subject: KVM: Use kvm_pfn_t for local PFN variable in hva_to_pfn_remapped()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit a9545779ee9e9e103648f6f2552e73cfe808d0f4 upstream.
+
+Use kvm_pfn_t, a.k.a. u64, for the local 'pfn' variable when retrieving
+a so called "remapped" hva/pfn pair.  In theory, the hva could resolve to
+a pfn in high memory on a 32-bit kernel.
+
+This bug was inadvertently exposed by commit bd2fae8da794 ("KVM: do not
+assume PTE is writable after follow_pfn"), which added an error PFN value
+to the mix, causing gcc to complain about overflowing the unsigned long.
+
+  arch/x86/kvm/../../../virt/kvm/kvm_main.c: In function ‘hva_to_pfn_remapped’:
+  include/linux/kvm_host.h:89:30: error: conversion from ‘long long unsigned int’
+                                  to ‘long unsigned int’ changes value from
+                                  ‘9218868437227405314’ to ‘2’ [-Werror=overflow]
+   89 | #define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)
+      |                              ^
+virt/kvm/kvm_main.c:1935:9: note: in expansion of macro ‘KVM_PFN_ERR_RO_FAULT’
+
+Cc: stable@vger.kernel.org
+Fixes: add6a0cd1c5b ("KVM: MMU: try to fix up page faults before giving up")
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210208201940.1258328-1-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ virt/kvm/kvm_main.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1888,7 +1888,7 @@ static int hva_to_pfn_remapped(struct vm
+                              bool write_fault, bool *writable,
+                              kvm_pfn_t *p_pfn)
+ {
+-      unsigned long pfn;
++      kvm_pfn_t pfn;
+       pte_t *ptep;
+       spinlock_t *ptl;
+       int r;
diff --git a/queue-5.10/kvm-x86-zap-the-oldest-mmu-pages-not-the-newest.patch b/queue-5.10/kvm-x86-zap-the-oldest-mmu-pages-not-the-newest.patch
new file mode 100644 (file)
index 0000000..fbefeb6
--- /dev/null
@@ -0,0 +1,37 @@
+From 8fc517267fb28576dfca2380cc2497a2454b8fae Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 13 Jan 2021 12:50:30 -0800
+Subject: KVM: x86: Zap the oldest MMU pages, not the newest
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit 8fc517267fb28576dfca2380cc2497a2454b8fae upstream.
+
+Walk the list of MMU pages in reverse in kvm_mmu_zap_oldest_mmu_pages().
+The list is FIFO, meaning new pages are inserted at the head and thus
+the oldest pages are at the tail.  Using a "forward" iterator causes KVM
+to zap MMU pages that were just added, which obliterates guest
+performance once the max number of shadow MMU pages is reached.
+
+Fixes: 6b82ef2c9cf1 ("KVM: x86/mmu: Batch zap MMU pages when recycling oldest pages")
+Reported-by: Zdenek Kaspar <zkaspar82@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210113205030.3481307-1-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/mmu/mmu.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -2409,7 +2409,7 @@ static unsigned long kvm_mmu_zap_oldest_
+               return 0;
+ restart:
+-      list_for_each_entry_safe(sp, tmp, &kvm->arch.active_mmu_pages, link) {
++      list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
+               /*
+                * Don't zap active root pages, the page itself can't be freed
+                * and zapping it will just force vCPUs to realloc and reload.
diff --git a/queue-5.10/mm-provide-a-saner-pte-walking-api-for-modules.patch b/queue-5.10/mm-provide-a-saner-pte-walking-api-for-modules.patch
new file mode 100644 (file)
index 0000000..13122a4
--- /dev/null
@@ -0,0 +1,160 @@
+From 9fd6dad1261a541b3f5fa7dc5b152222306e6702 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Fri, 5 Feb 2021 05:07:11 -0500
+Subject: mm: provide a saner PTE walking API for modules
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 9fd6dad1261a541b3f5fa7dc5b152222306e6702 upstream.
+
+Currently, the follow_pfn function is exported for modules but
+follow_pte is not.  However, follow_pfn is very easy to misuse,
+because it does not provide protections (so most of its callers
+assume the page is writable!) and because it returns after having
+already unlocked the page table lock.
+
+Provide instead a simplified version of follow_pte that does
+not have the pmdpp and range arguments.  The older version
+survives as follow_invalidate_pte() for use by fs/dax.c.
+
+Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/dax.c            |    5 +++--
+ include/linux/mm.h  |    6 ++++--
+ mm/memory.c         |   41 ++++++++++++++++++++++++++++++++++++-----
+ virt/kvm/kvm_main.c |    4 ++--
+ 4 files changed, 45 insertions(+), 11 deletions(-)
+
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -810,11 +810,12 @@ static void dax_entry_mkclean(struct add
+               address = pgoff_address(index, vma);
+               /*
+-               * Note because we provide range to follow_pte it will call
++               * follow_invalidate_pte() will use the range to call
+                * mmu_notifier_invalidate_range_start() on our behalf before
+                * taking any lock.
+                */
+-              if (follow_pte(vma->vm_mm, address, &range, &ptep, &pmdp, &ptl))
++              if (follow_invalidate_pte(vma->vm_mm, address, &range, &ptep,
++                                        &pmdp, &ptl))
+                       continue;
+               /*
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1655,9 +1655,11 @@ void free_pgd_range(struct mmu_gather *t
+               unsigned long end, unsigned long floor, unsigned long ceiling);
+ int
+ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
++int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
++                        struct mmu_notifier_range *range, pte_t **ptepp,
++                        pmd_t **pmdpp, spinlock_t **ptlp);
+ int follow_pte(struct mm_struct *mm, unsigned long address,
+-              struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp,
+-              spinlock_t **ptlp);
++             pte_t **ptepp, spinlock_t **ptlp);
+ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+       unsigned long *pfn);
+ int follow_phys(struct vm_area_struct *vma, unsigned long address,
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -4707,9 +4707,9 @@ int __pmd_alloc(struct mm_struct *mm, pu
+ }
+ #endif /* __PAGETABLE_PMD_FOLDED */
+-int follow_pte(struct mm_struct *mm, unsigned long address,
+-             struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp,
+-             spinlock_t **ptlp)
++int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
++                        struct mmu_notifier_range *range, pte_t **ptepp,
++                        pmd_t **pmdpp, spinlock_t **ptlp)
+ {
+       pgd_t *pgd;
+       p4d_t *p4d;
+@@ -4775,6 +4775,34 @@ out:
+ }
+ /**
++ * follow_pte - look up PTE at a user virtual address
++ * @mm: the mm_struct of the target address space
++ * @address: user virtual address
++ * @ptepp: location to store found PTE
++ * @ptlp: location to store the lock for the PTE
++ *
++ * On a successful return, the pointer to the PTE is stored in @ptepp;
++ * the corresponding lock is taken and its location is stored in @ptlp.
++ * The contents of the PTE are only stable until @ptlp is released;
++ * any further use, if any, must be protected against invalidation
++ * with MMU notifiers.
++ *
++ * Only IO mappings and raw PFN mappings are allowed.  The mmap semaphore
++ * should be taken for read.
++ *
++ * KVM uses this function.  While it is arguably less bad than ``follow_pfn``,
++ * it is not a good general-purpose API.
++ *
++ * Return: zero on success, -ve otherwise.
++ */
++int follow_pte(struct mm_struct *mm, unsigned long address,
++             pte_t **ptepp, spinlock_t **ptlp)
++{
++      return follow_invalidate_pte(mm, address, NULL, ptepp, NULL, ptlp);
++}
++EXPORT_SYMBOL_GPL(follow_pte);
++
++/**
+  * follow_pfn - look up PFN at a user virtual address
+  * @vma: memory mapping
+  * @address: user virtual address
+@@ -4782,6 +4810,9 @@ out:
+  *
+  * Only IO mappings and raw PFN mappings are allowed.
+  *
++ * This function does not allow the caller to read the permissions
++ * of the PTE.  Do not use it.
++ *
+  * Return: zero and the pfn at @pfn on success, -ve otherwise.
+  */
+ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+@@ -4794,7 +4825,7 @@ int follow_pfn(struct vm_area_struct *vm
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               return ret;
+-      ret = follow_pte(vma->vm_mm, address, NULL, &ptep, NULL, &ptl);
++      ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
+       if (ret)
+               return ret;
+       *pfn = pte_pfn(*ptep);
+@@ -4815,7 +4846,7 @@ int follow_phys(struct vm_area_struct *v
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               goto out;
+-      if (follow_pte(vma->vm_mm, address, NULL, &ptep, NULL, &ptl))
++      if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
+               goto out;
+       pte = *ptep;
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1893,7 +1893,7 @@ static int hva_to_pfn_remapped(struct vm
+       spinlock_t *ptl;
+       int r;
+-      r = follow_pte(vma->vm_mm, addr, NULL, &ptep, NULL, &ptl);
++      r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
+       if (r) {
+               /*
+                * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
+@@ -1908,7 +1908,7 @@ static int hva_to_pfn_remapped(struct vm
+               if (r)
+                       return r;
+-              r = follow_pte(vma->vm_mm, addr, NULL, &ptep, NULL, &ptl);
++              r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
+               if (r)
+                       return r;
+       }
diff --git a/queue-5.10/mm-simplify-follow_pte-pmd.patch b/queue-5.10/mm-simplify-follow_pte-pmd.patch
new file mode 100644 (file)
index 0000000..455c0b8
--- /dev/null
@@ -0,0 +1,130 @@
+From ff5c19ed4b087073cea38ff0edc80c23d7256943 Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Tue, 15 Dec 2020 20:47:23 -0800
+Subject: mm: simplify follow_pte{,pmd}
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit ff5c19ed4b087073cea38ff0edc80c23d7256943 upstream.
+
+Merge __follow_pte_pmd, follow_pte_pmd and follow_pte into a single
+follow_pte function and just pass two additional NULL arguments for the
+two previous follow_pte callers.
+
+[sfr@canb.auug.org.au: merge fix for "s390/pci: remove races against pte updates"]
+  Link: https://lkml.kernel.org/r/20201111221254.7f6a3658@canb.auug.org.au
+
+Link: https://lkml.kernel.org/r/20201029101432.47011-3-hch@lst.de
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/dax.c           |    9 ++++-----
+ include/linux/mm.h |    6 +++---
+ mm/memory.c        |   35 +++++------------------------------
+ 3 files changed, 12 insertions(+), 38 deletions(-)
+
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -810,12 +810,11 @@ static void dax_entry_mkclean(struct add
+               address = pgoff_address(index, vma);
+               /*
+-               * Note because we provide range to follow_pte_pmd it will
+-               * call mmu_notifier_invalidate_range_start() on our behalf
+-               * before taking any lock.
++               * Note because we provide range to follow_pte it will call
++               * mmu_notifier_invalidate_range_start() on our behalf before
++               * taking any lock.
+                */
+-              if (follow_pte_pmd(vma->vm_mm, address, &range,
+-                                 &ptep, &pmdp, &ptl))
++              if (follow_pte(vma->vm_mm, address, &range, &ptep, &pmdp, &ptl))
+                       continue;
+               /*
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1655,9 +1655,9 @@ void free_pgd_range(struct mmu_gather *t
+               unsigned long end, unsigned long floor, unsigned long ceiling);
+ int
+ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
+-int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+-                 struct mmu_notifier_range *range,
+-                 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
++int follow_pte(struct mm_struct *mm, unsigned long address,
++              struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp,
++              spinlock_t **ptlp);
+ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+       unsigned long *pfn);
+ int follow_phys(struct vm_area_struct *vma, unsigned long address,
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -4707,9 +4707,9 @@ int __pmd_alloc(struct mm_struct *mm, pu
+ }
+ #endif /* __PAGETABLE_PMD_FOLDED */
+-static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+-                          struct mmu_notifier_range *range,
+-                          pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
++int follow_pte(struct mm_struct *mm, unsigned long address,
++             struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp,
++             spinlock_t **ptlp)
+ {
+       pgd_t *pgd;
+       p4d_t *p4d;
+@@ -4774,31 +4774,6 @@ out:
+       return -EINVAL;
+ }
+-static inline int follow_pte(struct mm_struct *mm, unsigned long address,
+-                           pte_t **ptepp, spinlock_t **ptlp)
+-{
+-      int res;
+-
+-      /* (void) is needed to make gcc happy */
+-      (void) __cond_lock(*ptlp,
+-                         !(res = __follow_pte_pmd(mm, address, NULL,
+-                                                  ptepp, NULL, ptlp)));
+-      return res;
+-}
+-
+-int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+-                 struct mmu_notifier_range *range,
+-                 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
+-{
+-      int res;
+-
+-      /* (void) is needed to make gcc happy */
+-      (void) __cond_lock(*ptlp,
+-                         !(res = __follow_pte_pmd(mm, address, range,
+-                                                  ptepp, pmdpp, ptlp)));
+-      return res;
+-}
+-
+ /**
+  * follow_pfn - look up PFN at a user virtual address
+  * @vma: memory mapping
+@@ -4819,7 +4794,7 @@ int follow_pfn(struct vm_area_struct *vm
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               return ret;
+-      ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
++      ret = follow_pte(vma->vm_mm, address, NULL, &ptep, NULL, &ptl);
+       if (ret)
+               return ret;
+       *pfn = pte_pfn(*ptep);
+@@ -4840,7 +4815,7 @@ int follow_phys(struct vm_area_struct *v
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               goto out;
+-      if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
++      if (follow_pte(vma->vm_mm, address, NULL, &ptep, NULL, &ptl))
+               goto out;
+       pte = *ptep;
diff --git a/queue-5.10/mm-unexport-follow_pte_pmd.patch b/queue-5.10/mm-unexport-follow_pte_pmd.patch
new file mode 100644 (file)
index 0000000..3a73e76
--- /dev/null
@@ -0,0 +1,40 @@
+From 7336375734d65ecc82956b59a79cf5deccce880c Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Tue, 15 Dec 2020 20:47:20 -0800
+Subject: mm: unexport follow_pte_pmd
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit 7336375734d65ecc82956b59a79cf5deccce880c upstream.
+
+Patch series "simplify follow_pte a bit".
+
+This small series drops the not needed follow_pte_pmd exports, and
+simplifies the follow_pte family of functions a bit.
+
+This patch (of 2):
+
+follow_pte_pmd() is only used by the DAX code, which can't be modular.
+
+Link: https://lkml.kernel.org/r/20201029101432.47011-2-hch@lst.de
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -4798,7 +4798,6 @@ int follow_pte_pmd(struct mm_struct *mm,
+                                                   ptepp, pmdpp, ptlp)));
+       return res;
+ }
+-EXPORT_SYMBOL(follow_pte_pmd);
+ /**
+  * follow_pfn - look up PFN at a user virtual address
diff --git a/queue-5.10/ntfs-check-for-valid-standard-information-attribute.patch b/queue-5.10/ntfs-check-for-valid-standard-information-attribute.patch
new file mode 100644 (file)
index 0000000..72f0d3c
--- /dev/null
@@ -0,0 +1,43 @@
+From 4dfe6bd94959222e18d512bdf15f6bf9edb9c27c Mon Sep 17 00:00:00 2001
+From: Rustam Kovhaev <rkovhaev@gmail.com>
+Date: Wed, 24 Feb 2021 12:00:30 -0800
+Subject: ntfs: check for valid standard information attribute
+
+From: Rustam Kovhaev <rkovhaev@gmail.com>
+
+commit 4dfe6bd94959222e18d512bdf15f6bf9edb9c27c upstream.
+
+Mounting a corrupted filesystem with NTFS resulted in a kernel crash.
+
+We should check for valid STANDARD_INFORMATION attribute offset and length
+before trying to access it
+
+Link: https://lkml.kernel.org/r/20210217155930.1506815-1-rkovhaev@gmail.com
+Link: https://syzkaller.appspot.com/bug?extid=c584225dabdea2f71969
+Signed-off-by: Rustam Kovhaev <rkovhaev@gmail.com>
+Reported-by: syzbot+c584225dabdea2f71969@syzkaller.appspotmail.com
+Tested-by: syzbot+c584225dabdea2f71969@syzkaller.appspotmail.com
+Acked-by: Anton Altaparmakov <anton@tuxera.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ntfs/inode.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/fs/ntfs/inode.c
++++ b/fs/ntfs/inode.c
+@@ -629,6 +629,12 @@ static int ntfs_read_locked_inode(struct
+       }
+       a = ctx->attr;
+       /* Get the standard information attribute value. */
++      if ((u8 *)a + le16_to_cpu(a->data.resident.value_offset)
++                      + le32_to_cpu(a->data.resident.value_length) >
++                      (u8 *)ctx->mrec + vol->mft_record_size) {
++              ntfs_error(vi->i_sb, "Corrupt standard information attribute in inode.");
++              goto unm_err_out;
++      }
+       si = (STANDARD_INFORMATION*)((u8*)a +
+                       le16_to_cpu(a->data.resident.value_offset));
index 3ace385f2b2c6ab8c2232a04ace84dba56769a26..037a3ab00ed7bdab822510febb8f0520f84700f0 100644 (file)
@@ -5,3 +5,13 @@ nvme-rdma-use-ibdev_to_node-instead-of-dereferencing-dma_device.patch
 usb-quirks-sort-quirk-entries.patch
 usb-quirks-add-quirk-to-start-video-capture-on-elmo-l-12f-document-camera-reliable.patch
 ceph-downgrade-warning-from-mdsmap-decode-to-debug.patch
+ntfs-check-for-valid-standard-information-attribute.patch
+bluetooth-btusb-some-qualcomm-bluetooth-adapters-stop-working.patch
+arm64-tegra-add-power-domain-for-tegra210-hda.patch
+hwmon-dell-smm-add-xps-15-l502x-to-fan-control-blacklist.patch
+kvm-x86-zap-the-oldest-mmu-pages-not-the-newest.patch
+mm-unexport-follow_pte_pmd.patch
+mm-simplify-follow_pte-pmd.patch
+kvm-do-not-assume-pte-is-writable-after-follow_pfn.patch
+mm-provide-a-saner-pte-walking-api-for-modules.patch
+kvm-use-kvm_pfn_t-for-local-pfn-variable-in-hva_to_pfn_remapped.patch