5.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 25 Feb 2021 09:11:47 +0000 (10:11 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 25 Feb 2021 09:11:47 +0000 (10:11 +0100)
added patches:
arm64-tegra-add-power-domain-for-tegra210-hda.patch
kvm-do-not-assume-pte-is-writable-after-follow_pfn.patch
kvm-use-kvm_pfn_t-for-local-pfn-variable-in-hva_to_pfn_remapped.patch
mm-provide-a-saner-pte-walking-api-for-modules.patch
mm-simplify-follow_pte-pmd.patch
mm-unexport-follow_pte_pmd.patch
ntfs-check-for-valid-standard-information-attribute.patch
scripts-set-proper-openssl-include-dir-also-for-sign-file.patch
scripts-use-pkg-config-to-locate-libcrypto.patch

queue-5.4/arm64-tegra-add-power-domain-for-tegra210-hda.patch [new file with mode: 0644]
queue-5.4/kvm-do-not-assume-pte-is-writable-after-follow_pfn.patch [new file with mode: 0644]
queue-5.4/kvm-use-kvm_pfn_t-for-local-pfn-variable-in-hva_to_pfn_remapped.patch [new file with mode: 0644]
queue-5.4/mm-provide-a-saner-pte-walking-api-for-modules.patch [new file with mode: 0644]
queue-5.4/mm-simplify-follow_pte-pmd.patch [new file with mode: 0644]
queue-5.4/mm-unexport-follow_pte_pmd.patch [new file with mode: 0644]
queue-5.4/ntfs-check-for-valid-standard-information-attribute.patch [new file with mode: 0644]
queue-5.4/scripts-set-proper-openssl-include-dir-also-for-sign-file.patch [new file with mode: 0644]
queue-5.4/scripts-use-pkg-config-to-locate-libcrypto.patch [new file with mode: 0644]
queue-5.4/series

diff --git a/queue-5.4/arm64-tegra-add-power-domain-for-tegra210-hda.patch b/queue-5.4/arm64-tegra-add-power-domain-for-tegra210-hda.patch
new file mode 100644 (file)
index 0000000..4fcde46
--- /dev/null
@@ -0,0 +1,46 @@
+From 1e0ca5467445bc1f41a9e403d6161a22f313dae7 Mon Sep 17 00:00:00 2001
+From: Sameer Pujar <spujar@nvidia.com>
+Date: Thu, 7 Jan 2021 10:36:10 +0530
+Subject: arm64: tegra: Add power-domain for Tegra210 HDA
+
+From: Sameer Pujar <spujar@nvidia.com>
+
+commit 1e0ca5467445bc1f41a9e403d6161a22f313dae7 upstream.
+
+HDA initialization occasionally fails on Tegra210 and the following
+print is observed in the boot log. Because of this, probe() fails and
+no sound card is registered.
+
+  [16.800802] tegra-hda 70030000.hda: no codecs found!
+
+Codecs request a state change and enumeration by the controller. In
+failure cases this does not seem to happen, as the STATETS register
+reads 0.
+
+The problem seems to be related to the HDA codec's dependency on the
+SOR power domain. If that domain is gated during HDA probe, the
+failure is observed. Building the Tegra HDA driver into the kernel
+image avoids the failure but does not completely address the
+dependency. Fix this problem by adding the 'power-domains' DT property
+for Tegra210 HDA. Note that Tegra186 and Tegra194 HDA already do this.
+
+Fixes: 742af7e7a0a1 ("arm64: tegra: Add Tegra210 support")
+Depends-on: 96d1f078ff0 ("arm64: tegra: Add SOR power-domain for Tegra210")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Sameer Pujar <spujar@nvidia.com>
+Acked-by: Jon Hunter <jonathanh@nvidia.com>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/nvidia/tegra210.dtsi |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+@@ -917,6 +917,7 @@
+                        <&tegra_car 128>, /* hda2hdmi */
+                        <&tegra_car 111>; /* hda2codec_2x */
+               reset-names = "hda", "hda2hdmi", "hda2codec_2x";
++              power-domains = <&pd_sor>;
+               status = "disabled";
+       };
diff --git a/queue-5.4/kvm-do-not-assume-pte-is-writable-after-follow_pfn.patch b/queue-5.4/kvm-do-not-assume-pte-is-writable-after-follow_pfn.patch
new file mode 100644 (file)
index 0000000..64801c9
--- /dev/null
@@ -0,0 +1,85 @@
+From bd2fae8da794b55bf2ac02632da3a151b10e664c Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 1 Feb 2021 05:12:11 -0500
+Subject: KVM: do not assume PTE is writable after follow_pfn
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit bd2fae8da794b55bf2ac02632da3a151b10e664c upstream.
+
+In order to convert an HVA to a PFN, KVM usually tries to use
+the get_user_pages family of functions.  This, however, is not
+possible for VM_IO vmas; in that case, KVM instead uses follow_pfn.
+
+In doing so, however, KVM loses the information on whether the
+PFN is writable.  That is usually not a problem because the main
+use of VM_IO vmas with KVM is for BARs in PCI device assignment,
+but it is a bug nonetheless.  To fix it, use follow_pte and check
+pte_write while under the protection of the PTE lock.  The
+information can be used to fail hva_to_pfn_remapped or be passed
+back to the caller via *writable.
+
+Usage of follow_pfn was introduced in commit add6a0cd1c5b ("KVM: MMU: try to fix
+up page faults before giving up", 2016-07-05); however, even older versions
+have the same issue, all the way back to commit 2e2e3738af33 ("KVM:
+Handle vma regions with no backing page", 2008-07-20), as they also did
+not check whether the PFN was writable.
+
+Fixes: 2e2e3738af33 ("KVM: Handle vma regions with no backing page")
+Reported-by: David Stevens <stevensd@google.com>
+Cc: 3pvd@google.com
+Cc: Jann Horn <jannh@google.com>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ virt/kvm/kvm_main.c |   15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1599,9 +1599,11 @@ static int hva_to_pfn_remapped(struct vm
+                              kvm_pfn_t *p_pfn)
+ {
+       unsigned long pfn;
++      pte_t *ptep;
++      spinlock_t *ptl;
+       int r;
+-      r = follow_pfn(vma, addr, &pfn);
++      r = follow_pte(vma->vm_mm, addr, NULL, &ptep, NULL, &ptl);
+       if (r) {
+               /*
+                * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
+@@ -1616,14 +1618,19 @@ static int hva_to_pfn_remapped(struct vm
+               if (r)
+                       return r;
+-              r = follow_pfn(vma, addr, &pfn);
++              r = follow_pte(vma->vm_mm, addr, NULL, &ptep, NULL, &ptl);
+               if (r)
+                       return r;
++      }
++      if (write_fault && !pte_write(*ptep)) {
++              pfn = KVM_PFN_ERR_RO_FAULT;
++              goto out;
+       }
+       if (writable)
+-              *writable = true;
++              *writable = pte_write(*ptep);
++      pfn = pte_pfn(*ptep);
+       /*
+        * Get a reference here because callers of *hva_to_pfn* and
+@@ -1638,6 +1645,8 @@ static int hva_to_pfn_remapped(struct vm
+        */ 
+       kvm_get_pfn(pfn);
++out:
++      pte_unmap_unlock(ptep, ptl);
+       *p_pfn = pfn;
+       return 0;
+ }
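
A minimal sketch of the contrast the commit message describes, using
the 5.4-era six-argument follow_pte() seen in the hunk above: where
follow_pfn() hands back only a raw PFN, follow_pte() hands back the
locked PTE, so the permission bits can be read safely.  The helper
below and its name are hypothetical.

#include <linux/mm.h>

/* Hypothetical helper: report whether a VM_IO/VM_PFNMAP address is
 * currently mapped writable.  Assumes mmap_sem is held for read. */
static int example_addr_writable(struct vm_area_struct *vma,
				 unsigned long addr, bool *writable)
{
	pte_t *ptep;
	spinlock_t *ptl;
	int r;

	/* follow_pfn(vma, addr, &pfn) would yield only the PFN, with
	 * no way to ask "is it writable?" and no lock protecting the
	 * answer.  follow_pte() keeps the PTE lock held on return. */
	r = follow_pte(vma->vm_mm, addr, NULL, &ptep, NULL, &ptl);
	if (r)
		return r;

	*writable = pte_write(*ptep);	/* stable only while ptl is held */
	pte_unmap_unlock(ptep, ptl);
	return 0;
}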
diff --git a/queue-5.4/kvm-use-kvm_pfn_t-for-local-pfn-variable-in-hva_to_pfn_remapped.patch b/queue-5.4/kvm-use-kvm_pfn_t-for-local-pfn-variable-in-hva_to_pfn_remapped.patch
new file mode 100644 (file)
index 0000000..53b7bc3
--- /dev/null
@@ -0,0 +1,49 @@
+From a9545779ee9e9e103648f6f2552e73cfe808d0f4 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Mon, 8 Feb 2021 12:19:40 -0800
+Subject: KVM: Use kvm_pfn_t for local PFN variable in hva_to_pfn_remapped()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit a9545779ee9e9e103648f6f2552e73cfe808d0f4 upstream.
+
+Use kvm_pfn_t, a.k.a. u64, for the local 'pfn' variable when retrieving
+a so-called "remapped" hva/pfn pair.  In theory, the hva could resolve to
+a pfn in high memory on a 32-bit kernel.
+
+This bug was inadvertently exposed by commit bd2fae8da794 ("KVM: do not
+assume PTE is writable after follow_pfn"), which added an error PFN value
+to the mix, causing gcc to complain about overflowing the unsigned long.
+
+  arch/x86/kvm/../../../virt/kvm/kvm_main.c: In function ‘hva_to_pfn_remapped’:
+  include/linux/kvm_host.h:89:30: error: conversion from ‘long long unsigned int’
+                                  to ‘long unsigned int’ changes value from
+                                  ‘9218868437227405314’ to ‘2’ [-Werror=overflow]
+   89 | #define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)
+      |                              ^
+virt/kvm/kvm_main.c:1935:9: note: in expansion of macro ‘KVM_PFN_ERR_RO_FAULT’
+
+Cc: stable@vger.kernel.org
+Fixes: add6a0cd1c5b ("KVM: MMU: try to fix up page faults before giving up")
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210208201940.1258328-1-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ virt/kvm/kvm_main.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1598,7 +1598,7 @@ static int hva_to_pfn_remapped(struct vm
+                              bool write_fault, bool *writable,
+                              kvm_pfn_t *p_pfn)
+ {
+-      unsigned long pfn;
++      kvm_pfn_t pfn;
+       pte_t *ptep;
+       spinlock_t *ptl;
+       int r;
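
To see the conversion gcc complains about in isolation, here is a
small self-contained userspace sketch; the mask below mirrors KVM's
error-PFN encoding but is a stand-in, not the kernel's definition.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t kvm_pfn_t;		/* 64-bit on every architecture */

/* Stand-ins mirroring KVM_PFN_ERR_MASK and KVM_PFN_ERR_RO_FAULT. */
#define PFN_ERR_MASK		((kvm_pfn_t)0x7ff << 52)
#define PFN_ERR_RO_FAULT	(PFN_ERR_MASK + 2)

int main(void)
{
	/* On a 32-bit target, unsigned long is 32 bits wide, so the
	 * error value 0x7ff0000000000002 truncates to 2 -- exactly the
	 * "changes value from ... to 2" in the gcc output quoted
	 * above.  kvm_pfn_t keeps the error bits intact. */
	unsigned long narrow = (unsigned long)PFN_ERR_RO_FAULT;
	kvm_pfn_t wide = PFN_ERR_RO_FAULT;

	printf("narrow=%lu wide=%llu\n", narrow,
	       (unsigned long long)wide);
	return 0;
}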
diff --git a/queue-5.4/mm-provide-a-saner-pte-walking-api-for-modules.patch b/queue-5.4/mm-provide-a-saner-pte-walking-api-for-modules.patch
new file mode 100644 (file)
index 0000000..dafc290
--- /dev/null
@@ -0,0 +1,160 @@
+From 9fd6dad1261a541b3f5fa7dc5b152222306e6702 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Fri, 5 Feb 2021 05:07:11 -0500
+Subject: mm: provide a saner PTE walking API for modules
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 9fd6dad1261a541b3f5fa7dc5b152222306e6702 upstream.
+
+Currently, the follow_pfn function is exported for modules but
+follow_pte is not.  However, follow_pfn is very easy to misuse,
+because it does not provide protections (so most of its callers
+assume the page is writable!) and because it returns after having
+already unlocked the page table lock.
+
+Provide instead a simplified version of follow_pte that does
+not have the pmdpp and range arguments.  The older version
+survives as follow_invalidate_pte() for use by fs/dax.c.
+
+Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/dax.c            |    5 +++--
+ include/linux/mm.h  |    6 ++++--
+ mm/memory.c         |   41 ++++++++++++++++++++++++++++++++++++-----
+ virt/kvm/kvm_main.c |    4 ++--
+ 4 files changed, 45 insertions(+), 11 deletions(-)
+
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -794,11 +794,12 @@ static void dax_entry_mkclean(struct add
+               address = pgoff_address(index, vma);
+               /*
+-               * Note because we provide range to follow_pte it will call
++               * follow_invalidate_pte() will use the range to call
+                * mmu_notifier_invalidate_range_start() on our behalf before
+                * taking any lock.
+                */
+-              if (follow_pte(vma->vm_mm, address, &range, &ptep, &pmdp, &ptl))
++              if (follow_invalidate_pte(vma->vm_mm, address, &range, &ptep,
++                                        &pmdp, &ptl))
+                       continue;
+               /*
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1466,9 +1466,11 @@ void free_pgd_range(struct mmu_gather *t
+               unsigned long end, unsigned long floor, unsigned long ceiling);
+ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
+                       struct vm_area_struct *vma);
++int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
++                        struct mmu_notifier_range *range, pte_t **ptepp,
++                        pmd_t **pmdpp, spinlock_t **ptlp);
+ int follow_pte(struct mm_struct *mm, unsigned long address,
+-              struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp,
+-              spinlock_t **ptlp);
++             pte_t **ptepp, spinlock_t **ptlp);
+ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+       unsigned long *pfn);
+ int follow_phys(struct vm_area_struct *vma, unsigned long address,
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -4222,9 +4222,9 @@ int __pmd_alloc(struct mm_struct *mm, pu
+ }
+ #endif /* __PAGETABLE_PMD_FOLDED */
+-int follow_pte(struct mm_struct *mm, unsigned long address,
+-             struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp,
+-             spinlock_t **ptlp)
++int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
++                        struct mmu_notifier_range *range, pte_t **ptepp,
++                        pmd_t **pmdpp, spinlock_t **ptlp)
+ {
+       pgd_t *pgd;
+       p4d_t *p4d;
+@@ -4290,6 +4290,34 @@ out:
+ }
+ /**
++ * follow_pte - look up PTE at a user virtual address
++ * @mm: the mm_struct of the target address space
++ * @address: user virtual address
++ * @ptepp: location to store found PTE
++ * @ptlp: location to store the lock for the PTE
++ *
++ * On a successful return, the pointer to the PTE is stored in @ptepp;
++ * the corresponding lock is taken and its location is stored in @ptlp.
++ * The contents of the PTE are only stable until @ptlp is released;
++ * any further use, if any, must be protected against invalidation
++ * with MMU notifiers.
++ *
++ * Only IO mappings and raw PFN mappings are allowed.  The mmap semaphore
++ * should be taken for read.
++ *
++ * KVM uses this function.  While it is arguably less bad than ``follow_pfn``,
++ * it is not a good general-purpose API.
++ *
++ * Return: zero on success, -ve otherwise.
++ */
++int follow_pte(struct mm_struct *mm, unsigned long address,
++             pte_t **ptepp, spinlock_t **ptlp)
++{
++      return follow_invalidate_pte(mm, address, NULL, ptepp, NULL, ptlp);
++}
++EXPORT_SYMBOL_GPL(follow_pte);
++
++/**
+  * follow_pfn - look up PFN at a user virtual address
+  * @vma: memory mapping
+  * @address: user virtual address
+@@ -4297,6 +4325,9 @@ out:
+  *
+  * Only IO mappings and raw PFN mappings are allowed.
+  *
++ * This function does not allow the caller to read the permissions
++ * of the PTE.  Do not use it.
++ *
+  * Return: zero and the pfn at @pfn on success, -ve otherwise.
+  */
+ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+@@ -4309,7 +4340,7 @@ int follow_pfn(struct vm_area_struct *vm
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               return ret;
+-      ret = follow_pte(vma->vm_mm, address, NULL, &ptep, NULL, &ptl);
++      ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
+       if (ret)
+               return ret;
+       *pfn = pte_pfn(*ptep);
+@@ -4330,7 +4361,7 @@ int follow_phys(struct vm_area_struct *v
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               goto out;
+-      if (follow_pte(vma->vm_mm, address, NULL, &ptep, NULL, &ptl))
++      if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
+               goto out;
+       pte = *ptep;
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1603,7 +1603,7 @@ static int hva_to_pfn_remapped(struct vm
+       spinlock_t *ptl;
+       int r;
+-      r = follow_pte(vma->vm_mm, addr, NULL, &ptep, NULL, &ptl);
++      r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
+       if (r) {
+               /*
+                * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
+@@ -1618,7 +1618,7 @@ static int hva_to_pfn_remapped(struct vm
+               if (r)
+                       return r;
+-              r = follow_pte(vma->vm_mm, addr, NULL, &ptep, NULL, &ptl);
++              r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
+               if (r)
+                       return r;
+       }
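
With this patch applied, callers get the simplified four-argument
follow_pte().  A minimal sketch of the usage pattern the new
kernel-doc above prescribes; the helper and its name are hypothetical,
and the caller is assumed to hold the mmap semaphore for read.

#include <linux/mm.h>

/* Hypothetical module helper: fetch the PFN behind a user address in
 * an IO/PFN mapping, plus whether it is mapped writable. */
static int example_lookup_pfn(struct mm_struct *mm, unsigned long addr,
			      unsigned long *pfn, bool *writable)
{
	pte_t *ptep;
	spinlock_t *ptl;
	int r;

	r = follow_pte(mm, addr, &ptep, &ptl);
	if (r)
		return r;

	/* The PTE contents are stable only until the lock is dropped;
	 * any longer-lived use needs MMU-notifier protection. */
	*pfn = pte_pfn(*ptep);
	*writable = pte_write(*ptep);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}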
diff --git a/queue-5.4/mm-simplify-follow_pte-pmd.patch b/queue-5.4/mm-simplify-follow_pte-pmd.patch
new file mode 100644 (file)
index 0000000..e5f4cb6
--- /dev/null
@@ -0,0 +1,130 @@
+From ff5c19ed4b087073cea38ff0edc80c23d7256943 Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Tue, 15 Dec 2020 20:47:23 -0800
+Subject: mm: simplify follow_pte{,pmd}
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit ff5c19ed4b087073cea38ff0edc80c23d7256943 upstream.
+
+Merge __follow_pte_pmd, follow_pte_pmd and follow_pte into a single
+follow_pte function and just pass two additional NULL arguments for the
+two previous follow_pte callers.
+
+[sfr@canb.auug.org.au: merge fix for "s390/pci: remove races against pte updates"]
+  Link: https://lkml.kernel.org/r/20201111221254.7f6a3658@canb.auug.org.au
+
+Link: https://lkml.kernel.org/r/20201029101432.47011-3-hch@lst.de
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/dax.c           |    9 ++++-----
+ include/linux/mm.h |    6 +++---
+ mm/memory.c        |   35 +++++------------------------------
+ 3 files changed, 12 insertions(+), 38 deletions(-)
+
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -794,12 +794,11 @@ static void dax_entry_mkclean(struct add
+               address = pgoff_address(index, vma);
+               /*
+-               * Note because we provide range to follow_pte_pmd it will
+-               * call mmu_notifier_invalidate_range_start() on our behalf
+-               * before taking any lock.
++               * Note because we provide range to follow_pte it will call
++               * mmu_notifier_invalidate_range_start() on our behalf before
++               * taking any lock.
+                */
+-              if (follow_pte_pmd(vma->vm_mm, address, &range,
+-                                 &ptep, &pmdp, &ptl))
++              if (follow_pte(vma->vm_mm, address, &range, &ptep, &pmdp, &ptl))
+                       continue;
+               /*
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1466,9 +1466,9 @@ void free_pgd_range(struct mmu_gather *t
+               unsigned long end, unsigned long floor, unsigned long ceiling);
+ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
+                       struct vm_area_struct *vma);
+-int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+-                 struct mmu_notifier_range *range,
+-                 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
++int follow_pte(struct mm_struct *mm, unsigned long address,
++              struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp,
++              spinlock_t **ptlp);
+ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+       unsigned long *pfn);
+ int follow_phys(struct vm_area_struct *vma, unsigned long address,
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -4222,9 +4222,9 @@ int __pmd_alloc(struct mm_struct *mm, pu
+ }
+ #endif /* __PAGETABLE_PMD_FOLDED */
+-static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+-                          struct mmu_notifier_range *range,
+-                          pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
++int follow_pte(struct mm_struct *mm, unsigned long address,
++             struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp,
++             spinlock_t **ptlp)
+ {
+       pgd_t *pgd;
+       p4d_t *p4d;
+@@ -4289,31 +4289,6 @@ out:
+       return -EINVAL;
+ }
+-static inline int follow_pte(struct mm_struct *mm, unsigned long address,
+-                           pte_t **ptepp, spinlock_t **ptlp)
+-{
+-      int res;
+-
+-      /* (void) is needed to make gcc happy */
+-      (void) __cond_lock(*ptlp,
+-                         !(res = __follow_pte_pmd(mm, address, NULL,
+-                                                  ptepp, NULL, ptlp)));
+-      return res;
+-}
+-
+-int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+-                 struct mmu_notifier_range *range,
+-                 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
+-{
+-      int res;
+-
+-      /* (void) is needed to make gcc happy */
+-      (void) __cond_lock(*ptlp,
+-                         !(res = __follow_pte_pmd(mm, address, range,
+-                                                  ptepp, pmdpp, ptlp)));
+-      return res;
+-}
+-
+ /**
+  * follow_pfn - look up PFN at a user virtual address
+  * @vma: memory mapping
+@@ -4334,7 +4309,7 @@ int follow_pfn(struct vm_area_struct *vm
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               return ret;
+-      ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
++      ret = follow_pte(vma->vm_mm, address, NULL, &ptep, NULL, &ptl);
+       if (ret)
+               return ret;
+       *pfn = pte_pfn(*ptep);
+@@ -4355,7 +4330,7 @@ int follow_phys(struct vm_area_struct *v
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               goto out;
+-      if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
++      if (follow_pte(vma->vm_mm, address, NULL, &ptep, NULL, &ptl))
+               goto out;
+       pte = *ptep;
diff --git a/queue-5.4/mm-unexport-follow_pte_pmd.patch b/queue-5.4/mm-unexport-follow_pte_pmd.patch
new file mode 100644 (file)
index 0000000..193e6d0
--- /dev/null
@@ -0,0 +1,40 @@
+From 7336375734d65ecc82956b59a79cf5deccce880c Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Tue, 15 Dec 2020 20:47:20 -0800
+Subject: mm: unexport follow_pte_pmd
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit 7336375734d65ecc82956b59a79cf5deccce880c upstream.
+
+Patch series "simplify follow_pte a bit".
+
+This small series drops the unneeded follow_pte_pmd export, and
+simplifies the follow_pte family of functions a bit.
+
+This patch (of 2):
+
+follow_pte_pmd() is only used by the DAX code, which can't be modular.
+
+Link: https://lkml.kernel.org/r/20201029101432.47011-2-hch@lst.de
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -4313,7 +4313,6 @@ int follow_pte_pmd(struct mm_struct *mm,
+                                                   ptepp, pmdpp, ptlp)));
+       return res;
+ }
+-EXPORT_SYMBOL(follow_pte_pmd);
+ /**
+  * follow_pfn - look up PFN at a user virtual address
diff --git a/queue-5.4/ntfs-check-for-valid-standard-information-attribute.patch b/queue-5.4/ntfs-check-for-valid-standard-information-attribute.patch
new file mode 100644 (file)
index 0000000..2f460d2
--- /dev/null
@@ -0,0 +1,43 @@
+From 4dfe6bd94959222e18d512bdf15f6bf9edb9c27c Mon Sep 17 00:00:00 2001
+From: Rustam Kovhaev <rkovhaev@gmail.com>
+Date: Wed, 24 Feb 2021 12:00:30 -0800
+Subject: ntfs: check for valid standard information attribute
+
+From: Rustam Kovhaev <rkovhaev@gmail.com>
+
+commit 4dfe6bd94959222e18d512bdf15f6bf9edb9c27c upstream.
+
+Mounting a corrupted filesystem with NTFS resulted in a kernel crash.
+
+We should check for a valid STANDARD_INFORMATION attribute offset and
+length before trying to access it.
+
+Link: https://lkml.kernel.org/r/20210217155930.1506815-1-rkovhaev@gmail.com
+Link: https://syzkaller.appspot.com/bug?extid=c584225dabdea2f71969
+Signed-off-by: Rustam Kovhaev <rkovhaev@gmail.com>
+Reported-by: syzbot+c584225dabdea2f71969@syzkaller.appspotmail.com
+Tested-by: syzbot+c584225dabdea2f71969@syzkaller.appspotmail.com
+Acked-by: Anton Altaparmakov <anton@tuxera.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ntfs/inode.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/fs/ntfs/inode.c
++++ b/fs/ntfs/inode.c
+@@ -628,6 +628,12 @@ static int ntfs_read_locked_inode(struct
+       }
+       a = ctx->attr;
+       /* Get the standard information attribute value. */
++      if ((u8 *)a + le16_to_cpu(a->data.resident.value_offset)
++                      + le32_to_cpu(a->data.resident.value_length) >
++                      (u8 *)ctx->mrec + vol->mft_record_size) {
++              ntfs_error(vi->i_sb, "Corrupt standard information attribute in inode.");
++              goto unm_err_out;
++      }
+       si = (STANDARD_INFORMATION*)((u8*)a +
+                       le16_to_cpu(a->data.resident.value_offset));
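
The essence of the fix is an offset-plus-length bounds check against
the end of the MFT record buffer.  A simplified, self-contained sketch
with stand-in types (the real ATTR_RECORD layout and le16/le32
handling are more involved):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in for the resident part of an NTFS attribute record. */
struct resident_attr {
	uint16_t value_offset;	/* value's offset from the attribute */
	uint32_t value_length;	/* value's length in bytes */
};

/* True only when the attribute value lies entirely within the MFT
 * record buffer, mirroring the check the patch adds before the
 * STANDARD_INFORMATION value is dereferenced. */
static bool attr_value_in_bounds(const uint8_t *rec, size_t rec_size,
				 const uint8_t *attr,
				 const struct resident_attr *a)
{
	return attr + a->value_offset + a->value_length <=
	       rec + rec_size;
}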
diff --git a/queue-5.4/scripts-set-proper-openssl-include-dir-also-for-sign-file.patch b/queue-5.4/scripts-set-proper-openssl-include-dir-also-for-sign-file.patch
new file mode 100644 (file)
index 0000000..b1e6033
--- /dev/null
@@ -0,0 +1,28 @@
+From fe968c41ac4f4ec9ffe3c4cf16b72285f5e9674f Mon Sep 17 00:00:00 2001
+From: Rolf Eike Beer <eb@emlix.com>
+Date: Fri, 12 Feb 2021 08:22:27 +0100
+Subject: scripts: set proper OpenSSL include dir also for sign-file
+
+From: Rolf Eike Beer <eb@emlix.com>
+
+commit fe968c41ac4f4ec9ffe3c4cf16b72285f5e9674f upstream.
+
+Fixes: 2cea4a7a1885 ("scripts: use pkg-config to locate libcrypto")
+Signed-off-by: Rolf Eike Beer <eb@emlix.com>
+Cc: stable@vger.kernel.org # 5.6.x
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ scripts/Makefile |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/scripts/Makefile
++++ b/scripts/Makefile
+@@ -26,6 +26,7 @@ hostprogs-$(CONFIG_SYSTEM_EXTRA_CERTIFIC
+ HOSTCFLAGS_sortextable.o = -I$(srctree)/tools/include
+ HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include
++HOSTCFLAGS_sign-file.o = $(CRYPTO_CFLAGS)
+ HOSTLDLIBS_sign-file = $(CRYPTO_LIBS)
+ HOSTCFLAGS_extract-cert.o = $(CRYPTO_CFLAGS)
+ HOSTLDLIBS_extract-cert = $(CRYPTO_LIBS)
diff --git a/queue-5.4/scripts-use-pkg-config-to-locate-libcrypto.patch b/queue-5.4/scripts-use-pkg-config-to-locate-libcrypto.patch
new file mode 100644 (file)
index 0000000..43cb946
--- /dev/null
@@ -0,0 +1,44 @@
+From 2cea4a7a1885bd0c765089afc14f7ff0eb77864e Mon Sep 17 00:00:00 2001
+From: Rolf Eike Beer <eb@emlix.com>
+Date: Thu, 22 Nov 2018 16:40:49 +0100
+Subject: scripts: use pkg-config to locate libcrypto
+
+From: Rolf Eike Beer <eb@emlix.com>
+
+commit 2cea4a7a1885bd0c765089afc14f7ff0eb77864e upstream.
+
+Otherwise the build fails if the headers are not in the default location.
+While at it, also ask pkg-config for the libs, falling back to the
+existing value.
+
+Signed-off-by: Rolf Eike Beer <eb@emlix.com>
+Cc: stable@vger.kernel.org # 5.6.x
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ scripts/Makefile |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/scripts/Makefile
++++ b/scripts/Makefile
+@@ -10,6 +10,9 @@
+ HOST_EXTRACFLAGS += -I$(srctree)/tools/include
++CRYPTO_LIBS = $(shell pkg-config --libs libcrypto 2> /dev/null || echo -lcrypto)
++CRYPTO_CFLAGS = $(shell pkg-config --cflags libcrypto 2> /dev/null)
++
+ hostprogs-$(CONFIG_BUILD_BIN2C)  += bin2c
+ hostprogs-$(CONFIG_KALLSYMS)     += kallsyms
+ hostprogs-$(CONFIG_LOGO)         += pnmtologo
+@@ -23,8 +26,9 @@ hostprogs-$(CONFIG_SYSTEM_EXTRA_CERTIFIC
+ HOSTCFLAGS_sortextable.o = -I$(srctree)/tools/include
+ HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include
+-HOSTLDLIBS_sign-file = -lcrypto
+-HOSTLDLIBS_extract-cert = -lcrypto
++HOSTLDLIBS_sign-file = $(CRYPTO_LIBS)
++HOSTCFLAGS_extract-cert.o = $(CRYPTO_CFLAGS)
++HOSTLDLIBS_extract-cert = $(CRYPTO_LIBS)
+ always                := $(hostprogs-y) $(hostprogs-m)
diff --git a/queue-5.4/series b/queue-5.4/series
index b4fe32a0aa7b8297a02bb7466e398df92031f19e..dfeea42cab7266bb80f89d985efc78f5fc62d411 100644 (file)
@@ -2,3 +2,12 @@ bpf-fix-truncation-handling-for-mod32-dst-reg-wrt-zero.patch
 hid-make-arrays-usage-and-value-to-be-the-same.patch
 usb-quirks-sort-quirk-entries.patch
 usb-quirks-add-quirk-to-start-video-capture-on-elmo-l-12f-document-camera-reliable.patch
+ntfs-check-for-valid-standard-information-attribute.patch
+arm64-tegra-add-power-domain-for-tegra210-hda.patch
+scripts-use-pkg-config-to-locate-libcrypto.patch
+scripts-set-proper-openssl-include-dir-also-for-sign-file.patch
+mm-unexport-follow_pte_pmd.patch
+mm-simplify-follow_pte-pmd.patch
+kvm-do-not-assume-pte-is-writable-after-follow_pfn.patch
+mm-provide-a-saner-pte-walking-api-for-modules.patch
+kvm-use-kvm_pfn_t-for-local-pfn-variable-in-hva_to_pfn_remapped.patch