git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.4-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 16 Aug 2018 17:02:15 +0000 (19:02 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 16 Aug 2018 17:02:15 +0000 (19:02 +0200)
added patches:
bluetooth-hidp-buffer-overflow-in-hidp_process_report.patch
ioremap-update-pgtable-free-interfaces-with-addr.patch
x86-mm-add-tlb-purge-to-free-pmd-pte-page-interfaces.patch

queue-4.4/bluetooth-hidp-buffer-overflow-in-hidp_process_report.patch [new file with mode: 0644]
queue-4.4/ioremap-update-pgtable-free-interfaces-with-addr.patch [new file with mode: 0644]
queue-4.4/series
queue-4.4/x86-mm-add-tlb-purge-to-free-pmd-pte-page-interfaces.patch [new file with mode: 0644]

diff --git a/queue-4.4/bluetooth-hidp-buffer-overflow-in-hidp_process_report.patch b/queue-4.4/bluetooth-hidp-buffer-overflow-in-hidp_process_report.patch
new file mode 100644 (file)
index 0000000..35f48bd
--- /dev/null
@@ -0,0 +1,50 @@
+From 7992c18810e568b95c869b227137a2215702a805 Mon Sep 17 00:00:00 2001
+From: Mark Salyzyn <salyzyn@android.com>
+Date: Tue, 31 Jul 2018 15:02:13 -0700
+Subject: Bluetooth: hidp: buffer overflow in hidp_process_report
+
+From: Mark Salyzyn <salyzyn@android.com>
+
+commit 7992c18810e568b95c869b227137a2215702a805 upstream.
+
+CVE-2018-9363
+
+The buffer length is unsigned at all layers, but gets cast to int and
+checked in hidp_process_report(), which can lead to a buffer overflow.
+Switch the len parameter to unsigned int to resolve the issue.
+
+This affects 3.18 and newer kernels.
+
+Signed-off-by: Mark Salyzyn <salyzyn@android.com>
+Fixes: a4b1b5877b514b276f0f31efe02388a9c2836728 ("HID: Bluetooth: hidp: make sure input buffers are big enough")
+Cc: Marcel Holtmann <marcel@holtmann.org>
+Cc: Johan Hedberg <johan.hedberg@gmail.com>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+Cc: linux-bluetooth@vger.kernel.org
+Cc: netdev@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Cc: security@kernel.org
+Cc: kernel-team@android.com
+Acked-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/bluetooth/hidp/core.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/bluetooth/hidp/core.c
++++ b/net/bluetooth/hidp/core.c
+@@ -431,8 +431,8 @@ static void hidp_del_timer(struct hidp_s
+               del_timer(&session->timer);
+ }
+-static void hidp_process_report(struct hidp_session *session,
+-                              int type, const u8 *data, int len, int intr)
++static void hidp_process_report(struct hidp_session *session, int type,
++                              const u8 *data, unsigned int len, int intr)
+ {
+       if (len > HID_MAX_BUFFER_SIZE)
+               len = HID_MAX_BUFFER_SIZE;
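For illustration only (not part of the queued patch): the signedness hazard the hunk above removes can be reproduced in a few lines of user-space C. MAX_BUF and the two helpers below are stand-ins for HID_MAX_BUFFER_SIZE and hidp_process_report(); the point is that a large unsigned length reinterpreted as a signed int slips past the upper-bound clamp and then becomes an enormous size_t at the copy.

#include <stdio.h>
#include <stddef.h>

#define MAX_BUF 64	/* illustrative stand-in for HID_MAX_BUFFER_SIZE */

/* Pre-fix pattern: the length arrives as a signed int.  A huge unsigned
 * value reinterpreted as int is negative on common platforms, so the clamp
 * never triggers, and the later conversion to size_t yields a giant size. */
static void check_signed(int len)
{
	if (len > MAX_BUF)
		len = MAX_BUF;
	printf("signed   check: would copy %zu bytes\n", (size_t)len);
}

/* Post-fix pattern: keeping the length unsigned makes the clamp reliable. */
static void check_unsigned(unsigned int len)
{
	if (len > MAX_BUF)
		len = MAX_BUF;
	printf("unsigned check: would copy %zu bytes\n", (size_t)len);
}

int main(void)
{
	unsigned int wire_len = 0x80000010u;	/* attacker-influenced length */

	check_signed((int)wire_len);	/* clamp bypassed, copy size explodes */
	check_unsigned(wire_len);	/* clamped to MAX_BUF */
	return 0;
}

Keeping len unsigned, as the patch does, makes the bounds check hold regardless of what the lower layers pass in.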
diff --git a/queue-4.4/ioremap-update-pgtable-free-interfaces-with-addr.patch b/queue-4.4/ioremap-update-pgtable-free-interfaces-with-addr.patch
new file mode 100644 (file)
index 0000000..0962ca7
--- /dev/null
@@ -0,0 +1,176 @@
+From 785a19f9d1dd8a4ab2d0633be4656653bd3de1fc Mon Sep 17 00:00:00 2001
+From: Chintan Pandya <cpandya@codeaurora.org>
+Date: Wed, 27 Jun 2018 08:13:47 -0600
+Subject: ioremap: Update pgtable free interfaces with addr
+
+From: Chintan Pandya <cpandya@codeaurora.org>
+
+commit 785a19f9d1dd8a4ab2d0633be4656653bd3de1fc upstream.
+
+The following kernel panic was observed on an ARM64 platform due to a
+stale TLB entry.
+
+ 1. ioremap with 4K size, a valid pte page table is set.
+ 2. iounmap it, its pte entry is set to 0.
+ 3. ioremap the same address with 2M size, update its pmd entry with
+    a new value.
+ 4. CPU may hit an exception because the old pmd entry is still in TLB,
+    which leads to a kernel panic.
+
+Commit b6bdb7517c3d ("mm/vmalloc: add interfaces to free unmapped page
+table") has addressed this panic by falling back to pte mappings in the
+above case on ARM64.
+
+To support pmd mappings in all cases, a TLB purge needs to be performed
+in this case on ARM64.
+
+Add a new arg, 'addr', to pud_free_pmd_page() and pmd_free_pte_page()
+so that the TLB purge can be added later in separate patches.
+
+[toshi.kani@hpe.com: merge changes, rewrite patch description]
+Fixes: 28ee90fe6048 ("x86/mm: implement free pmd/pte page interfaces")
+Signed-off-by: Chintan Pandya <cpandya@codeaurora.org>
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: mhocko@suse.com
+Cc: akpm@linux-foundation.org
+Cc: hpa@zytor.com
+Cc: linux-mm@kvack.org
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: stable@vger.kernel.org
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/20180627141348.21777-3-toshi.kani@hpe.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/mm/mmu.c           |    4 ++--
+ arch/x86/mm/pgtable.c         |   12 +++++++-----
+ include/asm-generic/pgtable.h |    8 ++++----
+ lib/ioremap.c                 |    4 ++--
+ 4 files changed, 15 insertions(+), 13 deletions(-)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -699,12 +699,12 @@ void *__init fixmap_remap_fdt(phys_addr_
+ }
+ #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+-int pud_free_pmd_page(pud_t *pud)
++int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+       return pud_none(*pud);
+ }
+-int pmd_free_pte_page(pmd_t *pmd)
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+ {
+       return pmd_none(*pmd);
+ }
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -680,11 +680,12 @@ int pmd_clear_huge(pmd_t *pmd)
+ /**
+  * pud_free_pmd_page - Clear pud entry and free pmd page.
+  * @pud: Pointer to a PUD.
++ * @addr: Virtual address associated with pud.
+  *
+  * Context: The pud range has been unmaped and TLB purged.
+  * Return: 1 if clearing the entry succeeded. 0 otherwise.
+  */
+-int pud_free_pmd_page(pud_t *pud)
++int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+       pmd_t *pmd;
+       int i;
+@@ -695,7 +696,7 @@ int pud_free_pmd_page(pud_t *pud)
+       pmd = (pmd_t *)pud_page_vaddr(*pud);
+       for (i = 0; i < PTRS_PER_PMD; i++)
+-              if (!pmd_free_pte_page(&pmd[i]))
++              if (!pmd_free_pte_page(&pmd[i], addr + (i * PMD_SIZE)))
+                       return 0;
+       pud_clear(pud);
+@@ -707,11 +708,12 @@ int pud_free_pmd_page(pud_t *pud)
+ /**
+  * pmd_free_pte_page - Clear pmd entry and free pte page.
+  * @pmd: Pointer to a PMD.
++ * @addr: Virtual address associated with pmd.
+  *
+  * Context: The pmd range has been unmaped and TLB purged.
+  * Return: 1 if clearing the entry succeeded. 0 otherwise.
+  */
+-int pmd_free_pte_page(pmd_t *pmd)
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+ {
+       pte_t *pte;
+@@ -727,7 +729,7 @@ int pmd_free_pte_page(pmd_t *pmd)
+ #else /* !CONFIG_X86_64 */
+-int pud_free_pmd_page(pud_t *pud)
++int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+       return pud_none(*pud);
+ }
+@@ -736,7 +738,7 @@ int pud_free_pmd_page(pud_t *pud)
+  * Disable free page handling on x86-PAE. This assures that ioremap()
+  * does not update sync'd pmd entries. See vmalloc_sync_one().
+  */
+-int pmd_free_pte_page(pmd_t *pmd)
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+ {
+       return pmd_none(*pmd);
+ }
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -770,8 +770,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t
+ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
+ int pud_clear_huge(pud_t *pud);
+ int pmd_clear_huge(pmd_t *pmd);
+-int pud_free_pmd_page(pud_t *pud);
+-int pmd_free_pte_page(pmd_t *pmd);
++int pud_free_pmd_page(pud_t *pud, unsigned long addr);
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
+ #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+ static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+ {
+@@ -789,11 +789,11 @@ static inline int pmd_clear_huge(pmd_t *
+ {
+       return 0;
+ }
+-static inline int pud_free_pmd_page(pud_t *pud)
++static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+       return 0;
+ }
+-static inline int pmd_free_pte_page(pmd_t *pmd)
++static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+ {
+       return 0;
+ }
+--- a/lib/ioremap.c
++++ b/lib/ioremap.c
+@@ -84,7 +84,7 @@ static inline int ioremap_pmd_range(pud_
+               if (ioremap_pmd_enabled() &&
+                   ((next - addr) == PMD_SIZE) &&
+                   IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
+-                  pmd_free_pte_page(pmd)) {
++                  pmd_free_pte_page(pmd, addr)) {
+                       if (pmd_set_huge(pmd, phys_addr + addr, prot))
+                               continue;
+               }
+@@ -111,7 +111,7 @@ static inline int ioremap_pud_range(pgd_
+               if (ioremap_pud_enabled() &&
+                   ((next - addr) == PUD_SIZE) &&
+                   IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
+-                  pud_free_pmd_page(pud)) {
++                  pud_free_pmd_page(pud, addr)) {
+                       if (pud_set_huge(pud, phys_addr + addr, prot))
+                               continue;
+               }
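For illustration only (not part of the queued patch): the sketch below shows why threading 'addr' through the interfaces matters. The constants are assumptions for x86-64 with 4 KiB pages and the base address is hypothetical; the loop mirrors the address arithmetic the patched pud_free_pmd_page() uses, handing each pmd slot the virtual address addr + i * PMD_SIZE so the TLB purge added in the follow-up patch has a concrete range to flush.

#include <stdio.h>

/* Assumed x86-64 / 4K-page geometry; illustrative only. */
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)	/* 2 MiB per pmd entry */
#define PTRS_PER_PMD	512

int main(void)
{
	/* Hypothetical ioremap virtual address handed down by the callers. */
	unsigned long addr = 0xffffc90000000000UL;

	/* Entry i of the pmd page maps the PMD_SIZE-sized slice that starts
	 * at addr + i * PMD_SIZE (only the first few of PTRS_PER_PMD shown). */
	for (int i = 0; i < 4; i++)
		printf("pmd[%d] covers %#lx - %#lx\n",
		       i,
		       addr + (unsigned long)i * PMD_SIZE,
		       addr + (unsigned long)(i + 1) * PMD_SIZE - 1);
	return 0;
}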
diff --git a/queue-4.4/series b/queue-4.4/series
index 29aa211e03cabe4c843dd42b2aebdaffc45b2b82..1410a9bca433a9a7252e28031de209b903ff5fec 100644 (file)
@@ -8,3 +8,6 @@ crypto-vmac-separate-tfm-and-request-context.patch
 crypto-blkcipher-fix-crash-flushing-dcache-in-error-path.patch
 crypto-ablkcipher-fix-crash-flushing-dcache-in-error-path.patch
 asoc-intel-cht_bsw_max98090_ti-fix-jack-initialization.patch
+bluetooth-hidp-buffer-overflow-in-hidp_process_report.patch
+ioremap-update-pgtable-free-interfaces-with-addr.patch
+x86-mm-add-tlb-purge-to-free-pmd-pte-page-interfaces.patch
diff --git a/queue-4.4/x86-mm-add-tlb-purge-to-free-pmd-pte-page-interfaces.patch b/queue-4.4/x86-mm-add-tlb-purge-to-free-pmd-pte-page-interfaces.patch
new file mode 100644 (file)
index 0000000..cbfd4d6
--- /dev/null
@@ -0,0 +1,124 @@
+From 5e0fb5df2ee871b841f96f9cb6a7f2784e96aa4e Mon Sep 17 00:00:00 2001
+From: Toshi Kani <toshi.kani@hpe.com>
+Date: Wed, 27 Jun 2018 08:13:48 -0600
+Subject: x86/mm: Add TLB purge to free pmd/pte page interfaces
+
+From: Toshi Kani <toshi.kani@hpe.com>
+
+commit 5e0fb5df2ee871b841f96f9cb6a7f2784e96aa4e upstream.
+
+ioremap() calls pud_free_pmd_page() / pmd_free_pte_page() when it creates
+a pud / pmd map.  The following preconditions are met at their entry.
+ - All pte entries for a target pud/pmd address range have been cleared.
+ - System-wide TLB purges have been performed for a target pud/pmd address
+   range.
+
+The preconditions ensure that there is no stale TLB entry for the range.
+Speculation may not cache TLB entries since it requires all levels of page
+entries, including ptes, to have the P and A bits set for an associated
+address.  However, speculation may cache pud/pmd entries (paging-structure
+caches) when they have the P-bit set.
+
+Add a system-wide TLB purge (INVLPG) to a single page after clearing
+pud/pmd entry's P-bit.
+
+SDM 4.10.4.1, Operations that Invalidate TLBs and Paging-Structure Caches,
+states that:
+  INVLPG invalidates all paging-structure caches associated with the
+  current PCID regardless of the linear addresses to which they correspond.
+
+Fixes: 28ee90fe6048 ("x86/mm: implement free pmd/pte page interfaces")
+Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: mhocko@suse.com
+Cc: akpm@linux-foundation.org
+Cc: hpa@zytor.com
+Cc: cpandya@codeaurora.org
+Cc: linux-mm@kvack.org
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: stable@vger.kernel.org
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/20180627141348.21777-4-toshi.kani@hpe.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/pgtable.c |   38 +++++++++++++++++++++++++++++++-------
+ 1 file changed, 31 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -682,24 +682,44 @@ int pmd_clear_huge(pmd_t *pmd)
+  * @pud: Pointer to a PUD.
+  * @addr: Virtual address associated with pud.
+  *
+- * Context: The pud range has been unmaped and TLB purged.
++ * Context: The pud range has been unmapped and TLB purged.
+  * Return: 1 if clearing the entry succeeded. 0 otherwise.
++ *
++ * NOTE: Callers must allow a single page allocation.
+  */
+ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+-      pmd_t *pmd;
++      pmd_t *pmd, *pmd_sv;
++      pte_t *pte;
+       int i;
+       if (pud_none(*pud))
+               return 1;
+       pmd = (pmd_t *)pud_page_vaddr(*pud);
+-
+-      for (i = 0; i < PTRS_PER_PMD; i++)
+-              if (!pmd_free_pte_page(&pmd[i], addr + (i * PMD_SIZE)))
+-                      return 0;
++      pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
++      if (!pmd_sv)
++              return 0;
++
++      for (i = 0; i < PTRS_PER_PMD; i++) {
++              pmd_sv[i] = pmd[i];
++              if (!pmd_none(pmd[i]))
++                      pmd_clear(&pmd[i]);
++      }
+       pud_clear(pud);
++
++      /* INVLPG to clear all paging-structure caches */
++      flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
++
++      for (i = 0; i < PTRS_PER_PMD; i++) {
++              if (!pmd_none(pmd_sv[i])) {
++                      pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
++                      free_page((unsigned long)pte);
++              }
++      }
++
++      free_page((unsigned long)pmd_sv);
+       free_page((unsigned long)pmd);
+       return 1;
+@@ -710,7 +730,7 @@ int pud_free_pmd_page(pud_t *pud, unsign
+  * @pmd: Pointer to a PMD.
+  * @addr: Virtual address associated with pmd.
+  *
+- * Context: The pmd range has been unmaped and TLB purged.
++ * Context: The pmd range has been unmapped and TLB purged.
+  * Return: 1 if clearing the entry succeeded. 0 otherwise.
+  */
+ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+@@ -722,6 +742,10 @@ int pmd_free_pte_page(pmd_t *pmd, unsign
+       pte = (pte_t *)pmd_page_vaddr(*pmd);
+       pmd_clear(pmd);
++
++      /* INVLPG to clear all paging-structure caches */
++      flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
++
+       free_page((unsigned long)pte);
+       return 1;
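For illustration only (not part of the queued patch): a hedged user-space C model of the ordering the patched x86 pud_free_pmd_page() now enforces. The types and the flush stub are stand-ins, not kernel APIs, but the four steps mirror the hunk above: snapshot the pmd entries, clear them, issue the INVLPG-style purge, and only then free the backing pte pages.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in types and helpers; not kernel APIs.  The point is the ordering:
 * nothing a stale paging-structure-cache entry might still reference is
 * freed before the flush. */

#define PTRS_PER_PMD_MODEL 8		/* shrunk for the demo; 512 on x86-64 */
#define PAGE_SIZE_MODEL    4096UL

struct pmd_entry { void *pte_page; };

static void flush_tlb_kernel_range_model(unsigned long start, unsigned long end)
{
	printf("flush TLB + paging-structure caches for [%#lx, %#lx]\n", start, end);
}

static int pud_free_pmd_page_model(struct pmd_entry *pmd, unsigned long addr)
{
	struct pmd_entry *pmd_sv;
	int i;

	/* 1. Snapshot the entries so their pte pages can be freed later. */
	pmd_sv = calloc(PTRS_PER_PMD_MODEL, sizeof(*pmd_sv));
	if (!pmd_sv)
		return 0;
	memcpy(pmd_sv, pmd, PTRS_PER_PMD_MODEL * sizeof(*pmd));

	/* 2. Clear the live entries (pmd_clear()/pud_clear() in the kernel). */
	memset(pmd, 0, PTRS_PER_PMD_MODEL * sizeof(*pmd));

	/* 3. Purge: after this, no CPU can still walk the old entries. */
	flush_tlb_kernel_range_model(addr, addr + PAGE_SIZE_MODEL - 1);

	/* 4. Only now is it safe to free the pages the old entries pointed to. */
	for (i = 0; i < PTRS_PER_PMD_MODEL; i++)
		free(pmd_sv[i].pte_page);	/* free(NULL) is a no-op */
	free(pmd_sv);
	return 1;
}

int main(void)
{
	struct pmd_entry pmd[PTRS_PER_PMD_MODEL] = {
		{ malloc(64) }, { malloc(64) },	/* two populated slots */
	};

	return pud_free_pmd_page_model(pmd, 0xffffc90000000000UL) ? 0 : 1;
}

Putting the purge before the frees is the substance of the fix: previously a pte page could be released while a speculatively cached pmd entry still pointed at it.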