From: Greg Kroah-Hartman
Date: Thu, 16 Aug 2018 17:02:15 +0000 (+0200)
Subject: 4.4-stable patches
X-Git-Tag: v3.18.119~2
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=1d46e312e7a24c1a3398f250174015c2cfab16aa;p=thirdparty%2Fkernel%2Fstable-queue.git

4.4-stable patches

added patches:
      bluetooth-hidp-buffer-overflow-in-hidp_process_report.patch
      ioremap-update-pgtable-free-interfaces-with-addr.patch
      x86-mm-add-tlb-purge-to-free-pmd-pte-page-interfaces.patch
---

diff --git a/queue-4.4/bluetooth-hidp-buffer-overflow-in-hidp_process_report.patch b/queue-4.4/bluetooth-hidp-buffer-overflow-in-hidp_process_report.patch
new file mode 100644
index 00000000000..35f48bd906d
--- /dev/null
+++ b/queue-4.4/bluetooth-hidp-buffer-overflow-in-hidp_process_report.patch
@@ -0,0 +1,50 @@
+From 7992c18810e568b95c869b227137a2215702a805 Mon Sep 17 00:00:00 2001
+From: Mark Salyzyn
+Date: Tue, 31 Jul 2018 15:02:13 -0700
+Subject: Bluetooth: hidp: buffer overflow in hidp_process_report
+
+From: Mark Salyzyn
+
+commit 7992c18810e568b95c869b227137a2215702a805 upstream.
+
+CVE-2018-9363
+
+The buffer length is unsigned at all layers, but gets cast to int and
+checked in hidp_process_report and can lead to a buffer overflow.
+Switch len parameter to unsigned int to resolve issue.
+
+This affects 3.18 and newer kernels.
+
+Signed-off-by: Mark Salyzyn
+Fixes: a4b1b5877b514b276f0f31efe02388a9c2836728 ("HID: Bluetooth: hidp: make sure input buffers are big enough")
+Cc: Marcel Holtmann
+Cc: Johan Hedberg
+Cc: "David S. Miller"
+Cc: Kees Cook
+Cc: Benjamin Tissoires
+Cc: linux-bluetooth@vger.kernel.org
+Cc: netdev@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Cc: security@kernel.org
+Cc: kernel-team@android.com
+Acked-by: Kees Cook
+Signed-off-by: Marcel Holtmann
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ net/bluetooth/hidp/core.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/bluetooth/hidp/core.c
++++ b/net/bluetooth/hidp/core.c
+@@ -431,8 +431,8 @@ static void hidp_del_timer(struct hidp_s
+ 	del_timer(&session->timer);
+ }
+ 
+-static void hidp_process_report(struct hidp_session *session,
+-				int type, const u8 *data, int len, int intr)
++static void hidp_process_report(struct hidp_session *session, int type,
++				const u8 *data, unsigned int len, int intr)
+ {
+ 	if (len > HID_MAX_BUFFER_SIZE)
+ 		len = HID_MAX_BUFFER_SIZE;
diff --git a/queue-4.4/ioremap-update-pgtable-free-interfaces-with-addr.patch b/queue-4.4/ioremap-update-pgtable-free-interfaces-with-addr.patch
new file mode 100644
index 00000000000..0962ca761c5
--- /dev/null
+++ b/queue-4.4/ioremap-update-pgtable-free-interfaces-with-addr.patch
@@ -0,0 +1,176 @@
+From 785a19f9d1dd8a4ab2d0633be4656653bd3de1fc Mon Sep 17 00:00:00 2001
+From: Chintan Pandya
+Date: Wed, 27 Jun 2018 08:13:47 -0600
+Subject: ioremap: Update pgtable free interfaces with addr
+
+From: Chintan Pandya
+
+commit 785a19f9d1dd8a4ab2d0633be4656653bd3de1fc upstream.
+
+The following kernel panic was observed on ARM64 platform due to a stale
+TLB entry.
+
+ 1. ioremap with 4K size, a valid pte page table is set.
+ 2. iounmap it, its pte entry is set to 0.
+ 3. ioremap the same address with 2M size, update its pmd entry with
+    a new value.
+ 4. CPU may hit an exception because the old pmd entry is still in TLB,
+    which leads to a kernel panic.
+
+Commit b6bdb7517c3d ("mm/vmalloc: add interfaces to free unmapped page
+table") has addressed this panic by falling back to pte mappings in the
+above case on ARM64.
+
+To support pmd mappings in all cases, TLB purge needs to be performed
+in this case on ARM64.
+
+Add a new arg, 'addr', to pud_free_pmd_page() and pmd_free_pte_page()
+so that TLB purge can be added later in separate patches.
+
+[toshi.kani@hpe.com: merge changes, rewrite patch description]
+Fixes: 28ee90fe6048 ("x86/mm: implement free pmd/pte page interfaces")
+Signed-off-by: Chintan Pandya
+Signed-off-by: Toshi Kani
+Signed-off-by: Thomas Gleixner
+Cc: mhocko@suse.com
+Cc: akpm@linux-foundation.org
+Cc: hpa@zytor.com
+Cc: linux-mm@kvack.org
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: Will Deacon
+Cc: Joerg Roedel
+Cc: stable@vger.kernel.org
+Cc: Andrew Morton
+Cc: Michal Hocko
+Cc: "H. Peter Anvin"
+Cc:
+Link: https://lkml.kernel.org/r/20180627141348.21777-3-toshi.kani@hpe.com
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/arm64/mm/mmu.c           |    4 ++--
+ arch/x86/mm/pgtable.c         |   12 +++++++-----
+ include/asm-generic/pgtable.h |    8 ++++----
+ lib/ioremap.c                 |    4 ++--
+ 4 files changed, 15 insertions(+), 13 deletions(-)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -699,12 +699,12 @@ void *__init fixmap_remap_fdt(phys_addr_
+ }
+ 
+ #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+-int pud_free_pmd_page(pud_t *pud)
++int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+ 	return pud_none(*pud);
+ }
+ 
+-int pmd_free_pte_page(pmd_t *pmd)
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+ {
+ 	return pmd_none(*pmd);
+ }
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -680,11 +680,12 @@ int pmd_clear_huge(pmd_t *pmd)
+ /**
+  * pud_free_pmd_page - Clear pud entry and free pmd page.
+  * @pud: Pointer to a PUD.
++ * @addr: Virtual address associated with pud.
+  *
+  * Context: The pud range has been unmaped and TLB purged.
+  * Return: 1 if clearing the entry succeeded. 0 otherwise.
+  */
+-int pud_free_pmd_page(pud_t *pud)
++int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+ 	pmd_t *pmd;
+ 	int i;
+@@ -695,7 +696,7 @@ int pud_free_pmd_page(pud_t *pud)
+ 	pmd = (pmd_t *)pud_page_vaddr(*pud);
+ 
+ 	for (i = 0; i < PTRS_PER_PMD; i++)
+-		if (!pmd_free_pte_page(&pmd[i]))
++		if (!pmd_free_pte_page(&pmd[i], addr + (i * PMD_SIZE)))
+ 			return 0;
+ 
+ 	pud_clear(pud);
+@@ -707,11 +708,12 @@ int pud_free_pmd_page(pud_t *pud)
+ /**
+  * pmd_free_pte_page - Clear pmd entry and free pte page.
+  * @pmd: Pointer to a PMD.
++ * @addr: Virtual address associated with pmd.
+  *
+  * Context: The pmd range has been unmaped and TLB purged.
+  * Return: 1 if clearing the entry succeeded. 0 otherwise.
+  */
+-int pmd_free_pte_page(pmd_t *pmd)
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+ {
+ 	pte_t *pte;
+ 
+@@ -727,7 +729,7 @@ int pmd_free_pte_page(pmd_t *pmd)
+ 
+ #else /* !CONFIG_X86_64 */
+ 
+-int pud_free_pmd_page(pud_t *pud)
++int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+ 	return pud_none(*pud);
+ }
+@@ -736,7 +738,7 @@ int pud_free_pmd_page(pud_t *pud)
+  * Disable free page handling on x86-PAE. This assures that ioremap()
+  * does not update sync'd pmd entries. See vmalloc_sync_one().
+  */
+-int pmd_free_pte_page(pmd_t *pmd)
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+ {
+ 	return pmd_none(*pmd);
+ }
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -770,8 +770,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t
+ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
+ int pud_clear_huge(pud_t *pud);
+ int pmd_clear_huge(pmd_t *pmd);
+-int pud_free_pmd_page(pud_t *pud);
+-int pmd_free_pte_page(pmd_t *pmd);
++int pud_free_pmd_page(pud_t *pud, unsigned long addr);
++int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
+ #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+ static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+ {
+@@ -789,11 +789,11 @@ static inline int pmd_clear_huge(pmd_t *
+ {
+ 	return 0;
+ }
+-static inline int pud_free_pmd_page(pud_t *pud)
++static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+ 	return 0;
+ }
+-static inline int pmd_free_pte_page(pmd_t *pmd)
++static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+ {
+ 	return 0;
+ }
+--- a/lib/ioremap.c
++++ b/lib/ioremap.c
+@@ -84,7 +84,7 @@ static inline int ioremap_pmd_range(pud_
+ 		if (ioremap_pmd_enabled() &&
+ 		    ((next - addr) == PMD_SIZE) &&
+ 		    IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
+-		    pmd_free_pte_page(pmd)) {
++		    pmd_free_pte_page(pmd, addr)) {
+ 			if (pmd_set_huge(pmd, phys_addr + addr, prot))
+ 				continue;
+ 		}
+@@ -111,7 +111,7 @@ static inline int ioremap_pud_range(pgd_
+ 		if (ioremap_pud_enabled() &&
+ 		    ((next - addr) == PUD_SIZE) &&
+ 		    IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
+-		    pud_free_pmd_page(pud)) {
++		    pud_free_pmd_page(pud, addr)) {
+ 			if (pud_set_huge(pud, phys_addr + addr, prot))
+ 				continue;
+ 		}
diff --git a/queue-4.4/series b/queue-4.4/series
index 29aa211e03c..1410a9bca43 100644
--- a/queue-4.4/series
+++ b/queue-4.4/series
@@ -8,3 +8,6 @@ crypto-vmac-separate-tfm-and-request-context.patch
 crypto-blkcipher-fix-crash-flushing-dcache-in-error-path.patch
 crypto-ablkcipher-fix-crash-flushing-dcache-in-error-path.patch
 asoc-intel-cht_bsw_max98090_ti-fix-jack-initialization.patch
+bluetooth-hidp-buffer-overflow-in-hidp_process_report.patch
+ioremap-update-pgtable-free-interfaces-with-addr.patch
+x86-mm-add-tlb-purge-to-free-pmd-pte-page-interfaces.patch
diff --git a/queue-4.4/x86-mm-add-tlb-purge-to-free-pmd-pte-page-interfaces.patch b/queue-4.4/x86-mm-add-tlb-purge-to-free-pmd-pte-page-interfaces.patch
new file mode 100644
index 00000000000..cbfd4d6f75d
--- /dev/null
+++ b/queue-4.4/x86-mm-add-tlb-purge-to-free-pmd-pte-page-interfaces.patch
@@ -0,0 +1,124 @@
+From 5e0fb5df2ee871b841f96f9cb6a7f2784e96aa4e Mon Sep 17 00:00:00 2001
+From: Toshi Kani
+Date: Wed, 27 Jun 2018 08:13:48 -0600
+Subject: x86/mm: Add TLB purge to free pmd/pte page interfaces
+
+From: Toshi Kani
+
+commit 5e0fb5df2ee871b841f96f9cb6a7f2784e96aa4e upstream.
+
+ioremap() calls pud_free_pmd_page() / pmd_free_pte_page() when it creates
+a pud / pmd map. The following preconditions are met at their entry.
+ - All pte entries for a target pud/pmd address range have been cleared.
+ - System-wide TLB purges have been performed for a target pud/pmd address
+   range.
+
+The preconditions assure that there is no stale TLB entry for the range.
+Speculation may not cache TLB entries since it requires all levels of page
+entries, including ptes, to have P & A-bits set for an associated address.
+However, speculation may cache pud/pmd entries (paging-structure caches)
+when they have P-bit set.
+
+Add a system-wide TLB purge (INVLPG) to a single page after clearing
+pud/pmd entry's P-bit.
+
+SDM 4.10.4.1, Operation that Invalidate TLBs and Paging-Structure Caches,
+states that:
+ INVLPG invalidates all paging-structure caches associated with the
+ current PCID regardless of the linear addresses to which they correspond.
+
+Fixes: 28ee90fe6048 ("x86/mm: implement free pmd/pte page interfaces")
+Signed-off-by: Toshi Kani
+Signed-off-by: Thomas Gleixner
+Cc: mhocko@suse.com
+Cc: akpm@linux-foundation.org
+Cc: hpa@zytor.com
+Cc: cpandya@codeaurora.org
+Cc: linux-mm@kvack.org
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: Joerg Roedel
+Cc: stable@vger.kernel.org
+Cc: Andrew Morton
+Cc: Michal Hocko
+Cc: "H. Peter Anvin"
+Cc:
+Link: https://lkml.kernel.org/r/20180627141348.21777-4-toshi.kani@hpe.com
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/mm/pgtable.c |   38 +++++++++++++++++++++++++++++++-------
+ 1 file changed, 31 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -682,24 +682,44 @@ int pmd_clear_huge(pmd_t *pmd)
+  * @pud: Pointer to a PUD.
+  * @addr: Virtual address associated with pud.
+  *
+- * Context: The pud range has been unmaped and TLB purged.
++ * Context: The pud range has been unmapped and TLB purged.
+  * Return: 1 if clearing the entry succeeded. 0 otherwise.
++ *
++ * NOTE: Callers must allow a single page allocation.
+  */
+ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+ {
+-	pmd_t *pmd;
++	pmd_t *pmd, *pmd_sv;
++	pte_t *pte;
+ 	int i;
+ 
+ 	if (pud_none(*pud))
+ 		return 1;
+ 
+ 	pmd = (pmd_t *)pud_page_vaddr(*pud);
+-
+-	for (i = 0; i < PTRS_PER_PMD; i++)
+-		if (!pmd_free_pte_page(&pmd[i], addr + (i * PMD_SIZE)))
+-			return 0;
++	pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
++	if (!pmd_sv)
++		return 0;
++
++	for (i = 0; i < PTRS_PER_PMD; i++) {
++		pmd_sv[i] = pmd[i];
++		if (!pmd_none(pmd[i]))
++			pmd_clear(&pmd[i]);
++	}
+ 
+ 	pud_clear(pud);
++
++	/* INVLPG to clear all paging-structure caches */
++	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
++
++	for (i = 0; i < PTRS_PER_PMD; i++) {
++		if (!pmd_none(pmd_sv[i])) {
++			pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
++			free_page((unsigned long)pte);
++		}
++	}
++
++	free_page((unsigned long)pmd_sv);
+ 	free_page((unsigned long)pmd);
+ 
+ 	return 1;
+@@ -710,7 +730,7 @@ int pud_free_pmd_page(pud_t *pud, unsign
+  * @pmd: Pointer to a PMD.
+  * @addr: Virtual address associated with pmd.
+  *
+- * Context: The pmd range has been unmaped and TLB purged.
++ * Context: The pmd range has been unmapped and TLB purged.
+  * Return: 1 if clearing the entry succeeded. 0 otherwise.
+  */
+ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+@@ -722,6 +742,10 @@ int pmd_free_pte_page(pmd_t *pmd, unsign
+ 
+ 	pte = (pte_t *)pmd_page_vaddr(*pmd);
+ 	pmd_clear(pmd);
++
++	/* INVLPG to clear all paging-structure caches */
++	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
++
+ 	free_page((unsigned long)pte);
+ 
+ 	return 1;
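
A note on the bug class fixed by the hidp patch above: the report length
is unsigned at every transport layer, but the old prototype took it as a
signed int, so a huge value can appear negative, slip past the upper-bound
check, and then reach a copy routine that takes a size_t. The stand-alone
C sketch below is illustrative only (MAX_BUF and the clamp_* names are
invented; this is not kernel code):

#include <stdio.h>

#define MAX_BUF 64	/* stand-in for HID_MAX_BUFFER_SIZE */

/* Buggy shape: the length arrives as a signed int, as before the patch. */
static int clamp_signed(int len)
{
	if (len > MAX_BUF)	/* 0x80000000 is negative here on the usual
				 * two's-complement ABIs: the check passes */
		len = MAX_BUF;
	return len;		/* a negative len returned here would become a
				 * huge size_t if handed to memcpy() */
}

/* Fixed shape: the length stays unsigned end to end, as after the patch. */
static unsigned int clamp_unsigned(unsigned int len)
{
	if (len > MAX_BUF)	/* large values are clamped as intended */
		len = MAX_BUF;
	return len;
}

int main(void)
{
	unsigned int evil = 0x80000000u;

	printf("signed clamp:   %d\n", clamp_signed((int)evil));	/* negative */
	printf("unsigned clamp: %u\n", clamp_unsigned(evil));		/* 64 */
	return 0;
}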
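The stale-TLB scenario in the ioremap patch comes down to ordering: the
CPU may keep a private copy of a translation, and nothing invalidates that
copy between the 4K unmap and the 2M remap. Below is a toy user-space
model of that window; every name is invented for illustration and the
"TLB" is just a cached variable, not kernel code:

#include <stdio.h>
#include <stdbool.h>

static unsigned long pmd_entry;		/* the page-table entry in memory */
static unsigned long cached_entry;	/* the CPU's private (TLB) copy */
static bool cache_valid;

static void cpu_walk_and_cache(void)
{
	cached_entry = pmd_entry;
	cache_valid = true;
}

static void tlb_flush(void)
{
	cache_valid = false;
}

static unsigned long cpu_translate(void)
{
	return cache_valid ? cached_entry : pmd_entry;
}

int main(void)
{
	pmd_entry = 0x1000;	/* 1. ioremap 4K: pmd points to a pte page */
	cpu_walk_and_cache();	/*    a page walk caches the translation */

	pmd_entry = 0;		/* 2. iounmap: entry cleared, but the cached
				 *    copy survives; nothing flushes here */

	pmd_entry = 0x200000;	/* 3. ioremap 2M: new huge-page entry */

	/* 4. the CPU can still use the stale copy -> the upstream panic */
	printf("translation used: %#lx (stale)\n", cpu_translate());

	tlb_flush();		/* the fix: purge before the slot is reused */
	printf("after flush:      %#lx\n", cpu_translate());
	return 0;
}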
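The x86/mm patch then enforces a single invariant in pud_free_pmd_page():
a page-table page may be freed, and hence reused, only after the entries
pointing at it are cleared and the paging-structure caches are purged.
Here is a compilable user-space sketch of that clear/flush/free ordering,
with malloc()/free() standing in for the kernel's page allocator and an
empty stub for the flush; none of these helpers are the kernel's own:

#include <stdlib.h>
#include <string.h>

#define PTRS_PER_PMD 512		/* as on x86-64 */

typedef unsigned long pmd_t;		/* toy entry: just a pointer value */

static void flush_caches(void)
{
	/* stand-in for flush_tlb_kernel_range(); on real hardware the
	 * INVLPG also drops paging-structure caches for the current
	 * PCID regardless of address */
}

static int free_pmd_page(pmd_t *pmd)
{
	pmd_t *snapshot;
	int i;

	/* 1. snapshot the entries; the patch allocates a page for this,
	 *    hence its "callers must allow a single page allocation" note */
	snapshot = malloc(PTRS_PER_PMD * sizeof(*snapshot));
	if (!snapshot)
		return 0;
	memcpy(snapshot, pmd, PTRS_PER_PMD * sizeof(*snapshot));

	/* 2. clear the entries before anything is freed */
	memset(pmd, 0, PTRS_PER_PMD * sizeof(*pmd));

	/* 3. flush BEFORE freeing, so no CPU can reach a page the
	 *    allocator may already have handed out elsewhere */
	flush_caches();

	/* 4. only now free the lower-level pages, then the snapshot */
	for (i = 0; i < PTRS_PER_PMD; i++)
		if (snapshot[i])
			free((void *)snapshot[i]);
	free(snapshot);
	return 1;
}

int main(void)
{
	static pmd_t pmd[PTRS_PER_PMD];

	pmd[0] = (pmd_t)malloc(64);	/* one populated entry */
	return free_pmd_page(pmd) ? 0 : 1;
}

Freeing before the flush is exactly the window the patch closes: a
speculative walk through a stale pud/pmd entry could otherwise land in
memory that has already been reused.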