From: Greg Kroah-Hartman
Date: Fri, 29 Nov 2019 10:04:23 +0000 (+0100)
Subject: drop some 4.14 i386 patches that were not ready for prime time
X-Git-Tag: v4.14.157~2
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=a19f1ef9310a30b0205ec60a117889facdc5543f;p=thirdparty%2Fkernel%2Fstable-queue.git

drop some 4.14 i386 patches that were not ready for prime time
---
diff --git a/queue-4.14/selftests-x86-mov_ss_trap-fix-the-sysenter-test.patch b/queue-4.14/selftests-x86-mov_ss_trap-fix-the-sysenter-test.patch
deleted file mode 100644
index ea812eaaf80..00000000000
--- a/queue-4.14/selftests-x86-mov_ss_trap-fix-the-sysenter-test.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-From 8caa016bfc129f2c925d52da43022171d1d1de91 Mon Sep 17 00:00:00 2001
-From: Andy Lutomirski
-Date: Wed, 20 Nov 2019 12:59:13 -0800
-Subject: selftests/x86/mov_ss_trap: Fix the SYSENTER test
-
-From: Andy Lutomirski
-
-commit 8caa016bfc129f2c925d52da43022171d1d1de91 upstream.
-
-For reasons that I haven't quite fully diagnosed, running
-mov_ss_trap_32 on a 32-bit kernel results in an infinite loop in
-userspace. This appears to be because the hacky SYSENTER test
-doesn't segfault as desired; instead it corrupts the program state
-such that it infinite loops.
-
-Fix it by explicitly clearing EBP before doing SYSENTER. This will
-give a more reliable segfault.
-
-Fixes: 59c2a7226fc5 ("x86/selftests: Add mov_to_ss test")
-Signed-off-by: Andy Lutomirski
-Signed-off-by: Peter Zijlstra (Intel)
-Cc: stable@kernel.org
-Signed-off-by: Greg Kroah-Hartman
-
----
- tools/testing/selftests/x86/mov_ss_trap.c | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
---- a/tools/testing/selftests/x86/mov_ss_trap.c
-+++ b/tools/testing/selftests/x86/mov_ss_trap.c
-@@ -257,7 +257,8 @@ int main()
- 		err(1, "sigaltstack");
- 	sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND | SA_ONSTACK);
- 	nr = SYS_getpid;
--	asm volatile ("mov %[ss], %%ss; SYSENTER" : "+a" (nr)
-+	/* Clear EBP first to make sure we segfault cleanly. */
-+	asm volatile ("xorl %%ebp, %%ebp; mov %[ss], %%ss; SYSENTER" : "+a" (nr)
- 		      : [ss] "m" (ss) : "flags", "rcx"
- #ifdef __x86_64__
- 		      , "r11"
diff --git a/queue-4.14/selftests-x86-sigreturn-32-invalidate-ds-and-es-when-abusing-the-kernel.patch b/queue-4.14/selftests-x86-sigreturn-32-invalidate-ds-and-es-when-abusing-the-kernel.patch
deleted file mode 100644
index ff1a1c94a73..00000000000
--- a/queue-4.14/selftests-x86-sigreturn-32-invalidate-ds-and-es-when-abusing-the-kernel.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-From 4d2fa82d98d2d296043a04eb517d7dbade5b13b8 Mon Sep 17 00:00:00 2001
-From: Andy Lutomirski
-Date: Wed, 20 Nov 2019 11:58:32 -0800
-Subject: selftests/x86/sigreturn/32: Invalidate DS and ES when abusing the kernel
-
-From: Andy Lutomirski
-
-commit 4d2fa82d98d2d296043a04eb517d7dbade5b13b8 upstream.
-
-If the kernel accidentally uses DS or ES while the user values are
-loaded, it will work fine for sane userspace. In the interest of
-simulating maximally insane userspace, make sigreturn_32 zero out DS
-and ES for the nasty parts so that inadvertent use of these segments
-will crash.
-
-Signed-off-by: Andy Lutomirski
-Signed-off-by: Peter Zijlstra (Intel)
-Cc: stable@kernel.org
-Signed-off-by: Greg Kroah-Hartman
-
----
- tools/testing/selftests/x86/sigreturn.c | 13 +++++++++++++
- 1 file changed, 13 insertions(+)
-
---- a/tools/testing/selftests/x86/sigreturn.c
-+++ b/tools/testing/selftests/x86/sigreturn.c
-@@ -459,6 +459,19 @@ static void sigusr1(int sig, siginfo_t *
- 	ctx->uc_mcontext.gregs[REG_SP] = (unsigned long)0x8badf00d5aadc0deULL;
- 	ctx->uc_mcontext.gregs[REG_CX] = 0;
- 
-+#ifdef __i386__
-+	/*
-+	 * Make sure the kernel doesn't inadvertently use DS or ES-relative
-+	 * accesses in a region where user DS or ES is loaded.
-+	 *
-+	 * Skip this for 64-bit builds because long mode doesn't care about
-+	 * DS and ES and skipping it increases test coverage a little bit,
-+	 * since 64-bit kernels can still run the 32-bit build.
-+	 */
-+	ctx->uc_mcontext.gregs[REG_DS] = 0;
-+	ctx->uc_mcontext.gregs[REG_ES] = 0;
-+#endif
-+
- 	memcpy(&requested_regs, &ctx->uc_mcontext.gregs, sizeof(gregset_t));
- 	requested_regs[REG_CX] = *ssptr(ctx);	/* The asm code does this. */
- 
diff --git a/queue-4.14/series b/queue-4.14/series
index 154f15200fc..69fdf6256a7 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -177,11 +177,6 @@ x86-insn-fix-awk-regexp-warnings.patch
 x86-speculation-fix-incorrect-mds-taa-mitigation-status.patch
 x86-speculation-fix-redundant-mds-mitigation-message.patch
 nbd-prevent-memory-leak.patch
-x86-doublefault-32-fix-stack-canaries-in-the-double-fault-handler.patch
-x86-cpu_entry_area-add-guard-page-for-entry-stack-on-32bit.patch
-selftests-x86-mov_ss_trap-fix-the-sysenter-test.patch
-selftests-x86-sigreturn-32-invalidate-ds-and-es-when-abusing-the-kernel.patch
-x86-pti-32-calculate-the-various-pti-cpu_entry_area-sizes-correctly-make-the-cpu_entry_area_pages-assert-precise.patch
 nfc-port100-handle-command-failure-cleanly.patch
 media-vivid-set-vid_cap_streaming-and-vid_out_streaming-to-true.patch
 media-vivid-fix-wrong-locking-that-causes-race-conditions-on-streaming-stop.patch
diff --git a/queue-4.14/x86-cpu_entry_area-add-guard-page-for-entry-stack-on-32bit.patch b/queue-4.14/x86-cpu_entry_area-add-guard-page-for-entry-stack-on-32bit.patch
deleted file mode 100644
index c0fa219efa0..00000000000
--- a/queue-4.14/x86-cpu_entry_area-add-guard-page-for-entry-stack-on-32bit.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From 880a98c339961eaa074393e3a2117cbe9125b8bb Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner
-Date: Thu, 21 Nov 2019 00:40:24 +0100
-Subject: x86/cpu_entry_area: Add guard page for entry stack on 32bit
-
-From: Thomas Gleixner
-
-commit 880a98c339961eaa074393e3a2117cbe9125b8bb upstream.
-
-The entry stack in the cpu entry area is protected against overflow by the
-readonly GDT on 64-bit, but on 32-bit the GDT needs to be writeable and
-therefore does not trigger a fault on stack overflow.
-
-Add a guard page.
-
-Fixes: c482feefe1ae ("x86/entry/64: Make cpu_entry_area.tss read-only")
-Signed-off-by: Thomas Gleixner
-Signed-off-by: Peter Zijlstra (Intel)
-Cc: stable@kernel.org
-Signed-off-by: Greg Kroah-Hartman
-
----
- arch/x86/include/asm/cpu_entry_area.h | 6 +++++-
- 1 file changed, 5 insertions(+), 1 deletion(-)
-
---- a/arch/x86/include/asm/cpu_entry_area.h
-+++ b/arch/x86/include/asm/cpu_entry_area.h
-@@ -20,8 +20,12 @@ struct cpu_entry_area {
- 
- 	/*
- 	 * The GDT is just below entry_stack and thus serves (on x86_64) as
--	 * a a read-only guard page.
-+	 * a read-only guard page. On 32-bit the GDT must be writeable, so
-+	 * it needs an extra guard page.
- 	 */
-+#ifdef CONFIG_X86_32
-+	char guard_entry_stack[PAGE_SIZE];
-+#endif
- 	struct entry_stack_page entry_stack_page;
- 
- 	/*
diff --git a/queue-4.14/x86-doublefault-32-fix-stack-canaries-in-the-double-fault-handler.patch b/queue-4.14/x86-doublefault-32-fix-stack-canaries-in-the-double-fault-handler.patch
deleted file mode 100644
index b5f1d1fa894..00000000000
--- a/queue-4.14/x86-doublefault-32-fix-stack-canaries-in-the-double-fault-handler.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From 3580d0b29cab08483f84a16ce6a1151a1013695f Mon Sep 17 00:00:00 2001
-From: Andy Lutomirski
-Date: Thu, 21 Nov 2019 11:50:12 +0100
-Subject: x86/doublefault/32: Fix stack canaries in the double fault handler
-
-From: Andy Lutomirski
-
-commit 3580d0b29cab08483f84a16ce6a1151a1013695f upstream.
-
-The double fault TSS was missing GS setup, which is needed for stack
-canaries to work.
-
-Signed-off-by: Andy Lutomirski
-Signed-off-by: Peter Zijlstra (Intel)
-Cc: stable@kernel.org
-Signed-off-by: Greg Kroah-Hartman
-
----
- arch/x86/kernel/doublefault.c | 3 +++
- 1 file changed, 3 insertions(+)
-
---- a/arch/x86/kernel/doublefault.c
-+++ b/arch/x86/kernel/doublefault.c
-@@ -65,6 +65,9 @@ struct x86_hw_tss doublefault_tss __cach
- 		.ss		= __KERNEL_DS,
- 		.ds		= __USER_DS,
- 		.fs		= __KERNEL_PERCPU,
-+#ifndef CONFIG_X86_32_LAZY_GS
-+		.gs		= __KERNEL_STACK_CANARY,
-+#endif
- 
- 		.__cr3		= __pa_nodebug(swapper_pg_dir),
- 	};
diff --git a/queue-4.14/x86-pti-32-calculate-the-various-pti-cpu_entry_area-sizes-correctly-make-the-cpu_entry_area_pages-assert-precise.patch b/queue-4.14/x86-pti-32-calculate-the-various-pti-cpu_entry_area-sizes-correctly-make-the-cpu_entry_area_pages-assert-precise.patch
deleted file mode 100644
index 1448b1de7ec..00000000000
--- a/queue-4.14/x86-pti-32-calculate-the-various-pti-cpu_entry_area-sizes-correctly-make-the-cpu_entry_area_pages-assert-precise.patch
+++ /dev/null
@@ -1,198 +0,0 @@
-From 05b042a1944322844eaae7ea596d5f154166d68a Mon Sep 17 00:00:00 2001
-From: Ingo Molnar
-Date: Sun, 24 Nov 2019 11:21:44 +0100
-Subject: x86/pti/32: Calculate the various PTI cpu_entry_area sizes correctly, make the CPU_ENTRY_AREA_PAGES assert precise
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-From: Ingo Molnar
-
-commit 05b042a1944322844eaae7ea596d5f154166d68a upstream.
-
-When two recent commits that increased the size of the 'struct cpu_entry_area'
-were merged in -tip, the 32-bit defconfig build started failing on the following
-build time assert:
-
-  ./include/linux/compiler.h:391:38: error: call to ‘__compiletime_assert_189’ declared with attribute error: BUILD_BUG_ON failed: CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE
-  arch/x86/mm/cpu_entry_area.c:189:2: note: in expansion of macro ‘BUILD_BUG_ON’
-  In function ‘setup_cpu_entry_area_ptes’,
-
-Which corresponds to the following build time assert:
-
-  BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
-
-The purpose of this assert is to sanity check the fixed-value definition of
-CPU_ENTRY_AREA_PAGES arch/x86/include/asm/pgtable_32_types.h:
-
-  #define CPU_ENTRY_AREA_PAGES    (NR_CPUS * 41)
-
-The '41' is supposed to match sizeof(struct cpu_entry_area)/PAGE_SIZE, which value
-we didn't want to define in such a low level header, because it would cause
-dependency hell.
-
-Every time the size of cpu_entry_area is changed, we have to adjust CPU_ENTRY_AREA_PAGES
-accordingly - and this assert is checking that constraint.
-
-But the assert is both imprecise and buggy, primarily because it doesn't
-include the single readonly IDT page that is mapped at CPU_ENTRY_AREA_BASE
-(which begins at a PMD boundary).
-
-This bug was hidden by the fact that by accident CPU_ENTRY_AREA_PAGES is defined
-too large upstream (v5.4-rc8):
-
-  #define CPU_ENTRY_AREA_PAGES    (NR_CPUS * 40)
-
-While 'struct cpu_entry_area' is 155648 bytes, or 38 pages. So we had two extra
-pages, which hid the bug.
-
-The following commit (not yet upstream) increased the size to 40 pages:
-
-  x86/iopl: ("Restrict iopl() permission scope")
-
-... but increased CPU_ENTRY_AREA_PAGES only 41 - i.e. shortening the gap
-to just 1 extra page.
-
-Then another not-yet-upstream commit changed the size again:
-
-  880a98c33996: ("x86/cpu_entry_area: Add guard page for entry stack on 32bit")
-
-Which increased the cpu_entry_area size from 38 to 39 pages, but
-didn't change CPU_ENTRY_AREA_PAGES (kept it at 40). This worked
-fine, because we still had a page left from the accidental 'reserve'.
-
-But when these two commits were merged into the same tree, the
-combined size of cpu_entry_area grew from 38 to 40 pages, while
-CPU_ENTRY_AREA_PAGES finally caught up to 40 as well.
-
-Which is fine in terms of functionality, but the assert broke:
-
-  BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
-
-because CPU_ENTRY_AREA_MAP_SIZE is the total size of the area,
-which is 1 page larger due to the IDT page.
-
-To fix all this, change the assert to two precise asserts:
-
-  BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
-  BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
-
-This takes the IDT page into account, and also connects the size-based
-define of CPU_ENTRY_AREA_TOTAL_SIZE with the address-subtraction based
-define of CPU_ENTRY_AREA_MAP_SIZE.
-
-Also clean up some of the names which made it rather confusing:
-
- - 'CPU_ENTRY_AREA_TOT_SIZE' wasn't actually the 'total' size of
-   the cpu-entry-area, but the per-cpu array size, so rename this
-   to CPU_ENTRY_AREA_ARRAY_SIZE.
-
- - Introduce CPU_ENTRY_AREA_TOTAL_SIZE that _is_ the total mapping
-   size, with the IDT included.
-
- - Add comments where '+1' denotes the IDT mapping - it wasn't
-   obvious and took me about 3 hours to decode...
-
-Finally, because this particular commit is actually applied after
-this patch:
-
-  880a98c33996: ("x86/cpu_entry_area: Add guard page for entry stack on 32bit")
-
-Fix the CPU_ENTRY_AREA_PAGES value from 40 pages to the correct 39 pages.
-
-All future commits that change cpu_entry_area will have to adjust
-this value precisely.
-
-As a side note, we should probably attempt to remove CPU_ENTRY_AREA_PAGES
-and derive its value directly from the structure, without causing
-header hell - but that is an adventure for another day! :-)
-
-Fixes: 880a98c33996: ("x86/cpu_entry_area: Add guard page for entry stack on 32bit")
-Cc: Thomas Gleixner
-Cc: Borislav Petkov
-Cc: Peter Zijlstra (Intel)
-Cc: Linus Torvalds
-Cc: Andy Lutomirski
-Cc: stable@kernel.org
-Signed-off-by: Ingo Molnar
-Signed-off-by: Greg Kroah-Hartman
-
----
- arch/x86/include/asm/cpu_entry_area.h | 12 +++++++-----
- arch/x86/include/asm/pgtable_32_types.h | 8 ++++----
- arch/x86/mm/cpu_entry_area.c | 4 +++-
- 3 files changed, 14 insertions(+), 10 deletions(-)
-
---- a/arch/x86/include/asm/cpu_entry_area.h
-+++ b/arch/x86/include/asm/cpu_entry_area.h
-@@ -45,7 +45,6 @@ struct cpu_entry_area {
- 	 */
- 	char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
- #endif
--#ifdef CONFIG_CPU_SUP_INTEL
- 	/*
- 	 * Per CPU debug store for Intel performance monitoring. Wastes a
- 	 * full page at the moment.
-@@ -56,24 +55,27 @@ struct cpu_entry_area {
- 	 * Reserve enough fixmap PTEs.
- 	 */
- 	struct debug_store_buffers cpu_debug_buffers;
--#endif
- };
- 
--#define CPU_ENTRY_AREA_SIZE	(sizeof(struct cpu_entry_area))
--#define CPU_ENTRY_AREA_TOT_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)
-+#define CPU_ENTRY_AREA_SIZE		(sizeof(struct cpu_entry_area))
-+#define CPU_ENTRY_AREA_ARRAY_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)
-+
-+/* Total size includes the readonly IDT mapping page as well: */
-+#define CPU_ENTRY_AREA_TOTAL_SIZE	(CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
- 
- DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
- 
- extern void setup_cpu_entry_areas(void);
- extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
- 
-+/* Single page reserved for the readonly IDT mapping: */
- #define CPU_ENTRY_AREA_RO_IDT		CPU_ENTRY_AREA_BASE
- #define CPU_ENTRY_AREA_PER_CPU		(CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
- 
- #define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)
- 
- #define CPU_ENTRY_AREA_MAP_SIZE			\
--	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)
-+	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
- 
- extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
- 
---- a/arch/x86/include/asm/pgtable_32_types.h
-+++ b/arch/x86/include/asm/pgtable_32_types.h
-@@ -42,11 +42,11 @@ extern bool __vmalloc_start_set; /* set
-  * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c
-  * to avoid include recursion hell
-  */
--#define CPU_ENTRY_AREA_PAGES	(NR_CPUS * 40)
-+#define CPU_ENTRY_AREA_PAGES	(NR_CPUS * 39)
- 
--#define CPU_ENTRY_AREA_BASE				\
--	((FIXADDR_TOT_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1))	\
--	 & PMD_MASK)
-+/* The +1 is for the readonly IDT page: */
-+#define CPU_ENTRY_AREA_BASE	\
-+	((FIXADDR_TOT_START - PAGE_SIZE*(CPU_ENTRY_AREA_PAGES+1)) & PMD_MASK)
- 
- #define PKMAP_BASE		\
- 	((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
---- a/arch/x86/mm/cpu_entry_area.c
-+++ b/arch/x86/mm/cpu_entry_area.c
-@@ -143,7 +143,9 @@ static __init void setup_cpu_entry_area_
- #ifdef CONFIG_X86_32
- 	unsigned long start, end;
- 
--	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
-+	/* The +1 is for the readonly IDT: */
-+	BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
-+	BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
- 	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
- 
- 	start = CPU_ENTRY_AREA_BASE;