From 9d6e4491f668e07901cca8c5cf340e3b5e34ab4c Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Mon, 2 Feb 2015 15:01:57 -0800
Subject: [PATCH] 3.18-stable patches

added patches:
    spi-dw-fix-detecting-fifo-depth.patch
    spi-dw-mid-fix-fifo-size.patch
    vm-add-vm_fault_sigsegv-handling-support.patch
    x86-build-replace-perl-script-with-shell-script.patch
---
 .../spi-dw-fix-detecting-fifo-depth.patch | 46 ++
 queue-3.18/spi-dw-mid-fix-fifo-size.patch | 32 ++
 ...dd-vm_fault_sigsegv-handling-support.patch | 428 ++++++++++++++++++
 ...eplace-perl-script-with-shell-script.patch | 131 ++++++
 4 files changed, 637 insertions(+)
 create mode 100644 queue-3.18/spi-dw-fix-detecting-fifo-depth.patch
 create mode 100644 queue-3.18/spi-dw-mid-fix-fifo-size.patch
 create mode 100644 queue-3.18/vm-add-vm_fault_sigsegv-handling-support.patch
 create mode 100644 queue-3.18/x86-build-replace-perl-script-with-shell-script.patch

diff --git a/queue-3.18/spi-dw-fix-detecting-fifo-depth.patch b/queue-3.18/spi-dw-fix-detecting-fifo-depth.patch
new file mode 100644
index 00000000000..19e17efa3a1
--- /dev/null
+++ b/queue-3.18/spi-dw-fix-detecting-fifo-depth.patch
@@ -0,0 +1,46 @@
+From d297933cc7fcfbaaf2d37570baac73287bf0357d Mon Sep 17 00:00:00 2001
+From: Axel Lin
+Date: Mon, 5 Jan 2015 09:32:56 +0800
+Subject: spi: dw: Fix detecting FIFO depth
+
+From: Axel Lin
+
+commit d297933cc7fcfbaaf2d37570baac73287bf0357d upstream.
+
+Current code tries to find the highest valid fifo depth by checking the value
+it wrote to DW_SPI_TXFLTR. There are a few problems in current code:
+1) There is an off-by-one in dws->fifo_len setting because it assumes the latest
+   register write fails so the latest valid value should be fifo - 1.
+2) We know the depth could be from 2 to 256 from HW spec, so it is not necessary
+   to test fifo == 257. In the case fifo is 257, it means the latest valid
+   setting is fifo = 256. So after the for loop iteration, we should check
+   fifo == 2 case instead of fifo == 257 if detecting the FIFO depth fails.
+This patch fixes above issues.
+
+Signed-off-by: Axel Lin
+Reviewed-and-tested-by: Andy Shevchenko
+Signed-off-by: Mark Brown
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/spi/spi-dw.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/spi/spi-dw.c
++++ b/drivers/spi/spi-dw.c
+@@ -621,13 +621,13 @@ static void spi_hw_init(struct dw_spi *d
+ if (!dws->fifo_len) {
+ u32 fifo;
+
+- for (fifo = 2; fifo <= 257; fifo++) {
++ for (fifo = 2; fifo <= 256; fifo++) {
+ dw_writew(dws, DW_SPI_TXFLTR, fifo);
+ if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
+ break;
+ }
+
+- dws->fifo_len = (fifo == 257) ? 0 : fifo;
++ dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
+ dw_writew(dws, DW_SPI_TXFLTR, 0);
+ }
+ }
diff --git a/queue-3.18/spi-dw-mid-fix-fifo-size.patch b/queue-3.18/spi-dw-mid-fix-fifo-size.patch
new file mode 100644
index 00000000000..76cb4b82768
--- /dev/null
+++ b/queue-3.18/spi-dw-mid-fix-fifo-size.patch
@@ -0,0 +1,32 @@
+From 67bf9cda4b498b8cea4a40be67a470afe57d2e88 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko
+Date: Fri, 2 Jan 2015 17:48:51 +0200
+Subject: spi: dw-mid: fix FIFO size
+
+From: Andy Shevchenko
+
+commit 67bf9cda4b498b8cea4a40be67a470afe57d2e88 upstream.
+
+The FIFO size is 40 accordingly to the specifications, but this means 0x40,
+i.e. 64 bytes. This patch fixes the typo and enables FIFO size autodetection
+for Intel MID devices.
+
+Fixes: 7063c0d942a1 (spi/dw_spi: add DMA support)
+Signed-off-by: Andy Shevchenko
+Signed-off-by: Mark Brown
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/spi/spi-dw-mid.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/spi/spi-dw-mid.c
++++ b/drivers/spi/spi-dw-mid.c
+@@ -219,7 +219,6 @@ int dw_spi_mid_init(struct dw_spi *dws)
+ iounmap(clk_reg);
+
+ dws->num_cs = 16;
+- dws->fifo_len = 40; /* FIFO has 40 words buffer */
+
+ #ifdef CONFIG_SPI_DW_MID_DMA
+ dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
diff --git a/queue-3.18/vm-add-vm_fault_sigsegv-handling-support.patch b/queue-3.18/vm-add-vm_fault_sigsegv-handling-support.patch
new file mode 100644
index 00000000000..930ad59404b
--- /dev/null
+++ b/queue-3.18/vm-add-vm_fault_sigsegv-handling-support.patch
@@ -0,0 +1,428 @@
+From 33692f27597fcab536d7cbbcc8f52905133e4aa7 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds
+Date: Thu, 29 Jan 2015 10:51:32 -0800
+Subject: vm: add VM_FAULT_SIGSEGV handling support
+
+From: Linus Torvalds
+
+commit 33692f27597fcab536d7cbbcc8f52905133e4aa7 upstream.
+
+The core VM already knows about VM_FAULT_SIGBUS, but cannot return a
+"you should SIGSEGV" error, because the SIGSEGV case was generally
+handled by the caller - usually the architecture fault handler.
+
+That results in lots of duplication - all the architecture fault
+handlers end up doing very similar "look up vma, check permissions, do
+retries etc" - but it generally works. However, there are cases where
+the VM actually wants to SIGSEGV, and applications _expect_ SIGSEGV.
+
+In particular, when accessing the stack guard page, libsigsegv expects a
+SIGSEGV. And it usually got one, because the stack growth is handled by
+that duplicated architecture fault handler.
+
+However, when the generic VM layer started propagating the error return
+from the stack expansion in commit fee7e49d4514 ("mm: propagate error
+from stack expansion even for guard page"), that now exposed the
+existing VM_FAULT_SIGBUS result to user space. And user space really
+expected SIGSEGV, not SIGBUS.
+
+To fix that case, we need to add a VM_FAULT_SIGSEGV, and teach all those
+duplicate architecture fault handlers about it. They all already have
+the code to handle SIGSEGV, so it's about just tying that new return
+value to the existing code, but it's all a bit annoying.
+
+This is the mindless minimal patch to do this. A more extensive patch
+would be to try to gather up the mostly shared fault handling logic into
+one generic helper routine, and long-term we really should do that
+cleanup.
+
+Just from this patch, you can generally see that most architectures just
+copied (directly or indirectly) the old x86 way of doing things, but in
+the meantime that original x86 model has been improved to hold the VM
+semaphore for shorter times etc and to handle VM_FAULT_RETRY and other
+"newer" things, so it would be a good idea to bring all those
+improvements to the generic case and teach other architectures about
+them too.
+
+Reported-and-tested-by: Takashi Iwai
+Tested-by: Jan Engelhardt
+Acked-by: Heiko Carstens # "s390 still compiles and boots"
+Cc: linux-arch@vger.kernel.org
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/alpha/mm/fault.c | 2 ++
+ arch/arc/mm/fault.c | 2 ++
+ arch/avr32/mm/fault.c | 2 ++
+ arch/cris/mm/fault.c | 2 ++
+ arch/frv/mm/fault.c | 2 ++
+ arch/ia64/mm/fault.c | 2 ++
+ arch/m32r/mm/fault.c | 2 ++
+ arch/m68k/mm/fault.c | 2 ++
+ arch/metag/mm/fault.c | 2 ++
+ arch/microblaze/mm/fault.c | 2 ++
+ arch/mips/mm/fault.c | 2 ++
+ arch/mn10300/mm/fault.c | 2 ++
+ arch/openrisc/mm/fault.c | 2 ++
+ arch/parisc/mm/fault.c | 2 ++
+ arch/powerpc/mm/copro_fault.c | 2 +-
+ arch/powerpc/mm/fault.c | 2 ++
+ arch/s390/mm/fault.c | 6 ++++++
+ arch/score/mm/fault.c | 2 ++
+ arch/sh/mm/fault.c | 2 ++
+ arch/sparc/mm/fault_32.c | 2 ++
+ arch/sparc/mm/fault_64.c | 2 ++
+ arch/tile/mm/fault.c | 2 ++
+ arch/um/kernel/trap.c | 2 ++
+ arch/x86/mm/fault.c | 2 ++
+ arch/xtensa/mm/fault.c | 2 ++
+ drivers/staging/lustre/lustre/llite/vvp_io.c | 2 +-
+ include/linux/mm.h | 6 ++++--
+ mm/gup.c | 4 ++--
+ mm/ksm.c | 2 +-
+ 29 files changed, 61 insertions(+), 7 deletions(-)
+
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -156,6 +156,8 @@ retry:
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGSEGV)
++ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+--- a/arch/arc/mm/fault.c
++++ b/arch/arc/mm/fault.c
+@@ -161,6 +161,8 @@ good_area:
+
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGSEV)
++ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+
+--- a/arch/avr32/mm/fault.c
++++ b/arch/avr32/mm/fault.c
+@@ -142,6 +142,8 @@ good_area:
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGSEGV)
++ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+--- a/arch/cris/mm/fault.c
++++ b/arch/cris/mm/fault.c
+@@ -176,6 +176,8 @@ retry:
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGSEGV)
++ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+--- a/arch/frv/mm/fault.c
++++ b/arch/frv/mm/fault.c
+@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datamm
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGSEGV)
++ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+--- a/arch/ia64/mm/fault.c
++++ b/arch/ia64/mm/fault.c
+@@ -172,6 +172,8 @@ retry:
+ */
+ if (fault & VM_FAULT_OOM) {
+ goto out_of_memory;
++ } else if (fault & VM_FAULT_SIGSEGV) {
++ goto bad_area;
+ } else if (fault & VM_FAULT_SIGBUS) {
+ signal = SIGBUS;
+ goto bad_area;
+--- a/arch/m32r/mm/fault.c
++++ b/arch/m32r/mm/fault.c
+@@ -200,6 +200,8 @@ good_area:
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGSEGV)
++ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+--- a/arch/m68k/mm/fault.c
++++ b/arch/m68k/mm/fault.c
+@@ -145,6 +145,8 @@ good_area:
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGSEGV)
++ goto map_err;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto bus_err;
+ BUG();
+--- a/arch/metag/mm/fault.c
++++ b/arch/metag/mm/fault.c
+@@ -141,6 +141,8 @@ good_area:
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGSEGV)
++ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+--- a/arch/microblaze/mm/fault.c
++++ b/arch/microblaze/mm/fault.c
+@@ -224,6 +224,8 @@ good_area:
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGSEGV)
++ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+--- a/arch/mips/mm/fault.c
++++ b/arch/mips/mm/fault.c
+@@ -158,6 +158,8 @@ good_area:
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGSEGV)
++ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+--- a/arch/mn10300/mm/fault.c
++++ b/arch/mn10300/mm/fault.c
+@@ -262,6 +262,8 @@ good_area:
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGSEGV)
++ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+--- a/arch/openrisc/mm/fault.c
++++ b/arch/openrisc/mm/fault.c
+@@ -171,6 +171,8 @@ good_area:
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGSEGV)
++ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -256,6 +256,8 @@ good_area:
+ */
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGSEGV)
++ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto bad_area;
+ BUG();
+--- a/arch/powerpc/mm/copro_fault.c
++++ b/arch/powerpc/mm/copro_fault.c
+@@ -76,7 +76,7 @@ int copro_handle_mm_fault(struct mm_stru
+ if (*flt & VM_FAULT_OOM) {
+ ret = -ENOMEM;
+ goto out_unlock;
+- } else if (*flt & VM_FAULT_SIGBUS) {
++ } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -444,6 +444,8 @@ good_area:
+ */
+ fault = handle_mm_fault(mm, vma, address, flags);
+ if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
++ if (fault & VM_FAULT_SIGSEGV)
++ goto bad_area;
+ rc = mm_fault_error(regs, address, fault);
+ if (rc >= MM_FAULT_RETURN)
+ goto bail;
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -374,6 +374,12 @@ static noinline void do_fault_error(stru
+ do_no_context(regs);
+ else
+ pagefault_out_of_memory();
++ } else if (fault & VM_FAULT_SIGSEGV) {
++ /* Kernel mode? Handle exceptions or die */
++ if (!user_mode(regs))
++ do_no_context(regs);
++ else
++ do_sigsegv(regs, SEGV_MAPERR);
+ } else if (fault & VM_FAULT_SIGBUS) {
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+--- a/arch/score/mm/fault.c
++++ b/arch/score/mm/fault.c
+@@ -114,6 +114,8 @@ good_area:
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGSEGV)
++ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+--- a/arch/sh/mm/fault.c
++++ b/arch/sh/mm/fault.c
+@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, uns
+ } else {
+ if (fault & VM_FAULT_SIGBUS)
+ do_sigbus(regs, error_code, address);
++ else if (fault & VM_FAULT_SIGSEGV)
++ bad_area(regs, error_code, address);
+ else
+ BUG();
+ }
+--- a/arch/sparc/mm/fault_32.c
++++ b/arch/sparc/mm/fault_32.c
+@@ -249,6 +249,8 @@ good_area:
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGSEGV)
++ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -446,6 +446,8 @@ good_area:
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGSEGV)
++ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+--- a/arch/tile/mm/fault.c
++++ b/arch/tile/mm/fault.c
+@@ -444,6 +444,8 @@ good_area:
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGSEGV)
++ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+--- a/arch/um/kernel/trap.c
++++ b/arch/um/kernel/trap.c
+@@ -80,6 +80,8 @@ good_area:
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM) {
+ goto out_of_memory;
++ } else if (fault & VM_FAULT_SIGSEGV) {
++ goto out;
+ } else if (fault & VM_FAULT_SIGBUS) {
+ err = -EACCES;
+ goto out;
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -905,6 +905,8 @@ mm_fault_error(struct pt_regs *regs, uns
+ if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+ VM_FAULT_HWPOISON_LARGE))
+ do_sigbus(regs, error_code, address, fault);
++ else if (fault & VM_FAULT_SIGSEGV)
++ bad_area_nosemaphore(regs, error_code, address);
+ else
+ BUG();
+ }
+--- a/arch/xtensa/mm/fault.c
++++ b/arch/xtensa/mm/fault.c
+@@ -117,6 +117,8 @@ good_area:
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
++ else if (fault & VM_FAULT_SIGSEGV)
++ goto bad_area;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
++++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
+@@ -632,7 +632,7 @@ static int vvp_io_kernel_fault(struct vv
+ return 0;
+ }
+
+- if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
++ if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
+ CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
+ return -EFAULT;
+ }
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1054,6 +1054,7 @@ static inline int page_mapped(struct pag
+ #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
+ #define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
+ #define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */
++#define VM_FAULT_SIGSEGV 0x0040
+
+ #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
+ #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
+@@ -1062,8 +1063,9 @@ static inline int page_mapped(struct pag
+
+ #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
+
+-#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
+- VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
++#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
++ VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
++ VM_FAULT_FALLBACK)
+
+ /* Encode hstate index for a hwpoisoned large page */
+ #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -296,7 +296,7 @@ static int faultin_page(struct task_stru
+ return -ENOMEM;
+ if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
+ return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
+- if (ret & VM_FAULT_SIGBUS)
++ if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
+ return -EFAULT;
+ BUG();
+ }
+@@ -571,7 +571,7 @@ int fixup_user_fault(struct task_struct
+ return -ENOMEM;
+ if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
+ return -EHWPOISON;
+- if (ret & VM_FAULT_SIGBUS)
++ if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
+ return -EFAULT;
+ BUG();
+ }
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_stru
+ else
+ ret = VM_FAULT_WRITE;
+ put_page(page);
+- } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
++ } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
+ /*
+ * We must loop because handle_mm_fault() may back out if there's
+ * any difficulty e.g. if pte accessed bit gets updated concurrently.
diff --git a/queue-3.18/x86-build-replace-perl-script-with-shell-script.patch b/queue-3.18/x86-build-replace-perl-script-with-shell-script.patch
new file mode 100644
index 00000000000..4d8484871bf
--- /dev/null
+++ b/queue-3.18/x86-build-replace-perl-script-with-shell-script.patch
@@ -0,0 +1,131 @@
+From d69911a68c865b152a067feaa45e98e6bb0f655b Mon Sep 17 00:00:00 2001
+From: Kees Cook
+Date: Mon, 26 Jan 2015 12:58:35 -0800
+Subject: x86, build: replace Perl script with Shell script
+
+From: Kees Cook
+
+commit d69911a68c865b152a067feaa45e98e6bb0f655b upstream.
+
+Commit e6023367d779 ("x86, kaslr: Prevent .bss from overlaping initrd")
+added Perl to the required build environment. This reimplements in
+shell the Perl script used to find the size of the kernel with bss and
+brk added.
+
+Signed-off-by: Kees Cook
+Reported-by: Rob Landley
+Acked-by: Rob Landley
+Cc: Anca Emanuel
+Cc: Fengguang Wu
+Cc: Junjie Mao
+Cc: Kees Cook
+Cc: Thomas Gleixner
+Cc:
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/boot/compressed/Makefile | 2 -
+ arch/x86/tools/calc_run_size.pl | 39 -----------------------------------
+ arch/x86/tools/calc_run_size.sh | 42 ++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 43 insertions(+), 40 deletions(-)
+
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -77,7 +77,7 @@ suffix-$(CONFIG_KERNEL_LZO) := lzo
+ suffix-$(CONFIG_KERNEL_LZ4) := lz4
+
+ RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
+- perl $(srctree)/arch/x86/tools/calc_run_size.pl)
++ $(CONFIG_SHELL) $(srctree)/arch/x86/tools/calc_run_size.sh)
+ quiet_cmd_mkpiggy = MKPIGGY $@
+ cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
+
+--- a/arch/x86/tools/calc_run_size.pl
++++ /dev/null
+@@ -1,39 +0,0 @@
+-#!/usr/bin/perl
+-#
+-# Calculate the amount of space needed to run the kernel, including room for
+-# the .bss and .brk sections.
+-#
+-# Usage:
+-# objdump -h a.out | perl calc_run_size.pl
+-use strict;
+-
+-my $mem_size = 0;
+-my $file_offset = 0;
+-
+-my $sections=" *[0-9]+ \.(?:bss|brk) +";
+-while (<>) {
+- if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) {
+- my $size = hex($1);
+- my $offset = hex($2);
+- $mem_size += $size;
+- if ($file_offset == 0) {
+- $file_offset = $offset;
+- } elsif ($file_offset != $offset) {
+- # BFD linker shows the same file offset in ELF.
+- # Gold linker shows them as consecutive.
+- next if ($file_offset + $mem_size == $offset + $size);
+-
+- printf STDERR "file_offset: 0x%lx\n", $file_offset;
+- printf STDERR "mem_size: 0x%lx\n", $mem_size;
+- printf STDERR "offset: 0x%lx\n", $offset;
+- printf STDERR "size: 0x%lx\n", $size;
+-
+- die ".bss and .brk are non-contiguous\n";
+- }
+- }
+-}
+-
+-if ($file_offset == 0) {
+- die "Never found .bss or .brk file offset\n";
+-}
+-printf("%d\n", $mem_size + $file_offset);
+--- /dev/null
++++ b/arch/x86/tools/calc_run_size.sh
+@@ -0,0 +1,42 @@
++#!/bin/sh
++#
++# Calculate the amount of space needed to run the kernel, including room for
++# the .bss and .brk sections.
++#
++# Usage:
++# objdump -h a.out | sh calc_run_size.sh
++
++NUM='\([0-9a-fA-F]*[ \t]*\)'
++OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"$NUM$NUM$NUM$NUM"'.*/\1\4/p')
++if [ -z "$OUT" ] ; then
++ echo "Never found .bss or .brk file offset" >&2
++ exit 1
++fi
++
++OUT=$(echo ${OUT# })
++sizeA=$(printf "%d" 0x${OUT%% *})
++OUT=${OUT#* }
++offsetA=$(printf "%d" 0x${OUT%% *})
++OUT=${OUT#* }
++sizeB=$(printf "%d" 0x${OUT%% *})
++OUT=${OUT#* }
++offsetB=$(printf "%d" 0x${OUT%% *})
++
++run_size=$(( $offsetA + $sizeA + $sizeB ))
++
++# BFD linker shows the same file offset in ELF.
++if [ "$offsetA" -ne "$offsetB" ] ; then
++ # Gold linker shows them as consecutive.
++ endB=$(( $offsetB + $sizeB ))
++ if [ "$endB" != "$run_size" ] ; then
++ printf "sizeA: 0x%x\n" $sizeA >&2
++ printf "offsetA: 0x%x\n" $offsetA >&2
++ printf "sizeB: 0x%x\n" $sizeB >&2
++ printf "offsetB: 0x%x\n" $offsetB >&2
++ echo ".bss and .brk are non-contiguous" >&2
++ exit 1
++ fi
++fi
++
++printf "%d\n" $run_size
++exit 0
--
2.47.3
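
A quick worked example of the calculation the new calc_run_size.sh performs (a minimal sketch with made-up section sizes and offsets; the real script parses these values out of `objdump -h` output rather than hard-coding them):

    #!/bin/sh
    # Hypothetical layout, hex values as objdump -h would report them:
    #   .bss  size 0x20000 at file offset 0x1800000
    #   .brk  size 0x04000 at file offset 0x1820000  (gold-style: consecutive)
    bss_size=$(printf "%d" 0x20000)
    bss_off=$(printf "%d" 0x1800000)
    brk_size=$(printf "%d" 0x04000)

    # Room needed to run the kernel: everything up to and including .bss,
    # plus .brk on top of it.
    run_size=$(( bss_off + bss_size + brk_size ))
    printf "%d\n" "$run_size"    # prints 25313280 (0x1824000)

The resulting number ends up in RUN_SIZE in arch/x86/boot/compressed/Makefile and is passed to mkpiggy, exactly as with the Perl version being replaced.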