--- /dev/null
+From f51e2f1911122879eefefa4c592dea8bf794b39c Mon Sep 17 00:00:00 2001
+From: Alexey Brodkin <abrodkin@synopsys.com>
+Date: Mon, 13 Jul 2015 10:25:17 +0300
+Subject: ARC: make sure instruction_pointer() returns unsigned value
+
+From: Alexey Brodkin <abrodkin@synopsys.com>
+
+commit f51e2f1911122879eefefa4c592dea8bf794b39c upstream.
+
+Currently instruction_pointer() returns pt_regs->ret and so return value
+is of type "long", which implicitly stands for "signed long".
+
+While that's perfectly fine when dealing with 32-bit values if return
+value of instruction_pointer() gets assigned to 64-bit variable sign
+extension may happen.
+
+And at least in one real use-case it happens already.
+In perf_prepare_sample() return value of perf_instruction_pointer()
+(which is an alias to instruction_pointer() in case of ARC) is assigned
+to (struct perf_sample_data)->ip (which type is "u64").
+
+And what we see if instruction pointer points to user-space application
+that in case of ARC lays below 0x8000_0000 "ip" gets set properly with
+leading 32 zeros. But if instruction pointer points to kernel address
+space that starts from 0x8000_0000 then "ip" is set with 32 leading
+"f"-s. I.e. if instruction_pointer() returns 0x8100_0000, "ip" will be
+assigned with 0xffff_ffff__8100_0000. Which is obviously wrong.
+
+In particular that issue broke output of perf, because perf was unable
+to associate addresses like 0xffff_ffff__8100_0000 with anything from
+/proc/kallsyms.
+
+That's what we used to see:
+ ----------->8----------
+ 6.27% ls [unknown] [k] 0xffffffff8046c5cc
+ 2.96% ls libuClibc-0.9.34-git.so [.] memcpy
+ 2.25% ls libuClibc-0.9.34-git.so [.] memset
+ 1.66% ls [unknown] [k] 0xffffffff80666536
+ 1.54% ls libuClibc-0.9.34-git.so [.] 0x000224d6
+ 1.18% ls libuClibc-0.9.34-git.so [.] 0x00022472
+ ----------->8----------
+
+With that change perf output looks much better now:
+ ----------->8----------
+ 8.21% ls [kernel.kallsyms] [k] memset
+ 3.52% ls libuClibc-0.9.34-git.so [.] memcpy
+ 2.11% ls libuClibc-0.9.34-git.so [.] malloc
+ 1.88% ls libuClibc-0.9.34-git.so [.] memset
+ 1.64% ls [kernel.kallsyms] [k] _raw_spin_unlock_irqrestore
+ 1.41% ls [kernel.kallsyms] [k] __d_lookup_rcu
+ ----------->8----------
+
+Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com>
+Cc: arc-linux-dev@synopsys.com
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arc/include/asm/ptrace.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arc/include/asm/ptrace.h
++++ b/arch/arc/include/asm/ptrace.h
+@@ -63,7 +63,7 @@ struct callee_regs {
+ long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
+ };
+
+-#define instruction_pointer(regs) ((regs)->ret)
++#define instruction_pointer(regs) (unsigned long)((regs)->ret)
+ #define profile_pc(regs) instruction_pointer(regs)
+
+ /* return 1 if user mode or 0 if kernel mode */
--- /dev/null
+From 97709069214eb75312c14946803b9da4d3814203 Mon Sep 17 00:00:00 2001
+From: Vineet Gupta <vgupta@synopsys.com>
+Date: Thu, 18 Jun 2015 13:54:01 +0530
+Subject: ARC: Override toplevel default -O2 with -O3
+
+From: Vineet Gupta <vgupta@synopsys.com>
+
+commit 97709069214eb75312c14946803b9da4d3814203 upstream.
+
+ARC kernels have historically been built with -O3, despite top level
+Makefile defaulting to -O2. This was facilitated by implicit ordering
+of the arch makefile include AFTER top level assigned -O2.
+
+An upstream fix to top level a1c48bb160f ("Makefile: Fix unrecognized
+cross-compiler command line options") changed the ordering, making ARC
+-O3 defunct.
+
+Fix that by NOT relying on any ordering whatsoever and use the proper
+arch override facility now present in kbuild (ARCH_*FLAGS)
+
+Depends-on: ("kbuild: Allow arch Makefiles to override {cpp,ld,c}flags")
+Suggested-by: Michal Marek <mmarek@suse.cz>
+Cc: Geert Uytterhoeven <geert@linux-m68k.org>
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arc/Makefile | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arc/Makefile
++++ b/arch/arc/Makefile
+@@ -43,7 +43,8 @@ endif
+
+ ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
+ # Generic build system uses -O2, we want -O3
+-cflags-y += -O3
++# Note: No need to add to cflags-y as that happens anyways
++ARCH_CFLAGS += -O3
+ endif
+
+ # small data is default for elf32 tool-chain. If not usable, disable it
--- /dev/null
+From acb33cc541d7a5495b16a133702d4c401ea4e294 Mon Sep 17 00:00:00 2001
+From: "Vutla, Lokesh" <lokeshvutla@ti.com>
+Date: Thu, 2 Jul 2015 18:33:28 +0530
+Subject: crypto: omap-des - Fix unmapping of dma channels
+
+From: "Vutla, Lokesh" <lokeshvutla@ti.com>
+
+commit acb33cc541d7a5495b16a133702d4c401ea4e294 upstream.
+
+dma_unmap_sg() is being called twice after completing the
+task. Looks like this is a copy paste error when creating
+des driver.
+With this the following warn appears during boot:
+
+[ 4.210457] ------------[ cut here ]------------
+[ 4.215114] WARNING: CPU: 0 PID: 0 at lib/dma-debug.c:1080 check_unmap+0x710/0x9a0()
+[ 4.222899] omap-des 480a5000.des: DMA-API: device driver tries to free DMA memory it has not allocated [device address=0x00000000ab2ce000] [size=8 bytes]
+[ 4.236785] Modules linked in:
+[ 4.239860] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 3.14.39-02999-g1bc045a-dirty #182
+[ 4.247918] [<c001678c>] (unwind_backtrace) from [<c0012574>] (show_stack+0x10/0x14)
+[ 4.255710] [<c0012574>] (show_stack) from [<c05a37e8>] (dump_stack+0x84/0xb8)
+[ 4.262977] [<c05a37e8>] (dump_stack) from [<c0046464>] (warn_slowpath_common+0x68/0x8c)
+[ 4.271107] [<c0046464>] (warn_slowpath_common) from [<c004651c>] (warn_slowpath_fmt+0x30/0x40)
+[ 4.279854] [<c004651c>] (warn_slowpath_fmt) from [<c02d50a4>] (check_unmap+0x710/0x9a0)
+[ 4.287991] [<c02d50a4>] (check_unmap) from [<c02d5478>] (debug_dma_unmap_sg+0x90/0x19c)
+[ 4.296128] [<c02d5478>] (debug_dma_unmap_sg) from [<c04a77d8>] (omap_des_done_task+0x1cc/0x3e4)
+[ 4.304963] [<c04a77d8>] (omap_des_done_task) from [<c004a090>] (tasklet_action+0x84/0x124)
+[ 4.313370] [<c004a090>] (tasklet_action) from [<c004a4ac>] (__do_softirq+0xf0/0x20c)
+[ 4.321235] [<c004a4ac>] (__do_softirq) from [<c004a840>] (irq_exit+0x98/0xec)
+[ 4.328500] [<c004a840>] (irq_exit) from [<c000f9ac>] (handle_IRQ+0x50/0xb0)
+[ 4.335589] [<c000f9ac>] (handle_IRQ) from [<c0008688>] (gic_handle_irq+0x28/0x5c)
+
+Removing the duplicate call to dma_unmap_sg().
+
+Reported-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
+Signed-off-by: Lokesh Vutla <lokeshvutla@ti.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/omap-des.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/drivers/crypto/omap-des.c
++++ b/drivers/crypto/omap-des.c
+@@ -536,9 +536,6 @@ static int omap_des_crypt_dma_stop(struc
+ dmaengine_terminate_all(dd->dma_lch_in);
+ dmaengine_terminate_all(dd->dma_lch_out);
+
+- dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+- dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
+-
+ return err;
+ }
+
--- /dev/null
+From 61754c18752ffb78145671e94f053fb202fff041 Mon Sep 17 00:00:00 2001
+From: Michal Marek <mmarek@suse.cz>
+Date: Wed, 1 Jul 2015 17:19:30 +0200
+Subject: kbuild: Allow arch Makefiles to override {cpp,ld,c}flags
+
+From: Michal Marek <mmarek@suse.cz>
+
+commit 61754c18752ffb78145671e94f053fb202fff041 upstream.
+
+Since commit a1c48bb1 (Makefile: Fix unrecognized cross-compiler command
+line options), the arch Makefile is included earlier by the main
+Makefile, preventing the arc architecture to set its -O3 compiler
+option. Since there might be more use cases for an arch Makefile to
+fine-tune the options, add support for ARCH_CPPFLAGS, ARCH_AFLAGS and
+ARCH_CFLAGS variables that are appended to the respective kbuild
+variables. The user still has the final say via the KCPPFLAGS, KAFLAGS
+and KCFLAGS variables.
+
+Reported-by: Vineet Gupta <Vineet.Gupta1@synopsys.com>
+Signed-off-by: Michal Marek <mmarek@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/kbuild/makefiles.txt | 8 ++++++++
+ Makefile | 9 +++++----
+ 2 files changed, 13 insertions(+), 4 deletions(-)
+
+--- a/Documentation/kbuild/makefiles.txt
++++ b/Documentation/kbuild/makefiles.txt
+@@ -952,6 +952,14 @@ When kbuild executes, the following step
+ $(KBUILD_ARFLAGS) set by the top level Makefile to "D" (deterministic
+ mode) if this option is supported by $(AR).
+
++ ARCH_CPPFLAGS, ARCH_AFLAGS, ARCH_CFLAGS Overrides the kbuild defaults
++
++ These variables are appended to the KBUILD_CPPFLAGS,
++ KBUILD_AFLAGS, and KBUILD_CFLAGS, respectively, after the
++ top-level Makefile has set any other flags. This provides a
++ means for an architecture to override the defaults.
++
++
+ --- 6.2 Add prerequisites to archheaders:
+
+ The archheaders: rule is used to generate header files that
+--- a/Makefile
++++ b/Makefile
+@@ -783,10 +783,11 @@ endif
+ include scripts/Makefile.kasan
+ include scripts/Makefile.extrawarn
+
+-# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
+-KBUILD_CPPFLAGS += $(KCPPFLAGS)
+-KBUILD_AFLAGS += $(KAFLAGS)
+-KBUILD_CFLAGS += $(KCFLAGS)
++# Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the
++# last assignments
++KBUILD_CPPFLAGS += $(ARCH_CPPFLAGS) $(KCPPFLAGS)
++KBUILD_AFLAGS += $(ARCH_AFLAGS) $(KAFLAGS)
++KBUILD_CFLAGS += $(ARCH_CFLAGS) $(KCFLAGS)
+
+ # Use --build-id when available.
+ LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\
--- /dev/null
+From 30342fe65e511007672437741158d493472f427f Mon Sep 17 00:00:00 2001
+From: Michael Holzheu <holzheu@linux.vnet.ibm.com>
+Date: Thu, 23 Jul 2015 19:26:15 +0200
+Subject: s390/bpf: clear correct BPF accumulator register
+
+From: Michael Holzheu <holzheu@linux.vnet.ibm.com>
+
+commit 30342fe65e511007672437741158d493472f427f upstream.
+
+Currently we assumed the following BPF to eBPF register mapping:
+
+ - BPF_REG_A -> BPF_REG_7
+ - BPF_REG_X -> BPF_REG_8
+
+Unfortunately this mapping is wrong. The correct mapping is:
+
+ - BPF_REG_A -> BPF_REG_0
+ - BPF_REG_X -> BPF_REG_7
+
+So clear the correct registers and use the BPF_REG_A and BPF_REG_X
+macros instead of BPF_REG_0/7.
+
+Fixes: 054623105728 ("s390/bpf: Add s390x eBPF JIT compiler backend")
+Signed-off-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/net/bpf_jit_comp.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -415,13 +415,13 @@ static void bpf_jit_prologue(struct bpf_
+ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
+ BPF_REG_1, offsetof(struct sk_buff, data));
+ }
+- /* BPF compatibility: clear A (%b7) and X (%b8) registers */
+- if (REG_SEEN(BPF_REG_7))
+- /* lghi %b7,0 */
+- EMIT4_IMM(0xa7090000, BPF_REG_7, 0);
+- if (REG_SEEN(BPF_REG_8))
+- /* lghi %b8,0 */
+- EMIT4_IMM(0xa7090000, BPF_REG_8, 0);
++ /* BPF compatibility: clear A (%b0) and X (%b7) registers */
++ if (REG_SEEN(BPF_REG_A))
++ /* lghi %ba,0 */
++ EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
++ if (REG_SEEN(BPF_REG_X))
++ /* lghi %bx,0 */
++ EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
+ }
+
+ /*
--- /dev/null
+From 0b991f5cdcd6201e5401f83ca3a672343c3bfc49 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Mon, 27 Jul 2015 09:53:49 +0200
+Subject: s390/cachinfo: add missing facility check to init_cache_level()
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit 0b991f5cdcd6201e5401f83ca3a672343c3bfc49 upstream.
+
+Stephen Powell reported the following crash on a z890 machine:
+
+Kernel BUG at 00000000001219d0 [verbose debug info unavailable]
+illegal operation: 0001 ilc:3 [#1] SMP
+Krnl PSW : 0704e00180000000 00000000001219d0 (init_cache_level+0x38/0xe0)
+ R:0 T:1 IO:1 EX:1 Key:0 M:1 W:0 P:0 AS:3 CC:2 PM:0 EA:3
+Krnl Code: 00000000001219c2: a7840056 brc 8,121a6e
+ 00000000001219c6: a7190000 lghi %r1,0
+ #00000000001219ca: eb101000004c ecag %r1,%r0,0(%r1)
+ >00000000001219d0: a7390000 lghi %r3,0
+ 00000000001219d4: e310f0a00024 stg %r1,160(%r15)
+ 00000000001219da: a7080000 lhi %r0,0
+ 00000000001219de: a7b9f000 lghi %r11,-4096
+ 00000000001219e2: c0a0002899d9 larl %r10,634d94
+Call Trace:
+ [<0000000000478ee2>] detect_cache_attributes+0x2a/0x2b8
+ [<000000000097c9b0>] cacheinfo_sysfs_init+0x60/0xc8
+ [<00000000001001c0>] do_one_initcall+0x98/0x1c8
+ [<000000000094fdc2>] kernel_init_freeable+0x212/0x2d8
+ [<000000000062352e>] kernel_init+0x26/0x118
+ [<000000000062fd2e>] kernel_thread_starter+0x6/0xc
+
+The illegal operation was executed because of a missing facility check,
+which should have made sure that the ECAG execution would only be executed
+on machines which have the general-instructions-extension facility
+installed.
+
+Reported-and-tested-by: Stephen Powell <zlinuxman@wowway.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/cache.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/s390/kernel/cache.c
++++ b/arch/s390/kernel/cache.c
+@@ -138,6 +138,8 @@ int init_cache_level(unsigned int cpu)
+ union cache_topology ct;
+ enum cache_type ctype;
+
++ if (!test_facility(34))
++ return -EOPNOTSUPP;
+ if (!this_cpu_ci)
+ return -EINVAL;
+ ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
--- /dev/null
+From cad49cfc44a5160e3fa09b18e4e7f7cacd13f27d Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Tue, 7 Jul 2015 08:40:49 +0200
+Subject: s390/nmi: fix vector register corruption
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit cad49cfc44a5160e3fa09b18e4e7f7cacd13f27d upstream.
+
+If a machine check happens, the machine has the vector facility installed
+and the extended save area exists, the cpu will save vector register
+contents into the extended save area. This is regardless of control
+register 0 contents, which enables and disables the vector facility during
+runtime.
+
+On each machine check we should validate the vector registers. The current
+code however tries to validate the registers only if the running task is
+using vector registers in user space.
+
+However even the current code is broken and causes vector register
+corruption on machine checks, if user space uses them:
+the prefix area contains a pointer (absolute address) to the machine check
+extended save area. In order to save some space the save area was put into
+an unused area of the second prefix page.
+When validating vector register contents the code uses the absolute address
+of the extended save area, which is wrong. Due to prefixing the vector
+instructions will then access contents using absolute addresses instead
+of real addresses, where the machine stored the contents.
+
+If the above would work there is still the problem that register validation
+would only happen if user space uses vector registers. If kernel space uses
+them also, this may also lead to vector register content corruption:
+if the kernel makes use of vector instructions, but the current running
+user space context does not, the machine check handler will validate
+floating point registers instead of vector registers.
+Given the fact that writing to a floating point register may change the
+upper half of the corresponding vector register, we also experience vector
+register corruption in this case.
+
+Fix all of these issues, and always validate vector registers on each
+machine check, if the machine has the vector facility installed and the
+extended save area is defined.
+
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/ctl_reg.h | 5 +++
+ arch/s390/kernel/nmi.c | 51 +++++++++++++++++++++++-----------------
+ 2 files changed, 34 insertions(+), 22 deletions(-)
+
+--- a/arch/s390/include/asm/ctl_reg.h
++++ b/arch/s390/include/asm/ctl_reg.h
+@@ -57,7 +57,10 @@ union ctlreg0 {
+ unsigned long lap : 1; /* Low-address-protection control */
+ unsigned long : 4;
+ unsigned long edat : 1; /* Enhanced-DAT-enablement control */
+- unsigned long : 23;
++ unsigned long : 4;
++ unsigned long afp : 1; /* AFP-register control */
++ unsigned long vx : 1; /* Vector enablement control */
++ unsigned long : 17;
+ };
+ };
+
+--- a/arch/s390/kernel/nmi.c
++++ b/arch/s390/kernel/nmi.c
+@@ -21,6 +21,7 @@
+ #include <asm/nmi.h>
+ #include <asm/crw.h>
+ #include <asm/switch_to.h>
++#include <asm/ctl_reg.h>
+
+ struct mcck_struct {
+ int kill_task;
+@@ -129,26 +130,30 @@ static int notrace s390_revalidate_regis
+ } else
+ asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
+
+- asm volatile(
+- " ld 0,0(%0)\n"
+- " ld 1,8(%0)\n"
+- " ld 2,16(%0)\n"
+- " ld 3,24(%0)\n"
+- " ld 4,32(%0)\n"
+- " ld 5,40(%0)\n"
+- " ld 6,48(%0)\n"
+- " ld 7,56(%0)\n"
+- " ld 8,64(%0)\n"
+- " ld 9,72(%0)\n"
+- " ld 10,80(%0)\n"
+- " ld 11,88(%0)\n"
+- " ld 12,96(%0)\n"
+- " ld 13,104(%0)\n"
+- " ld 14,112(%0)\n"
+- " ld 15,120(%0)\n"
+- : : "a" (fpt_save_area));
+- /* Revalidate vector registers */
+- if (MACHINE_HAS_VX && current->thread.vxrs) {
++ if (!MACHINE_HAS_VX) {
++ /* Revalidate floating point registers */
++ asm volatile(
++ " ld 0,0(%0)\n"
++ " ld 1,8(%0)\n"
++ " ld 2,16(%0)\n"
++ " ld 3,24(%0)\n"
++ " ld 4,32(%0)\n"
++ " ld 5,40(%0)\n"
++ " ld 6,48(%0)\n"
++ " ld 7,56(%0)\n"
++ " ld 8,64(%0)\n"
++ " ld 9,72(%0)\n"
++ " ld 10,80(%0)\n"
++ " ld 11,88(%0)\n"
++ " ld 12,96(%0)\n"
++ " ld 13,104(%0)\n"
++ " ld 14,112(%0)\n"
++ " ld 15,120(%0)\n"
++ : : "a" (fpt_save_area));
++ } else {
++ /* Revalidate vector registers */
++ union ctlreg0 cr0;
++
+ if (!mci->vr) {
+ /*
+ * Vector registers can't be restored and therefore
+@@ -156,8 +161,12 @@ static int notrace s390_revalidate_regis
+ */
+ kill_task = 1;
+ }
++ cr0.val = S390_lowcore.cregs_save_area[0];
++ cr0.afp = cr0.vx = 1;
++ __ctl_load(cr0.val, 0, 0);
+ restore_vx_regs((__vector128 *)
+- S390_lowcore.vector_save_area_addr);
++ &S390_lowcore.vector_save_area);
++ __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
+ }
+ /* Revalidate access registers */
+ asm volatile(
--- /dev/null
+From e47994dd44bcb4a77b4152bd0eada585934703c0 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Mon, 6 Jul 2015 15:02:37 +0200
+Subject: s390/process: fix sfpc inline assembly
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit e47994dd44bcb4a77b4152bd0eada585934703c0 upstream.
+
+The sfpc inline assembly within execve_tail() may incorrectly set bits
+28-31 of the sfpc instruction to a value which is not zero.
+These bits however are currently unused and therefore should be zero
+so we won't get surprised if these bits will be used in the future.
+
+Therefore remove the second operand from the inline assembly.
+
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/process.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -163,7 +163,7 @@ int copy_thread(unsigned long clone_flag
+ asmlinkage void execve_tail(void)
+ {
+ current->thread.fp_regs.fpc = 0;
+- asm volatile("sfpc %0,%0" : : "d" (0));
++ asm volatile("sfpc %0" : : "d" (0));
+ }
+
+ /*
--- /dev/null
+From f9c87a6f46d508eae0d9ae640be98d50f237f827 Mon Sep 17 00:00:00 2001
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Mon, 6 Jul 2015 17:58:19 +0200
+Subject: s390/sclp: clear upper register halves in _sclp_print_early
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+commit f9c87a6f46d508eae0d9ae640be98d50f237f827 upstream.
+
+If the kernel is compiled with gcc 5.1 and the XZ compression option
+the decompress_kernel function calls _sclp_print_early in 64-bit mode
+while the content of the upper register half of %r6 is non-zero.
+This causes a specification exception on the servc instruction in
+_sclp_servc.
+
+The _sclp_print_early function saves and restores the upper registers
+halves but it fails to clear them for the 31-bit code of the mini sclp
+driver.
+
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/sclp.S | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/s390/kernel/sclp.S
++++ b/arch/s390/kernel/sclp.S
+@@ -270,6 +270,8 @@ ENTRY(_sclp_print_early)
+ jno .Lesa2
+ ahi %r15,-80
+ stmh %r6,%r15,96(%r15) # store upper register halves
++ basr %r13,0
++ lmh %r0,%r15,.Lzeroes-.(%r13) # clear upper register halves
+ .Lesa2:
+ lr %r10,%r2 # save string pointer
+ lhi %r2,0
+@@ -291,6 +293,8 @@ ENTRY(_sclp_print_early)
+ .Lesa3:
+ lm %r6,%r15,120(%r15) # restore registers
+ br %r14
++.Lzeroes:
++ .fill 64,4,0
+
+ .LwritedataS4:
+ .long 0x00760005 # SCLP command for write data
can-rcar_can-print-signed-irq.patch
can-mcp251x-fix-resume-when-device-is-down.patch
freeing-unlinked-file-indefinitely-delayed.patch
+x86-init-clear-init_level4_pgt-earlier.patch
+x86-kasan-fix-kasan-shadow-region-page-tables.patch
+x86-kasan-flush-tlbs-after-switching-cr3.patch
+x86-kasan-fix-boot-crash-on-amd-processors.patch
+crypto-omap-des-fix-unmapping-of-dma-channels.patch
+s390-process-fix-sfpc-inline-assembly.patch
+s390-sclp-clear-upper-register-halves-in-_sclp_print_early.patch
+s390-nmi-fix-vector-register-corruption.patch
+s390-bpf-clear-correct-bpf-accumulator-register.patch
+s390-cachinfo-add-missing-facility-check-to-init_cache_level.patch
+arc-override-toplevel-default-o2-with-o3.patch
+arc-make-sure-instruction_pointer-returns-unsigned-value.patch
+kbuild-allow-arch-makefiles-to-override-cpp-ld-c-flags.patch
--- /dev/null
+From d0f77d4d04b222a817925d33ba3589b190bfa863 Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin <a.ryabinin@samsung.com>
+Date: Thu, 2 Jul 2015 12:09:33 +0300
+Subject: x86/init: Clear 'init_level4_pgt' earlier
+
+From: Andrey Ryabinin <a.ryabinin@samsung.com>
+
+commit d0f77d4d04b222a817925d33ba3589b190bfa863 upstream.
+
+Currently x86_64_start_kernel() has two KASAN related
+function calls. The first call maps shadow to early_level4_pgt,
+the second maps shadow to init_level4_pgt.
+
+If we move clear_page(init_level4_pgt) earlier, we could hide
+KASAN low level detail from generic x86_64 initialization code.
+The next patch will do it.
+
+Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
+Cc: Alexander Popov <alpopov@ptsecurity.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Konovalov <adech.fo@gmail.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1435828178-10975-2-git-send-email-a.ryabinin@samsung.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/head64.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -166,6 +166,8 @@ asmlinkage __visible void __init x86_64_
+ /* clear bss before set_intr_gate with early_idt_handler */
+ clear_bss();
+
++ clear_page(init_level4_pgt);
++
+ for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
+ set_intr_gate(i, early_idt_handler_array[i]);
+ load_idt((const struct desc_ptr *)&idt_descr);
+@@ -177,7 +179,6 @@ asmlinkage __visible void __init x86_64_
+ */
+ load_ucode_bsp();
+
+- clear_page(init_level4_pgt);
+ /* set init_level4_pgt kernel high mapping*/
+ init_level4_pgt[511] = early_level4_pgt[511];
+
--- /dev/null
+From d4f86beacc21d538dc41e1fc75a22e084f547edf Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin <a.ryabinin@samsung.com>
+Date: Thu, 2 Jul 2015 12:09:36 +0300
+Subject: x86/kasan: Fix boot crash on AMD processors
+
+From: Andrey Ryabinin <a.ryabinin@samsung.com>
+
+commit d4f86beacc21d538dc41e1fc75a22e084f547edf upstream.
+
+While populating zero shadow wrong bits in upper level page
+tables used. __PAGE_KERNEL_RO that was used for pgd/pud/pmd has
+_PAGE_BIT_GLOBAL set. Global bit is present only in the lowest
+level of the page translation hierarchy (ptes), and it should be
+zero in upper levels.
+
+This bug seems doesn't cause any troubles on Intel cpus, while
+on AMDs it cause kernel crash on boot.
+
+Use _KERNPG_TABLE bits for pgds/puds/pmds to fix this.
+
+Reported-by: Borislav Petkov <bp@alien8.de>
+Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
+Cc: Alexander Popov <alpopov@ptsecurity.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Konovalov <adech.fo@gmail.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1435828178-10975-5-git-send-email-a.ryabinin@samsung.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/kasan_init_64.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/mm/kasan_init_64.c
++++ b/arch/x86/mm/kasan_init_64.c
+@@ -85,7 +85,7 @@ static int __init zero_pmd_populate(pud_
+ while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
+ WARN_ON(!pmd_none(*pmd));
+ set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
+- | __PAGE_KERNEL_RO));
++ | _KERNPG_TABLE));
+ addr += PMD_SIZE;
+ pmd = pmd_offset(pud, addr);
+ }
+@@ -111,7 +111,7 @@ static int __init zero_pud_populate(pgd_
+ while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
+ WARN_ON(!pud_none(*pud));
+ set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
+- | __PAGE_KERNEL_RO));
++ | _KERNPG_TABLE));
+ addr += PUD_SIZE;
+ pud = pud_offset(pgd, addr);
+ }
+@@ -136,7 +136,7 @@ static int __init zero_pgd_populate(unsi
+ while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
+ WARN_ON(!pgd_none(*pgd));
+ set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
+- | __PAGE_KERNEL_RO));
++ | _KERNPG_TABLE));
+ addr += PGDIR_SIZE;
+ pgd = pgd_offset_k(addr);
+ }
--- /dev/null
+From 5d5aa3cfca5cf74cd928daf3674642e6004328d1 Mon Sep 17 00:00:00 2001
+From: Alexander Popov <alpopov@ptsecurity.com>
+Date: Thu, 2 Jul 2015 12:09:34 +0300
+Subject: x86/kasan: Fix KASAN shadow region page tables
+
+From: Alexander Popov <alpopov@ptsecurity.com>
+
+commit 5d5aa3cfca5cf74cd928daf3674642e6004328d1 upstream.
+
+Currently KASAN shadow region page tables created without
+respect of physical offset (phys_base). This causes kernel halt
+when phys_base is not zero.
+
+So let's initialize KASAN shadow region page tables in
+kasan_early_init() using __pa_nodebug() which considers
+phys_base.
+
+This patch also separates x86_64_start_kernel() from KASAN low
+level details by moving kasan_map_early_shadow(init_level4_pgt)
+into kasan_early_init().
+
+Remove the comment before clear_bss() which stopped bringing
+much profit to the code readability. Otherwise describing all
+the new order dependencies would be too verbose.
+
+Signed-off-by: Alexander Popov <alpopov@ptsecurity.com>
+Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Konovalov <adech.fo@gmail.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1435828178-10975-3-git-send-email-a.ryabinin@samsung.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/kasan.h | 8 ++------
+ arch/x86/kernel/head64.c | 7 ++-----
+ arch/x86/kernel/head_64.S | 29 -----------------------------
+ arch/x86/mm/kasan_init_64.c | 36 ++++++++++++++++++++++++++++++++++--
+ 4 files changed, 38 insertions(+), 42 deletions(-)
+
+--- a/arch/x86/include/asm/kasan.h
++++ b/arch/x86/include/asm/kasan.h
+@@ -14,15 +14,11 @@
+
+ #ifndef __ASSEMBLY__
+
+-extern pte_t kasan_zero_pte[];
+-extern pte_t kasan_zero_pmd[];
+-extern pte_t kasan_zero_pud[];
+-
+ #ifdef CONFIG_KASAN
+-void __init kasan_map_early_shadow(pgd_t *pgd);
++void __init kasan_early_init(void);
+ void __init kasan_init(void);
+ #else
+-static inline void kasan_map_early_shadow(pgd_t *pgd) { }
++static inline void kasan_early_init(void) { }
+ static inline void kasan_init(void) { }
+ #endif
+
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -161,13 +161,12 @@ asmlinkage __visible void __init x86_64_
+ /* Kill off the identity-map trampoline */
+ reset_early_page_tables();
+
+- kasan_map_early_shadow(early_level4_pgt);
+-
+- /* clear bss before set_intr_gate with early_idt_handler */
+ clear_bss();
+
+ clear_page(init_level4_pgt);
+
++ kasan_early_init();
++
+ for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
+ set_intr_gate(i, early_idt_handler_array[i]);
+ load_idt((const struct desc_ptr *)&idt_descr);
+@@ -182,8 +181,6 @@ asmlinkage __visible void __init x86_64_
+ /* set init_level4_pgt kernel high mapping*/
+ init_level4_pgt[511] = early_level4_pgt[511];
+
+- kasan_map_early_shadow(init_level4_pgt);
+-
+ x86_64_start_reservations(real_mode_data);
+ }
+
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -516,38 +516,9 @@ ENTRY(phys_base)
+ /* This must match the first entry in level2_kernel_pgt */
+ .quad 0x0000000000000000
+
+-#ifdef CONFIG_KASAN
+-#define FILL(VAL, COUNT) \
+- .rept (COUNT) ; \
+- .quad (VAL) ; \
+- .endr
+-
+-NEXT_PAGE(kasan_zero_pte)
+- FILL(kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE, 512)
+-NEXT_PAGE(kasan_zero_pmd)
+- FILL(kasan_zero_pte - __START_KERNEL_map + _KERNPG_TABLE, 512)
+-NEXT_PAGE(kasan_zero_pud)
+- FILL(kasan_zero_pmd - __START_KERNEL_map + _KERNPG_TABLE, 512)
+-
+-#undef FILL
+-#endif
+-
+-
+ #include "../../x86/xen/xen-head.S"
+
+ __PAGE_ALIGNED_BSS
+ NEXT_PAGE(empty_zero_page)
+ .skip PAGE_SIZE
+
+-#ifdef CONFIG_KASAN
+-/*
+- * This page used as early shadow. We don't use empty_zero_page
+- * at early stages, stack instrumentation could write some garbage
+- * to this page.
+- * Latter we reuse it as zero shadow for large ranges of memory
+- * that allowed to access, but not instrumented by kasan
+- * (vmalloc/vmemmap ...).
+- */
+-NEXT_PAGE(kasan_zero_page)
+- .skip PAGE_SIZE
+-#endif
+--- a/arch/x86/mm/kasan_init_64.c
++++ b/arch/x86/mm/kasan_init_64.c
+@@ -11,7 +11,19 @@
+ extern pgd_t early_level4_pgt[PTRS_PER_PGD];
+ extern struct range pfn_mapped[E820_X_MAX];
+
+-extern unsigned char kasan_zero_page[PAGE_SIZE];
++static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
++static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
++static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
++
++/*
++ * This page used as early shadow. We don't use empty_zero_page
++ * at early stages, stack instrumentation could write some garbage
++ * to this page.
++ * Latter we reuse it as zero shadow for large ranges of memory
++ * that allowed to access, but not instrumented by kasan
++ * (vmalloc/vmemmap ...).
++ */
++static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
+
+ static int __init map_range(struct range *range)
+ {
+@@ -36,7 +48,7 @@ static void __init clear_pgds(unsigned l
+ pgd_clear(pgd_offset_k(start));
+ }
+
+-void __init kasan_map_early_shadow(pgd_t *pgd)
++static void __init kasan_map_early_shadow(pgd_t *pgd)
+ {
+ int i;
+ unsigned long start = KASAN_SHADOW_START;
+@@ -166,6 +178,26 @@ static struct notifier_block kasan_die_n
+ };
+ #endif
+
++void __init kasan_early_init(void)
++{
++ int i;
++ pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
++ pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
++ pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
++
++ for (i = 0; i < PTRS_PER_PTE; i++)
++ kasan_zero_pte[i] = __pte(pte_val);
++
++ for (i = 0; i < PTRS_PER_PMD; i++)
++ kasan_zero_pmd[i] = __pmd(pmd_val);
++
++ for (i = 0; i < PTRS_PER_PUD; i++)
++ kasan_zero_pud[i] = __pud(pud_val);
++
++ kasan_map_early_shadow(early_level4_pgt);
++ kasan_map_early_shadow(init_level4_pgt);
++}
++
+ void __init kasan_init(void)
+ {
+ int i;
--- /dev/null
+From 241d2c54c62fa0939fc9a9512b48ac3434e90a89 Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin <a.ryabinin@samsung.com>
+Date: Thu, 2 Jul 2015 12:09:35 +0300
+Subject: x86/kasan: Flush TLBs after switching CR3
+
+From: Andrey Ryabinin <a.ryabinin@samsung.com>
+
+commit 241d2c54c62fa0939fc9a9512b48ac3434e90a89 upstream.
+
+load_cr3() doesn't cause tlb_flush if PGE enabled.
+
+This may cause tons of false positive reports spamming the
+kernel to death.
+
+To fix this __flush_tlb_all() should be called explicitly
+after CR3 changed.
+
+Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
+Cc: Alexander Popov <alpopov@ptsecurity.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Konovalov <adech.fo@gmail.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1435828178-10975-4-git-send-email-a.ryabinin@samsung.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/kasan_init_64.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/mm/kasan_init_64.c
++++ b/arch/x86/mm/kasan_init_64.c
+@@ -208,6 +208,7 @@ void __init kasan_init(void)
+
+ memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
+ load_cr3(early_level4_pgt);
++ __flush_tlb_all();
+
+ clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+@@ -234,5 +235,6 @@ void __init kasan_init(void)
+ memset(kasan_zero_page, 0, PAGE_SIZE);
+
+ load_cr3(init_level4_pgt);
++ __flush_tlb_all();
+ init_task.kasan_depth = 0;
+ }