--- /dev/null
+From b7ede5a1f5905ac394cc8e61712a13e3c5cb7b8f Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Wed, 22 Feb 2017 19:40:12 +0100
+Subject: ARM: 8662/1: module: split core and init PLT sections
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit b7ede5a1f5905ac394cc8e61712a13e3c5cb7b8f upstream.
+
+Since commit 35fa91eed817 ("ARM: kernel: merge core and init PLTs"),
+the ARM module PLT code allocates all PLT entries in a single core
+section, since the overhead of having a separate init PLT section is
+not justified by the small number of PLT entries usually required for
+init code.
+
+However, the core and init module regions are allocated independently,
+and there is a corner case where the core region may be allocated from
+the VMALLOC region if the dedicated module region is exhausted, but the
+init region, being much smaller, can still be allocated from the module
+region. This puts the PLT entries out of reach of the relocated branch
+instructions, defeating the whole purpose of PLTs.
+
+So split the core and init PLT regions, and name the latter ".init.plt"
+so it gets allocated along with (and sufficiently close to) the .init
+sections that it serves. Also, given that init PLT entries may need to
+be emitted for branches that target the core module, modify the logic
+that disregards defined symbols to only disregard symbols that are
+defined in the same section.
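+
+As a rough sketch of the range constraint at play (a hedged
+illustration, not code from this patch; an ARM B/BL branch reaches
+roughly +/-32 MB):
+
+	/* hypothetical helper: can 'from' reach 'to' with a direct branch? */
+	static bool branch_in_range(unsigned long from, unsigned long to)
+	{
+		long offset = (long)to - (long)from;
+
+		return offset >= -0x02000000L && offset < 0x02000000L;
+	}
+
+With the core region in VMALLOC and the init region in the module area,
+the two can easily be further apart than that, so init code needs PLT
+entries of its own, placed next to .init.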
+
+Fixes: 35fa91eed817 ("ARM: kernel: merge core and init PLTs")
+Reported-by: Angus Clark <angus@angusclark.org>
+Tested-by: Angus Clark <angus@angusclark.org>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/module.h | 9 +++-
+ arch/arm/kernel/module-plts.c | 85 +++++++++++++++++++++++++++++-------------
+ arch/arm/kernel/module.lds | 1
+ 3 files changed, 67 insertions(+), 28 deletions(-)
+
+--- a/arch/arm/include/asm/module.h
++++ b/arch/arm/include/asm/module.h
+@@ -18,13 +18,18 @@ enum {
+ };
+ #endif
+
++struct mod_plt_sec {
++ struct elf32_shdr *plt;
++ int plt_count;
++};
++
+ struct mod_arch_specific {
+ #ifdef CONFIG_ARM_UNWIND
+ struct unwind_table *unwind[ARM_SEC_MAX];
+ #endif
+ #ifdef CONFIG_ARM_MODULE_PLTS
+- struct elf32_shdr *plt;
+- int plt_count;
++ struct mod_plt_sec core;
++ struct mod_plt_sec init;
+ #endif
+ };
+
+--- a/arch/arm/kernel/module-plts.c
++++ b/arch/arm/kernel/module-plts.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
++ * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -31,9 +31,17 @@ struct plt_entries {
+ u32 lit[PLT_ENT_COUNT];
+ };
+
++static bool in_init(const struct module *mod, unsigned long loc)
++{
++ return loc - (u32)mod->init_layout.base < mod->init_layout.size;
++}
++
+ u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
+ {
+- struct plt_entries *plt = (struct plt_entries *)mod->arch.plt->sh_addr;
++ struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
++ &mod->arch.init;
++
++ struct plt_entries *plt = (struct plt_entries *)pltsec->plt->sh_addr;
+ int idx = 0;
+
+ /*
+@@ -41,9 +49,9 @@ u32 get_module_plt(struct module *mod, u
+ * relocations are sorted, this will be the last entry we allocated.
+ * (if one exists).
+ */
+- if (mod->arch.plt_count > 0) {
+- plt += (mod->arch.plt_count - 1) / PLT_ENT_COUNT;
+- idx = (mod->arch.plt_count - 1) % PLT_ENT_COUNT;
++ if (pltsec->plt_count > 0) {
++ plt += (pltsec->plt_count - 1) / PLT_ENT_COUNT;
++ idx = (pltsec->plt_count - 1) % PLT_ENT_COUNT;
+
+ if (plt->lit[idx] == val)
+ return (u32)&plt->ldr[idx];
+@@ -53,8 +61,8 @@ u32 get_module_plt(struct module *mod, u
+ plt++;
+ }
+
+- mod->arch.plt_count++;
+- BUG_ON(mod->arch.plt_count * PLT_ENT_SIZE > mod->arch.plt->sh_size);
++ pltsec->plt_count++;
++ BUG_ON(pltsec->plt_count * PLT_ENT_SIZE > pltsec->plt->sh_size);
+
+ if (!idx)
+ /* Populate a new set of entries */
+@@ -129,7 +137,7 @@ static bool duplicate_rel(Elf32_Addr bas
+
+ /* Count how many PLT entries we may need */
+ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
+- const Elf32_Rel *rel, int num)
++ const Elf32_Rel *rel, int num, Elf32_Word dstidx)
+ {
+ unsigned int ret = 0;
+ const Elf32_Sym *s;
+@@ -144,13 +152,17 @@ static unsigned int count_plts(const Elf
+ case R_ARM_THM_JUMP24:
+ /*
+ * We only have to consider branch targets that resolve
+- * to undefined symbols. This is not simply a heuristic,
+- * it is a fundamental limitation, since the PLT itself
+- * is part of the module, and needs to be within range
+- * as well, so modules can never grow beyond that limit.
++ * to symbols that are defined in a different section.
++ * This is not simply a heuristic, it is a fundamental
++ * limitation, since there is no guaranteed way to emit
++ * PLT entries sufficiently close to the branch if the
++ * section size exceeds the range of a branch
++ * instruction. So ignore relocations against defined
++ * symbols if they live in the same section as the
++ * relocation target.
+ */
+ s = syms + ELF32_R_SYM(rel[i].r_info);
+- if (s->st_shndx != SHN_UNDEF)
++ if (s->st_shndx == dstidx)
+ break;
+
+ /*
+@@ -161,7 +173,12 @@ static unsigned int count_plts(const Elf
+ * So we need to support them, but there is no need to
+ * take them into consideration when trying to optimize
+ * this code. So let's only check for duplicates when
+- * the addend is zero.
++ * the addend is zero. (Note that calls into the core
++ * module via init PLT entries could involve section
++ * relative symbol references with non-zero addends, for
++ * which we may end up emitting duplicates, but the init
++ * PLT is released along with the rest of the .init
++ * region as soon as module loading completes.)
+ */
+ if (!is_zero_addend_relocation(base, rel + i) ||
+ !duplicate_rel(base, rel, i))
+@@ -174,7 +191,8 @@ static unsigned int count_plts(const Elf
+ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+ char *secstrings, struct module *mod)
+ {
+- unsigned long plts = 0;
++ unsigned long core_plts = 0;
++ unsigned long init_plts = 0;
+ Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
+ Elf32_Sym *syms = NULL;
+
+@@ -184,13 +202,15 @@ int module_frob_arch_sections(Elf_Ehdr *
+ */
+ for (s = sechdrs; s < sechdrs_end; ++s) {
+ if (strcmp(".plt", secstrings + s->sh_name) == 0)
+- mod->arch.plt = s;
++ mod->arch.core.plt = s;
++ else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
++ mod->arch.init.plt = s;
+ else if (s->sh_type == SHT_SYMTAB)
+ syms = (Elf32_Sym *)s->sh_addr;
+ }
+
+- if (!mod->arch.plt) {
+- pr_err("%s: module PLT section missing\n", mod->name);
++ if (!mod->arch.core.plt || !mod->arch.init.plt) {
++ pr_err("%s: module PLT section(s) missing\n", mod->name);
+ return -ENOEXEC;
+ }
+ if (!syms) {
+@@ -213,16 +233,29 @@ int module_frob_arch_sections(Elf_Ehdr *
+ /* sort by type and symbol index */
+ sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);
+
+- plts += count_plts(syms, dstsec->sh_addr, rels, numrels);
++ if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
++ core_plts += count_plts(syms, dstsec->sh_addr, rels,
++ numrels, s->sh_info);
++ else
++ init_plts += count_plts(syms, dstsec->sh_addr, rels,
++ numrels, s->sh_info);
+ }
+
+- mod->arch.plt->sh_type = SHT_NOBITS;
+- mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+- mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
+- mod->arch.plt->sh_size = round_up(plts * PLT_ENT_SIZE,
+- sizeof(struct plt_entries));
+- mod->arch.plt_count = 0;
++ mod->arch.core.plt->sh_type = SHT_NOBITS;
++ mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
++ mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
++ mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
++ sizeof(struct plt_entries));
++ mod->arch.core.plt_count = 0;
++
++ mod->arch.init.plt->sh_type = SHT_NOBITS;
++ mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
++ mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
++ mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
++ sizeof(struct plt_entries));
++ mod->arch.init.plt_count = 0;
+
+- pr_debug("%s: plt=%x\n", __func__, mod->arch.plt->sh_size);
++ pr_debug("%s: plt=%x, init.plt=%x\n", __func__,
++ mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
+ return 0;
+ }
+--- a/arch/arm/kernel/module.lds
++++ b/arch/arm/kernel/module.lds
+@@ -1,3 +1,4 @@
+ SECTIONS {
+ .plt : { BYTE(0) }
++ .init.plt : { BYTE(0) }
+ }
--- /dev/null
+From 6d80594936914e798b1b54b3bfe4bd68d8418966 Mon Sep 17 00:00:00 2001
+From: Vladimir Murzin <vladimir.murzin@arm.com>
+Date: Mon, 24 Apr 2017 10:40:48 +0100
+Subject: ARM: 8670/1: V7M: Do not corrupt vector table around v7m_invalidate_l1 call
+
+From: Vladimir Murzin <vladimir.murzin@arm.com>
+
+commit 6d80594936914e798b1b54b3bfe4bd68d8418966 upstream.
+
+We save/restore registers around the v7m_invalidate_l1 call to the
+address pointed to by r12, which is the vector table, so the first
+eight entries are overwritten with garbage. We already have the stack
+set up at that stage, so use it to save/restore the registers instead.
+
+Fixes: 6a8146f420be ("ARM: 8609/1: V7M: Add support for the Cortex-M7 processor")
+Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mm/proc-v7m.S | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/mm/proc-v7m.S
++++ b/arch/arm/mm/proc-v7m.S
+@@ -147,10 +147,10 @@ __v7m_setup_cont:
+
+ @ Configure caches (if implemented)
+ teq r8, #0
+- stmneia r12, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6
++ stmneia sp, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6
+ blne v7m_invalidate_l1
+ teq r8, #0 @ re-evalutae condition
+- ldmneia r12, {r0-r6, lr}
++ ldmneia sp, {r0-r6, lr}
+
+ @ Configure the System Control Register to ensure 8-byte stack alignment
+ @ Note the STKALIGN bit is either RW or RAO.
--- /dev/null
+From 9cdd31e5913c1f86dce7e201b086155b3f24896b Mon Sep 17 00:00:00 2001
+From: Ludovic Desroches <ludovic.desroches@microchip.com>
+Date: Mon, 10 Apr 2017 10:25:16 +0200
+Subject: ARM: dts: at91: sama5d3_xplained: fix ADC vref
+
+From: Ludovic Desroches <ludovic.desroches@microchip.com>
+
+commit 9cdd31e5913c1f86dce7e201b086155b3f24896b upstream.
+
+The voltage reference for the ADC is not 3V but 3.3V since it is connected to
+VDDANA.
+
+Signed-off-by: Ludovic Desroches <ludovic.desroches@microchip.com>
+Acked-by: Nicolas Ferre <nicolas.ferre@microchip.com>
+Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/at91-sama5d3_xplained.dts | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+@@ -162,6 +162,7 @@
+ };
+
+ adc0: adc@f8018000 {
++ atmel,adc-vref = <3300>;
+ pinctrl-0 = <
+ &pinctrl_adc0_adtrg
+ &pinctrl_adc0_ad0
--- /dev/null
+From d3df1ec06353e51fc44563d2e7e18d42811af290 Mon Sep 17 00:00:00 2001
+From: Ludovic Desroches <ludovic.desroches@microchip.com>
+Date: Mon, 10 Apr 2017 10:25:17 +0200
+Subject: ARM: dts: at91: sama5d3_xplained: not all ADC channels are available
+
+From: Ludovic Desroches <ludovic.desroches@microchip.com>
+
+commit d3df1ec06353e51fc44563d2e7e18d42811af290 upstream.
+
+Remove ADC channels that are not available by default on the sama5d3_xplained
+board (resistor not populated) in order to avoid confusion.
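+
+(For reference, the mask added below, atmel,adc-channels-used = <0xfe>,
+has bits 1-7 set, i.e. channels AD1-AD7 stay enabled while AD0, AD8 and
+AD9 are dropped, matching the pinctrl entries removed here.)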
+
+Signed-off-by: Ludovic Desroches <ludovic.desroches@microchip.com>
+Acked-by: Nicolas Ferre <nicolas.ferre@microchip.com>
+Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/at91-sama5d3_xplained.dts | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+@@ -163,9 +163,9 @@
+
+ adc0: adc@f8018000 {
+ atmel,adc-vref = <3300>;
++ atmel,adc-channels-used = <0xfe>;
+ pinctrl-0 = <
+ &pinctrl_adc0_adtrg
+- &pinctrl_adc0_ad0
+ &pinctrl_adc0_ad1
+ &pinctrl_adc0_ad2
+ &pinctrl_adc0_ad3
+@@ -173,8 +173,6 @@
+ &pinctrl_adc0_ad5
+ &pinctrl_adc0_ad6
+ &pinctrl_adc0_ad7
+- &pinctrl_adc0_ad8
+- &pinctrl_adc0_ad9
+ >;
+ status = "okay";
+ };
--- /dev/null
+From d8581c7c8be172dac156a19d261f988a72ce596f Mon Sep 17 00:00:00 2001
+From: Leonard Crestez <leonard.crestez@nxp.com>
+Date: Fri, 5 May 2017 14:00:17 +0300
+Subject: ARM: dts: imx6sx-sdb: Remove OPP override
+
+From: Leonard Crestez <leonard.crestez@nxp.com>
+
+commit d8581c7c8be172dac156a19d261f988a72ce596f upstream.
+
+The board file for imx6sx-sdb overrides cpufreq operating points to use
+higher voltages. This is done because the board has a shared rail for
+VDD_ARM_IN and VDD_SOC_IN and when using LDO bypass the shared voltage
+needs to be a value suitable for both ARM and SOC.
+
+This only applies to LDO bypass mode, a feature not present in upstream.
+When LDOs are enabled the effect is to use higher voltages than necessary
+for no good reason.
+
+Setting these higher voltages can make some boards fail to boot with ugly
+semi-random crashes reminiscent of memory corruption. These failures only
+happen on board rev. C; rev. B is reported to still work.
+
+Signed-off-by: Leonard Crestez <leonard.crestez@nxp.com>
+Fixes: 54183bd7f766 ("ARM: imx6sx-sdb: add revb board and make it default")
+Signed-off-by: Shawn Guo <shawnguo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/imx6sx-sdb.dts | 17 -----------------
+ 1 file changed, 17 deletions(-)
+
+--- a/arch/arm/boot/dts/imx6sx-sdb.dts
++++ b/arch/arm/boot/dts/imx6sx-sdb.dts
+@@ -12,23 +12,6 @@
+ model = "Freescale i.MX6 SoloX SDB RevB Board";
+ };
+
+-&cpu0 {
+- operating-points = <
+- /* kHz uV */
+- 996000 1250000
+- 792000 1175000
+- 396000 1175000
+- 198000 1175000
+- >;
+- fsl,soc-operating-points = <
+- /* ARM kHz SOC uV */
+- 996000 1250000
+- 792000 1175000
+- 396000 1175000
+- 198000 1175000
+- >;
+-};
+-
+ &i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
--- /dev/null
+From 501ad27c67ed0b90df465f23d33e9aed64058a47 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 2 May 2017 14:30:38 +0100
+Subject: arm: KVM: Do not use stack-protector to compile HYP code
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 501ad27c67ed0b90df465f23d33e9aed64058a47 upstream.
+
+We like living dangerously. Nothing explicitly forbids the stack
+protector from being used in the HYP code, and distributions routinely
+compile their kernels with it. We're just lucky that no code actually
+triggers the instrumentation.
+
+Let's not try our luck for much longer, and disable stack-protector
+for code living at HYP.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Acked-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kvm/hyp/Makefile | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm/kvm/hyp/Makefile
++++ b/arch/arm/kvm/hyp/Makefile
+@@ -2,6 +2,8 @@
+ # Makefile for Kernel-based Virtual Machine module, HYP part
+ #
+
++ccflags-y += -fno-stack-protector
++
+ KVM=../../../../virt/kvm
+
+ obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
--- /dev/null
+From 55de49f9aa17b0b2b144dd2af587177b9aadf429 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 3 May 2017 16:09:36 +0100
+Subject: arm64: armv8_deprecated: ensure extension of addr
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 55de49f9aa17b0b2b144dd2af587177b9aadf429 upstream.
+
+Our compat swp emulation holds the compat user address in an unsigned
+int, which it passes to __user_swpX_asm(). When a 32-bit value is passed
+in a register, the upper 32 bits of the register are unknown, and we
+must extend the value to 64 bits before we can use it as a base address.
+
+This patch casts the address to unsigned long to ensure it has been
+suitably extended, avoiding the potential issue, and silencing a related
+warning from clang.
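+
+A hedged illustration of the pattern (not the patched __user_swpX_asm()
+macro itself):
+
+	static u32 load_word(unsigned int caddr)	/* 32-bit compat address */
+	{
+		u32 val;
+
+		/* the cast forces a zero-extended 64-bit base register;
+		 * with a plain "r" (caddr), bits [63:32] would be unknown */
+		asm volatile("ldr %w0, [%1]"
+			     : "=r" (val)
+			     : "r" ((unsigned long)caddr)
+			     : "memory");
+		return val;
+	}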
+
+Fixes: bd35a4adc413 ("arm64: Port SWP/SWPB emulation support from arm")
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/armv8_deprecated.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/armv8_deprecated.c
++++ b/arch/arm64/kernel/armv8_deprecated.c
+@@ -309,7 +309,8 @@ static void __init register_insn_emulati
+ ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
+ CONFIG_ARM64_PAN) \
+ : "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
+- : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT), \
++ : "r" ((unsigned long)addr), "i" (-EAGAIN), \
++ "i" (-EFAULT), \
+ "i" (__SWP_LL_SC_LOOPS) \
+ : "memory")
+
--- /dev/null
+From f0e421b1bf7af97f026e1bb8bfe4c5a7a8c08f42 Mon Sep 17 00:00:00 2001
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+Date: Wed, 3 May 2017 16:37:48 +0100
+Subject: arm64: documentation: document tagged pointer stack constraints
+
+From: Kristina Martsenko <kristina.martsenko@arm.com>
+
+commit f0e421b1bf7af97f026e1bb8bfe4c5a7a8c08f42 upstream.
+
+Some kernel features don't currently work if a task puts a non-zero
+address tag in its stack pointer, frame pointer, or frame record entries
+(FP, LR).
+
+For example, with a tagged stack pointer, the kernel can't deliver
+signals to the process, and the task is killed instead. As another
+example, with a tagged frame pointer or frame records, perf fails to
+generate call graphs or resolve symbols.
+
+For now, just document these limitations, instead of finding and fixing
+everything that doesn't work, as it's not known if anyone needs to use
+tags in these places anyway.
+
+In addition, as requested by Dave Martin, generalize the limitations
+into a general kernel address tag policy, and refactor
+tagged-pointers.txt to include it.
+
+Fixes: d50240a5f6ce ("arm64: mm: permit use of tagged pointers at EL0")
+Reviewed-by: Dave Martin <Dave.Martin@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Kristina Martsenko <kristina.martsenko@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/arm64/tagged-pointers.txt | 66 +++++++++++++++++++++++---------
+ 1 file changed, 49 insertions(+), 17 deletions(-)
+
+--- a/Documentation/arm64/tagged-pointers.txt
++++ b/Documentation/arm64/tagged-pointers.txt
+@@ -11,24 +11,56 @@ in AArch64 Linux.
+ The kernel configures the translation tables so that translations made
+ via TTBR0 (i.e. userspace mappings) have the top byte (bits 63:56) of
+ the virtual address ignored by the translation hardware. This frees up
+-this byte for application use, with the following caveats:
++this byte for application use.
+
+- (1) The kernel requires that all user addresses passed to EL1
+- are tagged with tag 0x00. This means that any syscall
+- parameters containing user virtual addresses *must* have
+- their top byte cleared before trapping to the kernel.
+-
+- (2) Non-zero tags are not preserved when delivering signals.
+- This means that signal handlers in applications making use
+- of tags cannot rely on the tag information for user virtual
+- addresses being maintained for fields inside siginfo_t.
+- One exception to this rule is for signals raised in response
+- to watchpoint debug exceptions, where the tag information
+- will be preserved.
+-
+- (3) Special care should be taken when using tagged pointers,
+- since it is likely that C compilers will not hazard two
+- virtual addresses differing only in the upper byte.
++
++Passing tagged addresses to the kernel
++--------------------------------------
++
++All interpretation of userspace memory addresses by the kernel assumes
++an address tag of 0x00.
++
++This includes, but is not limited to, addresses found in:
++
++ - pointer arguments to system calls, including pointers in structures
++ passed to system calls,
++
++ - the stack pointer (sp), e.g. when interpreting it to deliver a
++ signal,
++
++ - the frame pointer (x29) and frame records, e.g. when interpreting
++ them to generate a backtrace or call graph.
++
++Using non-zero address tags in any of these locations may result in an
++error code being returned, a (fatal) signal being raised, or other modes
++of failure.
++
++For these reasons, passing non-zero address tags to the kernel via
++system calls is forbidden, and using a non-zero address tag for sp is
++strongly discouraged.
++
++Programs maintaining a frame pointer and frame records that use non-zero
++address tags may suffer impaired or inaccurate debug and profiling
++visibility.
++
++
++Preserving tags
++---------------
++
++Non-zero tags are not preserved when delivering signals. This means that
++signal handlers in applications making use of tags cannot rely on the
++tag information for user virtual addresses being maintained for fields
++inside siginfo_t. One exception to this rule is for signals raised in
++response to watchpoint debug exceptions, where the tag information will
++be preserved.
+
+ The architecture prevents the use of a tagged PC, so the upper byte will
+ be set to a sign-extension of bit 55 on exception return.
++
++
++Other considerations
++--------------------
++
++Special care should be taken when using tagged pointers, since it is
++likely that C compilers will not hazard two virtual addresses differing
++only in the upper byte.
--- /dev/null
+From 0fbdf9953b41c28845fe8d05007ff09634ee3000 Mon Sep 17 00:00:00 2001
+From: Daniel Lezcano <daniel.lezcano@linaro.org>
+Date: Thu, 16 Mar 2017 15:03:24 +0100
+Subject: arm64: dts: hi6220: Reset the mmc hosts
+
+From: Daniel Lezcano <daniel.lezcano@linaro.org>
+
+commit 0fbdf9953b41c28845fe8d05007ff09634ee3000 upstream.
+
+The MMC hosts could be left in an inconsistent or uninitialized state by
+the firmware. Instead of assuming the firmware did the right thing, let's
+reset the host controllers.
+
+This change fixes a bug where initializing the mmc2/sdio host leads to a
+hung task:
+
+[ 242.704294] INFO: task kworker/7:1:675 blocked for more than 120 seconds.
+[ 242.711129] Not tainted 4.9.0-rc8-00017-gcf0251f #3
+[ 242.716571] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+[ 242.724435] kworker/7:1 D 0 675 2 0x00000000
+[ 242.729973] Workqueue: events_freezable mmc_rescan
+[ 242.734796] Call trace:
+[ 242.737269] [<ffff00000808611c>] __switch_to+0xa8/0xb4
+[ 242.742437] [<ffff000008d07c04>] __schedule+0x1c0/0x67c
+[ 242.747689] [<ffff000008d08254>] schedule+0x40/0xa0
+[ 242.752594] [<ffff000008d0b284>] schedule_timeout+0x1c4/0x35c
+[ 242.758366] [<ffff000008d08e38>] wait_for_common+0xd0/0x15c
+[ 242.763964] [<ffff000008d09008>] wait_for_completion+0x28/0x34
+[ 242.769825] [<ffff000008a1a9f4>] mmc_wait_for_req_done+0x40/0x124
+[ 242.775949] [<ffff000008a1ab98>] mmc_wait_for_req+0xc0/0xf8
+[ 242.781549] [<ffff000008a1ac3c>] mmc_wait_for_cmd+0x6c/0x84
+[ 242.787149] [<ffff000008a26610>] mmc_io_rw_direct_host+0x9c/0x114
+[ 242.793270] [<ffff000008a26aa0>] sdio_reset+0x34/0x7c
+[ 242.798347] [<ffff000008a1d46c>] mmc_rescan+0x2fc/0x360
+
+[ ... ]
+
+Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
+Signed-off-by: Wei Xu <xuwei5@hisilicon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/boot/dts/hisilicon/hi6220.dtsi | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
++++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+@@ -772,6 +772,7 @@
+ clocks = <&sys_ctrl 2>, <&sys_ctrl 1>;
+ clock-names = "ciu", "biu";
+ resets = <&sys_ctrl PERIPH_RSTDIS0_MMC0>;
++ reset-names = "reset";
+ bus-width = <0x8>;
+ vmmc-supply = <&ldo19>;
+ pinctrl-names = "default";
+@@ -795,6 +796,7 @@
+ clocks = <&sys_ctrl 4>, <&sys_ctrl 3>;
+ clock-names = "ciu", "biu";
+ resets = <&sys_ctrl PERIPH_RSTDIS0_MMC1>;
++ reset-names = "reset";
+ vqmmc-supply = <&ldo7>;
+ vmmc-supply = <&ldo10>;
+ bus-width = <0x4>;
+@@ -813,6 +815,7 @@
+ clocks = <&sys_ctrl HI6220_MMC2_CIUCLK>, <&sys_ctrl HI6220_MMC2_CLK>;
+ clock-names = "ciu", "biu";
+ resets = <&sys_ctrl PERIPH_RSTDIS0_MMC2>;
++ reset-names = "reset";
+ bus-width = <0x4>;
+ broken-cd;
+ pinctrl-names = "default", "idle";
--- /dev/null
+From 994870bead4ab19087a79492400a5478e2906196 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 3 May 2017 16:09:34 +0100
+Subject: arm64: ensure extension of smp_store_release value
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit 994870bead4ab19087a79492400a5478e2906196 upstream.
+
+When an inline assembly operand's type is narrower than the register it
+is allocated to, the least significant bits of the register (up to the
+operand type's width) are valid, and any other bits are permitted to
+contain any arbitrary value. This aligns with the AAPCS64 parameter
+passing rules.
+
+Our __smp_store_release() implementation does not account for this, and
+implicitly assumes that operands have been zero-extended to the width of
+the type being stored to. Thus, we may store unknown values to memory
+when the value type is narrower than the pointer type (e.g. when storing
+a char to a long).
+
+This patch fixes the issue by casting the value operand to the same
+width as the pointer operand in all cases, which ensures that the value
+is zero-extended as we expect. We use the same union trickery as
+__smp_load_acquire and {READ,WRITE}_ONCE() to avoid GCC complaining that
+pointers are potentially cast to narrower width integers in unreachable
+paths.
+
+A whitespace issue at the top of __smp_store_release() is also
+corrected.
+
+No changes are necessary for __smp_load_acquire(). Load instructions
+implicitly clear any upper bits of the register, and the compiler will
+only consider the least significant bits of the register as valid
+regardless.
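+
+A hedged example of the case being fixed (value narrower than pointee):
+
+	static long shared;
+
+	void publish(void)
+	{
+		char v = 1;
+
+		/* sizeof(shared) == 8: previously the "r" (v) operand
+		 * left bits [63:8] of the source register unknown, so
+		 * the stlr could write garbage to the upper bytes */
+		smp_store_release(&shared, v);
+	}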
+
+Fixes: 47933ad41a86 ("arch: Introduce smp_load_acquire(), smp_store_release()")
+Fixes: 878a84d5a8a1 ("arm64: add missing data types in smp_load_acquire/smp_store_release")
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Matthias Kaehlcke <mka@chromium.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/barrier.h | 20 +++++++++++++++-----
+ 1 file changed, 15 insertions(+), 5 deletions(-)
+
+--- a/arch/arm64/include/asm/barrier.h
++++ b/arch/arm64/include/asm/barrier.h
+@@ -42,25 +42,35 @@
+ #define __smp_rmb() dmb(ishld)
+ #define __smp_wmb() dmb(ishst)
+
+-#define __smp_store_release(p, v) \
++#define __smp_store_release(p, v) \
+ do { \
++ union { typeof(*p) __val; char __c[1]; } __u = \
++ { .__val = (__force typeof(*p)) (v) }; \
+ compiletime_assert_atomic_type(*p); \
+ switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile ("stlrb %w1, %0" \
+- : "=Q" (*p) : "r" (v) : "memory"); \
++ : "=Q" (*p) \
++ : "r" (*(__u8 *)__u.__c) \
++ : "memory"); \
+ break; \
+ case 2: \
+ asm volatile ("stlrh %w1, %0" \
+- : "=Q" (*p) : "r" (v) : "memory"); \
++ : "=Q" (*p) \
++ : "r" (*(__u16 *)__u.__c) \
++ : "memory"); \
+ break; \
+ case 4: \
+ asm volatile ("stlr %w1, %0" \
+- : "=Q" (*p) : "r" (v) : "memory"); \
++ : "=Q" (*p) \
++ : "r" (*(__u32 *)__u.__c) \
++ : "memory"); \
+ break; \
+ case 8: \
+ asm volatile ("stlr %1, %0" \
+- : "=Q" (*p) : "r" (v) : "memory"); \
++ : "=Q" (*p) \
++ : "r" (*(__u64 *)__u.__c) \
++ : "memory"); \
+ break; \
+ } \
+ } while (0)
--- /dev/null
+From cde13b5dad60471886a3bccb4f4134c647c4a9dc Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 2 May 2017 14:30:37 +0100
+Subject: arm64: KVM: Do not use stack-protector to compile EL2 code
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit cde13b5dad60471886a3bccb4f4134c647c4a9dc upstream.
+
+We like living dangerously. Nothing explicitly forbids the stack
+protector from being used in the EL2 code, and distributions routinely
+compile their kernels with it. We're just lucky that no code actually
+triggers the instrumentation.
+
+Let's not try our luck for much longer, and disable stack-protector
+for code living at EL2.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Acked-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kvm/hyp/Makefile | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm64/kvm/hyp/Makefile
++++ b/arch/arm64/kvm/hyp/Makefile
+@@ -2,6 +2,8 @@
+ # Makefile for Kernel-based Virtual Machine module, HYP part
+ #
+
++ccflags-y += -fno-stack-protector
++
+ KVM=../../../../virt/kvm
+
+ obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
--- /dev/null
+From a06040d7a791a9177581dcf7293941bd92400856 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 3 May 2017 16:09:35 +0100
+Subject: arm64: uaccess: ensure extension of access_ok() addr
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit a06040d7a791a9177581dcf7293941bd92400856 upstream.
+
+Our access_ok() simply hands its arguments over to __range_ok(), which
+implicitly assumes that the addr parameter is 64 bits wide. This isn't
+necessarily true for compat code, which might pass down a 32-bit address
+parameter.
+
+In these cases, we don't have a guarantee that the address has been zero
+extended to 64 bits, and the upper bits of the register may contain
+unknown values, potentially resulting in a spurious failure.
+
+Avoid this by explicitly casting the addr parameter to an unsigned long
+(as is done on other architectures), ensuring that the parameter is
+widened appropriately.
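+
+A hedged sketch of an affected caller (a hypothetical compat path, not
+code from this patch):
+
+	static int check_user_word(compat_uptr_t uptr)
+	{
+		/* the pointer is built from a 32-bit value; before this
+		 * patch __range_ok() could see stale upper register bits
+		 * for such an address and spuriously fail */
+		if (!access_ok(VERIFY_READ, compat_ptr(uptr), sizeof(u32)))
+			return -EFAULT;
+		return 0;
+	}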
+
+Fixes: 0aea86a2176c ("arm64: User access library functions")
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/uaccess.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -93,11 +93,12 @@ static inline void set_fs(mm_segment_t f
+ */
+ #define __range_ok(addr, size) \
+ ({ \
++ unsigned long __addr = (unsigned long __force)(addr); \
+ unsigned long flag, roksum; \
+ __chk_user_ptr(addr); \
+ asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
+ : "=&r" (flag), "=&r" (roksum) \
+- : "1" (addr), "Ir" (size), \
++ : "1" (__addr), "Ir" (size), \
+ "r" (current_thread_info()->addr_limit) \
+ : "cc"); \
+ flag; \
--- /dev/null
+From fee960bed5e857eb126c4e56dd9ff85938356579 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 3 May 2017 16:09:33 +0100
+Subject: arm64: xchg: hazard against entire exchange variable
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit fee960bed5e857eb126c4e56dd9ff85938356579 upstream.
+
+The inline assembly in __XCHG_CASE() uses a +Q constraint to hazard
+against other accesses to the memory location being exchanged. However,
+the pointer passed to the constraint is a u8 pointer, and thus the
+hazard only applies to the first byte of the location.
+
+GCC can take advantage of this, assuming that other portions of the
+location are unchanged, as demonstrated with the following test case:
+
+union u {
+ unsigned long l;
+ unsigned int i[2];
+};
+
+unsigned long update_char_hazard(union u *u)
+{
+ unsigned int a, b;
+
+ a = u->i[1];
+ asm ("str %1, %0" : "+Q" (*(char *)&u->l) : "r" (0UL));
+ b = u->i[1];
+
+ return a ^ b;
+}
+
+unsigned long update_long_hazard(union u *u)
+{
+ unsigned int a, b;
+
+ a = u->i[1];
+ asm ("str %1, %0" : "+Q" (*(long *)&u->l) : "r" (0UL));
+ b = u->i[1];
+
+ return a ^ b;
+}
+
+The linaro 15.08 GCC 5.1.1 toolchain compiles the above as follows when
+using -O2 or above:
+
+0000000000000000 <update_char_hazard>:
+ 0: d2800001 mov x1, #0x0 // #0
+ 4: f9000001 str x1, [x0]
+ 8: d2800000 mov x0, #0x0 // #0
+ c: d65f03c0 ret
+
+0000000000000010 <update_long_hazard>:
+ 10: b9400401 ldr w1, [x0,#4]
+ 14: d2800002 mov x2, #0x0 // #0
+ 18: f9000002 str x2, [x0]
+ 1c: b9400400 ldr w0, [x0,#4]
+ 20: 4a000020 eor w0, w1, w0
+ 24: d65f03c0 ret
+
+This patch fixes the issue by passing an unsigned long pointer into the
++Q constraint, as we do for our cmpxchg code. This may hazard against
+more than is necessary, but this is better than missing a necessary
+hazard.
+
+Fixes: 305d454aaa29 ("arm64: atomics: implement native {relaxed, acquire, release} atomics")
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/cmpxchg.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/cmpxchg.h
++++ b/arch/arm64/include/asm/cmpxchg.h
+@@ -46,7 +46,7 @@ static inline unsigned long __xchg_case_
+ " swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \
+ __nops(3) \
+ " " #nop_lse) \
+- : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) \
++ : "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr) \
+ : "r" (x) \
+ : cl); \
+ \
--- /dev/null
+From 2c4569ca26986d18243f282dd727da27e9adae4c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 11 May 2017 13:54:11 +0200
+Subject: genirq: Fix chained interrupt data ordering
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 2c4569ca26986d18243f282dd727da27e9adae4c upstream.
+
+irq_set_chained_handler_and_data() sets up the chained interrupt and then
+stores the handler data.
+
+That's racy against an immediate interrupt which gets handled before the
+store of the handler data happened. The handler will dereference a NULL
+pointer and crash.
+
+Cure it by storing handler data before installing the chained handler.
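+
+A hedged sketch of the race window being closed (interleaving invented
+for illustration):
+
+	CPU0					CPU1
+	----					----
+	__irq_do_set_handler(desc, ...);
+						<chained irq fires>
+						handler runs, reads
+						  handler_data == NULL, crashes
+	desc->irq_common_data.handler_data = data;	/* too late */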
+
+Reported-by: Borislav Petkov <bp@alien8.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/irq/chip.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -877,8 +877,8 @@ irq_set_chained_handler_and_data(unsigne
+ if (!desc)
+ return;
+
+- __irq_do_set_handler(desc, handle, 1, NULL);
+ desc->irq_common_data.handler_data = data;
++ __irq_do_set_handler(desc, handle, 1, NULL);
+
+ irq_put_desc_busunlock(desc, flags);
+ }
--- /dev/null
+From f73a7eee900e95404b61408a23a1df5c5811704c Mon Sep 17 00:00:00 2001
+From: KarimAllah Ahmed <karahmed@amazon.de>
+Date: Fri, 5 May 2017 11:39:59 -0700
+Subject: iommu/vt-d: Flush the IOTLB to get rid of the initial kdump mappings
+
+From: KarimAllah Ahmed <karahmed@amazon.de>
+
+commit f73a7eee900e95404b61408a23a1df5c5811704c upstream.
+
+Ever since commit 091d42e43d ("iommu/vt-d: Copy translation tables from
+old kernel") the kdump kernel copies the IOMMU context tables from the
+previous kernel. Each device mapping will be destroyed once the driver
+for the respective device takes over.
+
+This unfortunately breaks the workflow of mapping and unmapping a new
+context to the IOMMU. The mapping function assumes that either:
+
+1) Unmapping did the proper IOMMU flushing and it only ever flushes if the
+ IOMMU unit supports caching invalid entries.
+2) The system just booted and the initialization code took care of
+ flushing all IOMMU caches.
+
+This assumption is not true for the kdump kernel since the context
+tables have been copied from the previous kernel and translations could
+have been cached ever since. So make sure to flush the IOTLB as well
+when we destroy these old copied mappings.
+
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Anthony Liguori <aliguori@amazon.com>
+Signed-off-by: KarimAllah Ahmed <karahmed@amazon.de>
+Acked-by: David Woodhouse <dwmw@amazon.co.uk>
+Fixes: 091d42e43d ("iommu/vt-d: Copy translation tables from old kernel")
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/intel-iommu.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -2049,11 +2049,14 @@ static int domain_context_mapping_one(st
+ if (context_copied(context)) {
+ u16 did_old = context_domain_id(context);
+
+- if (did_old >= 0 && did_old < cap_ndoms(iommu->cap))
++ if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
+ iommu->flush.flush_context(iommu, did_old,
+ (((u16)bus) << 8) | devfn,
+ DMA_CCMD_MASK_NOBIT,
+ DMA_CCMD_DEVICE_INVL);
++ iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
++ DMA_TLB_DSI_FLUSH);
++ }
+ }
+
+ pgd = domain->pgd;
--- /dev/null
+From ddf42d068f8802de122bb7efdfcb3179336053f1 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 2 May 2017 14:30:39 +0100
+Subject: KVM: arm/arm64: vgic-v2: Do not use Active+Pending state for a HW interrupt
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit ddf42d068f8802de122bb7efdfcb3179336053f1 upstream.
+
+When an interrupt is injected with the HW bit set (indicating that
+deactivation should be propagated to the physical distributor),
+special care must be taken so that we never mark the corresponding
+LR with the Active+Pending state (as the pending state is kept in
+the physical distributor).
+
+Fixes: 140b086dd197 ("KVM: arm/arm64: vgic-new: Add GICv2 world switch backend")
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/arm/vgic/vgic-v2.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/virt/kvm/arm/vgic/vgic-v2.c
++++ b/virt/kvm/arm/vgic/vgic-v2.c
+@@ -168,6 +168,13 @@ void vgic_v2_populate_lr(struct kvm_vcpu
+ if (irq->hw) {
+ val |= GICH_LR_HW;
+ val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
++ /*
++ * Never set pending+active on a HW interrupt, as the
++ * pending state is kept at the physical distributor
++ * level.
++ */
++ if (irq->active && irq_is_pending(irq))
++ val &= ~GICH_LR_PENDING_BIT;
+ } else {
+ if (irq->config == VGIC_CONFIG_LEVEL)
+ val |= GICH_LR_EOI;
--- /dev/null
+From 3d6e77ad1489650afa20da92bb589c8778baa8da Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 2 May 2017 14:30:40 +0100
+Subject: KVM: arm/arm64: vgic-v3: Do not use Active+Pending state for a HW interrupt
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 3d6e77ad1489650afa20da92bb589c8778baa8da upstream.
+
+When an interrupt is injected with the HW bit set (indicating that
+deactivation should be propagated to the physical distributor),
+special care must be taken so that we never mark the corresponding
+LR with the Active+Pending state (as the pending state is kept in
+the physical distributor).
+
+Fixes: 59529f69f504 ("KVM: arm/arm64: vgic-new: Add GICv3 world switch backend")
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ virt/kvm/arm/vgic/vgic-v3.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/virt/kvm/arm/vgic/vgic-v3.c
++++ b/virt/kvm/arm/vgic/vgic-v3.c
+@@ -151,6 +151,13 @@ void vgic_v3_populate_lr(struct kvm_vcpu
+ if (irq->hw) {
+ val |= ICH_LR_HW;
+ val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
++ /*
++ * Never set pending+active on a HW interrupt, as the
++ * pending state is kept at the physical distributor
++ * level.
++ */
++ if (irq->active && irq_is_pending(irq))
++ val &= ~ICH_LR_PENDING_BIT;
+ } else {
+ if (irq->config == VGIC_CONFIG_LEVEL)
+ val |= ICH_LR_EOI;
--- /dev/null
+From 661e6b02b5aa82db31897f36e96324b77450fd7a Mon Sep 17 00:00:00 2001
+From: Zhichao Huang <zhichao.huang@linaro.org>
+Date: Thu, 11 May 2017 13:46:11 +0100
+Subject: KVM: arm: plug potential guest hardware debug leakage
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Zhichao Huang <zhichao.huang@linaro.org>
+
+commit 661e6b02b5aa82db31897f36e96324b77450fd7a upstream.
+
+Hardware debugging in guests is not currently intercepted, which means
+that a malicious guest can bring down the entire machine by writing
+to the debug registers.
+
+This patch enables trapping of all debug registers, preventing the
+guest from accessing them. This includes access to the debug mode
+(DBGDSCR) in the guest world at all times, which could otherwise mess
+with the host state. Reads return 0 and writes are ignored (RAZ_WI).
+
+The result is that the guest cannot detect any working hardware-based
+debug support. As debug exceptions are still routed to the guest,
+normal debugging using software-based breakpoints still works.
+
+To support debugging using hardware registers we need to implement a
+debug register aware world switch as well as special trapping for
+registers that may affect the host state.
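+
+A condensed sketch of the trap setup this adds to the HYP world switch
+(the full hunk in hyp/switch.c is below):
+
+	val = read_sysreg(HDCR);
+	val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA;	/* trap debug regs */
+	write_sysreg(val, HDCR);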
+
+Signed-off-by: Zhichao Huang <zhichao.huang@linaro.org>
+Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
+Reviewed-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/kvm_coproc.h | 3 -
+ arch/arm/kvm/coproc.c | 77 +++++++++++++++++++++++++++++---------
+ arch/arm/kvm/handle_exit.c | 4 -
+ arch/arm/kvm/hyp/switch.c | 4 +
+ 4 files changed, 66 insertions(+), 22 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_coproc.h
++++ b/arch/arm/include/asm/kvm_coproc.h
+@@ -31,7 +31,8 @@ void kvm_register_target_coproc_table(st
+ int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
+ int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
+-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
++int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
++int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
+ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+--- a/arch/arm/kvm/coproc.c
++++ b/arch/arm/kvm/coproc.c
+@@ -93,12 +93,6 @@ int kvm_handle_cp14_load_store(struct kv
+ return 1;
+ }
+
+-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
+-{
+- kvm_inject_undefined(vcpu);
+- return 1;
+-}
+-
+ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
+ {
+ /*
+@@ -514,12 +508,7 @@ static int emulate_cp15(struct kvm_vcpu
+ return 1;
+ }
+
+-/**
+- * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+- * @vcpu: The VCPU pointer
+- * @run: The kvm_run struct
+- */
+-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
++static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
+ {
+ struct coproc_params params;
+
+@@ -533,9 +522,38 @@ int kvm_handle_cp15_64(struct kvm_vcpu *
+ params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
+ params.CRm = 0;
+
++ return params;
++}
++
++/**
++ * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
++ * @vcpu: The VCPU pointer
++ * @run: The kvm_run struct
++ */
++int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
++{
++ struct coproc_params params = decode_64bit_hsr(vcpu);
++
+ return emulate_cp15(vcpu, ¶ms);
+ }
+
++/**
++ * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
++ * @vcpu: The VCPU pointer
++ * @run: The kvm_run struct
++ */
++int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
++{
++ struct coproc_params params = decode_64bit_hsr(vcpu);
++
++ /* raz_wi cp14 */
++ pm_fake(vcpu, ¶ms, NULL);
++
++ /* handled */
++ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
++ return 1;
++}
++
+ static void reset_coproc_regs(struct kvm_vcpu *vcpu,
+ const struct coproc_reg *table, size_t num)
+ {
+@@ -546,12 +564,7 @@ static void reset_coproc_regs(struct kvm
+ table[i].reset(vcpu, &table[i]);
+ }
+
+-/**
+- * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+- * @vcpu: The VCPU pointer
+- * @run: The kvm_run struct
+- */
+-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
++static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
+ {
+ struct coproc_params params;
+
+@@ -565,9 +578,37 @@ int kvm_handle_cp15_32(struct kvm_vcpu *
+ params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
+ params.Rt2 = 0;
+
++ return params;
++}
++
++/**
++ * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
++ * @vcpu: The VCPU pointer
++ * @run: The kvm_run struct
++ */
++int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
++{
++ struct coproc_params params = decode_32bit_hsr(vcpu);
+ return emulate_cp15(vcpu, ¶ms);
+ }
+
++/**
++ * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
++ * @vcpu: The VCPU pointer
++ * @run: The kvm_run struct
++ */
++int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
++{
++ struct coproc_params params = decode_32bit_hsr(vcpu);
++
++ /* raz_wi cp14 */
++ pm_fake(vcpu, ¶ms, NULL);
++
++ /* handled */
++ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
++ return 1;
++}
++
+ /******************************************************************************
+ * Userspace API
+ *****************************************************************************/
+--- a/arch/arm/kvm/handle_exit.c
++++ b/arch/arm/kvm/handle_exit.c
+@@ -83,9 +83,9 @@ static exit_handle_fn arm_exit_handlers[
+ [HSR_EC_WFI] = kvm_handle_wfx,
+ [HSR_EC_CP15_32] = kvm_handle_cp15_32,
+ [HSR_EC_CP15_64] = kvm_handle_cp15_64,
+- [HSR_EC_CP14_MR] = kvm_handle_cp14_access,
++ [HSR_EC_CP14_MR] = kvm_handle_cp14_32,
+ [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store,
+- [HSR_EC_CP14_64] = kvm_handle_cp14_access,
++ [HSR_EC_CP14_64] = kvm_handle_cp14_64,
+ [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access,
+ [HSR_EC_CP10_ID] = kvm_handle_cp10_id,
+ [HSR_EC_HVC] = handle_hvc,
+--- a/arch/arm/kvm/hyp/switch.c
++++ b/arch/arm/kvm/hyp/switch.c
+@@ -48,7 +48,9 @@ static void __hyp_text __activate_traps(
+ write_sysreg(HSTR_T(15), HSTR);
+ write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
+ val = read_sysreg(HDCR);
+- write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR);
++ val |= HDCR_TPM | HDCR_TPMCR; /* trap performance monitors */
++ val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */
++ write_sysreg(val, HDCR);
+ }
+
+ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
--- /dev/null
+From 3a158a62da0673db918b53ac1440845a5b64fd90 Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Tue, 2 May 2017 19:41:06 +0100
+Subject: metag/uaccess: Check access_ok in strncpy_from_user
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 3a158a62da0673db918b53ac1440845a5b64fd90 upstream.
+
+The metag implementation of strncpy_from_user() doesn't validate the src
+pointer, which could allow reading of arbitrary kernel memory. Add a
+short access_ok() check to prevent that.
+
+It's still possible for it to read across the user/kernel boundary, but
+it will invariably reach a NUL character after only 9 bytes, leaking
+only a static kernel address being loaded into D0Re0 at the beginning of
+__start, which is acceptable for the immediate fix.
+
+Reported-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/metag/include/asm/uaccess.h | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/arch/metag/include/asm/uaccess.h
++++ b/arch/metag/include/asm/uaccess.h
+@@ -194,8 +194,13 @@ do {
+ extern long __must_check __strncpy_from_user(char *dst, const char __user *src,
+ long count);
+
+-#define strncpy_from_user(dst, src, count) __strncpy_from_user(dst, src, count)
+-
++static inline long
++strncpy_from_user(char *dst, const char __user *src, long count)
++{
++ if (!access_ok(VERIFY_READ, src, 1))
++ return -EFAULT;
++ return __strncpy_from_user(dst, src, count);
++}
+ /*
+ * Return the size of a string (including the ending 0)
+ *
--- /dev/null
+From 8a8b56638bcac4e64cccc88bf95a0f9f4b19a2fb Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Fri, 28 Apr 2017 10:50:26 +0100
+Subject: metag/uaccess: Fix access_ok()
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit 8a8b56638bcac4e64cccc88bf95a0f9f4b19a2fb upstream.
+
+The __user_bad() macro used by access_ok() has a few corner cases
+noticed by Al Viro where it doesn't behave correctly:
+
+ - The kernel range check has off-by-one errors which permit access to the
+ first and last byte of the kernel mapped range.
+
+ - The kernel range check ends at LINCORE_BASE rather than
+ META_MEMORY_LIMIT, which is ineffective when the kernel is in global
+ space (an extremely uncommon configuration).
+
+There are a couple of other shortcomings here too:
+
+ - Access to the whole of the other address space is permitted (i.e. the
+ global half of the address space when the kernel is in local space).
+ This isn't ideal as it could theoretically still contain privileged
+ mappings set up by the bootloader.
+
+ - The size argument is unused, permitting user copies which start on
+ valid pages at the end of the user address range and cross the
+ boundary into the kernel address space (e.g. addr = 0x3ffffff0, size
+ > 0x10).
+
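+(Worked example using the figures above: if the user range ends just
+below 0x40000000, addr = 0x3ffffff0 passes a start-only check, but with
+size > 0x10 the access reaches 0x40000000 and beyond, i.e. the kernel
+half; the new "size <= get_fs().seg - addr" test rejects that.)
+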
+It isn't very convenient to add size checks when disallowing certain
+regions, and it seems far safer to be sure and explicit about what
+userland is able to access, so invert the logic to allow certain regions
+instead, and fix the off-by-one errors and missing size checks. This also
+allows the get_fs() == KERNEL_DS check to be more easily optimised into
+the user address range case.
+
+We now have 3 such allowed regions:
+
+ - The user address range (incorporating the get_fs() == KERNEL_DS
+ check).
+
+ - NULL (some kernel code expects this to work, and we'll always catch
+ the fault anyway).
+
+ - The core code memory region.
+
+Fixes: 373cd784d0fc ("metag: Memory handling")
+Reported-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/metag/include/asm/uaccess.h | 40 +++++++++++++++++++++++----------------
+ 1 file changed, 24 insertions(+), 16 deletions(-)
+
+--- a/arch/metag/include/asm/uaccess.h
++++ b/arch/metag/include/asm/uaccess.h
+@@ -28,24 +28,32 @@
+
+ #define segment_eq(a, b) ((a).seg == (b).seg)
+
+-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+-/*
+- * Explicitly allow NULL pointers here. Parts of the kernel such
+- * as readv/writev use access_ok to validate pointers, but want
+- * to allow NULL pointers for various reasons. NULL pointers are
+- * safe to allow through because the first page is not mappable on
+- * Meta.
+- *
+- * We also wish to avoid letting user code access the system area
+- * and the kernel half of the address space.
+- */
+-#define __user_bad(addr, size) (((addr) > 0 && (addr) < META_MEMORY_BASE) || \
+- ((addr) > PAGE_OFFSET && \
+- (addr) < LINCORE_BASE))
+-
+ static inline int __access_ok(unsigned long addr, unsigned long size)
+ {
+- return __kernel_ok || !__user_bad(addr, size);
++ /*
++ * Allow access to the user mapped memory area, but not the system area
++ * before it. The check extends to the top of the address space when
++ * kernel access is allowed (there's no real reason to user copy to the
++ * system area in any case).
++ */
++ if (likely(addr >= META_MEMORY_BASE && addr < get_fs().seg &&
++ size <= get_fs().seg - addr))
++ return true;
++ /*
++ * Explicitly allow NULL pointers here. Parts of the kernel such
++ * as readv/writev use access_ok to validate pointers, but want
++ * to allow NULL pointers for various reasons. NULL pointers are
++ * safe to allow through because the first page is not mappable on
++ * Meta.
++ */
++ if (!addr)
++ return true;
++ /* Allow access to core code memory area... */
++ if (addr >= LINCORE_CODE_BASE && addr <= LINCORE_CODE_LIMIT &&
++ size <= LINCORE_CODE_LIMIT + 1 - addr)
++ return true;
++ /* ... but no other areas. */
++ return false;
+ }
+
+ #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \
--- /dev/null
+From 17c99d9421695a0e0de18bf1e7091d859e20ec1d Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhc@lemote.com>
+Date: Thu, 16 Mar 2017 21:00:28 +0800
+Subject: MIPS: Loongson-3: Select MIPS_L1_CACHE_SHIFT_6
+
+From: Huacai Chen <chenhc@lemote.com>
+
+commit 17c99d9421695a0e0de18bf1e7091d859e20ec1d upstream.
+
+Some newer Loongson-3 CPUs have 64-byte cache lines, so select
+MIPS_L1_CACHE_SHIFT_6.
+
+Signed-off-by: Huacai Chen <chenhc@lemote.com>
+Cc: John Crispin <john@phrozen.org>
+Cc: Steven J . Hill <Steven.Hill@caviumnetworks.com>
+Cc: Fuxin Zhang <zhangfx@lemote.com>
+Cc: Zhangjin Wu <wuzhangjin@gmail.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/15755/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -1368,6 +1368,7 @@ config CPU_LOONGSON3
+ select WEAK_ORDERING
+ select WEAK_REORDERING_BEYOND_LLSC
+ select MIPS_PGD_C0_CONTEXT
++ select MIPS_L1_CACHE_SHIFT_6
+ select GPIOLIB
+ help
+ The Loongson 3 processor implements the MIPS64R2 instruction
--- /dev/null
+From f63572dff1421b6ca6abce71d46e03411e605c94 Mon Sep 17 00:00:00 2001
+From: Jon Derrick <jonathan.derrick@intel.com>
+Date: Fri, 5 May 2017 14:52:06 -0600
+Subject: nvme: unmap CMB and remove sysfs file in reset path
+
+From: Jon Derrick <jonathan.derrick@intel.com>
+
+commit f63572dff1421b6ca6abce71d46e03411e605c94 upstream.
+
+The CMB gets remapped on every reset but is not unmapped until removal.
+Add the unmapping and sysfs file removal to the reset path in
+nvme_pci_disable() to match the mapping path in nvme_pci_enable().
+
+Fixes: 202021c1a ("nvme : Add sysfs entry for NVMe CMBs when appropriate")
+
+Signed-off-by: Jon Derrick <jonathan.derrick@intel.com>
+Acked-by: Keith Busch <keith.busch@intel.com>
+Reviewed-By: Stephen Bates <sbates@raithlin.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/host/pci.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1384,6 +1384,11 @@ static inline void nvme_release_cmb(stru
+ if (dev->cmb) {
+ iounmap(dev->cmb);
+ dev->cmb = NULL;
++ if (dev->cmbsz) {
++ sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
++ &dev_attr_cmb.attr, NULL);
++ dev->cmbsz = 0;
++ }
+ }
+ }
+
+@@ -1655,6 +1660,7 @@ static void nvme_pci_disable(struct nvme
+ {
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
++ nvme_release_cmb(dev);
+ pci_free_irq_vectors(pdev);
+
+ if (pci_is_enabled(pdev)) {
+@@ -1993,7 +1999,6 @@ static void nvme_remove(struct pci_dev *
+ nvme_dev_disable(dev, true);
+ nvme_dev_remove_admin(dev);
+ nvme_free_queues(dev, 0);
+- nvme_release_cmb(dev);
+ nvme_release_prp_pools(dev);
+ nvme_dev_unmap(dev);
+ nvme_put_ctrl(&dev->ctrl);
--- /dev/null
+From fd615f69a18a9d4aa5ef02a1dc83f319f75da8e7 Mon Sep 17 00:00:00 2001
+From: LiuHailong <liu.hailong6@zte.com.cn>
+Date: Tue, 7 Feb 2017 10:35:52 +0800
+Subject: powerpc/64e: Fix hang when debugging programs with relocated kernel
+
+From: LiuHailong <liu.hailong6@zte.com.cn>
+
+commit fd615f69a18a9d4aa5ef02a1dc83f319f75da8e7 upstream.
+
+Debug interrupts can be taken during interrupt entry, since interrupt
+entry does not automatically turn them off. The kernel will check
+whether the faulting instruction is between [interrupt_base_book3e,
+__end_interrupts], and if so clear MSR[DE] and return.
+
+However, when the kernel is built with CONFIG_RELOCATABLE, it can't use
+LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) and
+LOAD_REG_IMMEDIATE(r15,__end_interrupts), as they ignore relocation.
+Thus, if the kernel is actually running at a different address than it
+was built at, the address comparison will fail, and the exception entry
+code will hang at kernel_dbg_exc.
+
+r2 (the TOC pointer) is also not usable here, as r2 still holds data from
+the interrupted context, so LOAD_REG_ADDR() doesn't work either. Instead,
+we use the *name@got* form to load the addresses of the two labels
+directly.
+
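+To illustrate the difference (a sketch; LOAD_REG_IMMEDIATE() expands to
+several immediate-building instructions, elided here):
+
+ /* link-time constant baked into the instruction stream;
+    stale if the kernel is running at a different address: */
+ LOAD_REG_IMMEDIATE(r14, interrupt_base_book3e)
+
+ /* runtime address loaded from the GOT entry, which the boot-time
+    relocation code has fixed up (r15 = TOC base from the PACA): */
+ ld r15,PACATOC(r13)
+ ld r14,interrupt_base_book3e@got(r15)
+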
+A test program, test.c, is shown below:
+
+#include <stdio.h>
+#include <unistd.h>
+
+int main(int argc, char *argv[])
+{
+ if (access("/proc/sys/kernel/perf_event_paranoid", F_OK) == -1)
+ printf("Kernel doesn't have perf_event support\n");
+}
+
+Steps to reproduce the bug:
+ 1) ./gdb ./test
+ 2) (gdb) b access
+ 3) (gdb) r
+ 4) (gdb) s
+
+Signed-off-by: Liu Hailong <liu.hailong6@zte.com.cn>
+Signed-off-by: Jiang Xuexin <jiang.xuexin@zte.com.cn>
+Reviewed-by: Jiang Biao <jiang.biao2@zte.com.cn>
+Reviewed-by: Liu Song <liu.song11@zte.com.cn>
+Reviewed-by: Huang Jian <huang.jian@zte.com.cn>
+[scottwood: cleaned up commit message, and specified bad behavior
+ as a hang rather than an oops to correspond to mainline kernel behavior]
+Fixes: 1cb6e0649248 ("powerpc/book3e: support CONFIG_RELOCATABLE")
+Signed-off-by: Scott Wood <oss@buserror.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/exceptions-64e.S | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/arch/powerpc/kernel/exceptions-64e.S
++++ b/arch/powerpc/kernel/exceptions-64e.S
+@@ -735,8 +735,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ andis. r15,r14,(DBSR_IC|DBSR_BT)@h
+ beq+ 1f
+
++#ifdef CONFIG_RELOCATABLE
++ ld r15,PACATOC(r13)
++ ld r14,interrupt_base_book3e@got(r15)
++ ld r15,__end_interrupts@got(r15)
++#else
+ LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
+ LOAD_REG_IMMEDIATE(r15,__end_interrupts)
++#endif
+ cmpld cr0,r10,r14
+ cmpld cr1,r10,r15
+ blt+ cr0,1f
+@@ -799,8 +805,14 @@ kernel_dbg_exc:
+ andis. r15,r14,(DBSR_IC|DBSR_BT)@h
+ beq+ 1f
+
++#ifdef CONFIG_RELOCATABLE
++ ld r15,PACATOC(r13)
++ ld r14,interrupt_base_book3e@got(r15)
++ ld r15,__end_interrupts@got(r15)
++#else
+ LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
+ LOAD_REG_IMMEDIATE(r15,__end_interrupts)
++#endif
+ cmpld cr0,r10,r14
+ cmpld cr1,r10,r15
+ blt+ cr0,1f
--- /dev/null
+From d93b0ac01a9ce276ec39644be47001873d3d183c Mon Sep 17 00:00:00 2001
+From: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+Date: Tue, 18 Apr 2017 22:08:17 +0530
+Subject: powerpc/book3s/mce: Move add_taint() later in virtual mode
+
+From: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+
+commit d93b0ac01a9ce276ec39644be47001873d3d183c upstream.
+
+machine_check_early() gets called in real mode. The very first time
+add_taint() is called, it prints a warning, which ends up making an OPAL
+call (via the OPAL_CALL wrapper) to write it to the console. If we take
+our very first machine check while we are inside OPAL, we are doomed:
+OPAL_CALL overwrites PACASAVEDMSR in the paca (r13), so when MCE handling
+is done, the original, interrupted OPAL call will use this new MSR on its
+way back through opal_return. This usually leads to unexpected behaviour
+or a kernel panic. Instead, move the add_taint() call later, into virtual
+mode, where it is safe to call.
+
+This is broken with the current firmware level. We have been lucky so far
+not to take the very first MCE while in OPAL, but it is easily
+reproducible on Mambo.
+
+Fixes: 27ea2c420cad ("powerpc: Set the correct kernel taint on machine check errors.")
+Signed-off-by: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/mce.c | 2 ++
+ arch/powerpc/kernel/traps.c | 4 ++--
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kernel/mce.c
++++ b/arch/powerpc/kernel/mce.c
+@@ -205,6 +205,8 @@ static void machine_check_process_queued
+ {
+ int index;
+
++ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
++
+ /*
+ * For now just print it to console.
+ * TODO: log this error event to FSP or nvram.
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -302,8 +302,6 @@ long machine_check_early(struct pt_regs
+
+ __this_cpu_inc(irq_stat.mce_exceptions);
+
+- add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+-
+ if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
+ handled = cur_cpu_spec->machine_check_early(regs);
+ return handled;
+@@ -737,6 +735,8 @@ void machine_check_exception(struct pt_r
+
+ __this_cpu_inc(irq_stat.mce_exceptions);
+
++ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
++
+ /* See if any machine dependent calls. In theory, we would want
+ * to call the CPU first, and call the ppc_md. one if the CPU
+ * one returns a positive number. However there is existing code
--- /dev/null
+From daeba2956f32f91f3493788ff6ee02fb1b2f02fa Mon Sep 17 00:00:00 2001
+From: Russell Currey <ruscur@russell.cc>
+Date: Wed, 19 Apr 2017 17:39:26 +1000
+Subject: powerpc/eeh: Avoid use after free in eeh_handle_special_event()
+
+From: Russell Currey <ruscur@russell.cc>
+
+commit daeba2956f32f91f3493788ff6ee02fb1b2f02fa upstream.
+
+eeh_handle_special_event() is called when an EEH event is detected but
+can't be narrowed down to a specific PE. This function looks through
+every PE to find one in an erroneous state, then calls the regular event
+handler eeh_handle_normal_event() once it knows which PE has an error.
+
+However, if eeh_handle_normal_event() finds that the PE cannot possibly
+be recovered, it frees the PE, rendering the caller's pointer stale. This
+leads to a use after free in eeh_handle_special_event(), which attempts
+to clear the "recovering" state on the PE after eeh_handle_normal_event()
+returns.
+
+Thus, make sure the PE is valid when attempting to clear state in
+eeh_handle_special_event().
+
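+A minimal sketch of the ownership contract this introduces (hypothetical
+simplified types; in the real code eeh_handle_normal_event() returns true
+once it has freed the PE):
+
+ #include <stdbool.h>
+ #include <stdlib.h>
+
+ struct pe { int state; };
+
+ /* Returns true when it has freed @pe; the caller must then not touch it. */
+ static bool handle_normal_event(struct pe *pe, bool recoverable)
+ {
+         if (!recoverable) {
+                 free(pe);       /* pe is gone... */
+                 return true;    /* ...so tell the caller not to use it */
+         }
+         pe->state = 0;          /* normal recovery path */
+         return false;
+ }
+
+ static void handle_special_event(struct pe *pe, bool recoverable)
+ {
+         if (handle_normal_event(pe, recoverable))
+                 return;         /* pe is stale: skip the state clear */
+         pe->state &= ~1;        /* clear RECOVERING only while pe lives */
+ }
+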
+Fixes: 8a6b1bc70dbb ("powerpc/eeh: EEH core to handle special event")
+Reported-by: Alexey Kardashevskiy <aik@ozlabs.ru>
+Signed-off-by: Russell Currey <ruscur@russell.cc>
+Reviewed-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/eeh_driver.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -724,7 +724,7 @@ static int eeh_reset_device(struct eeh_p
+ */
+ #define MAX_WAIT_FOR_RECOVERY 300
+
+-static void eeh_handle_normal_event(struct eeh_pe *pe)
++static bool eeh_handle_normal_event(struct eeh_pe *pe)
+ {
+ struct pci_bus *frozen_bus;
+ struct eeh_dev *edev, *tmp;
+@@ -736,7 +736,7 @@ static void eeh_handle_normal_event(stru
+ if (!frozen_bus) {
+ pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
+ __func__, pe->phb->global_number, pe->addr);
+- return;
++ return false;
+ }
+
+ eeh_pe_update_time_stamp(pe);
+@@ -870,7 +870,7 @@ static void eeh_handle_normal_event(stru
+ pr_info("EEH: Notify device driver to resume\n");
+ eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
+
+- return;
++ return false;
+
+ excess_failures:
+ /*
+@@ -915,8 +915,12 @@ perm_error:
+ pci_lock_rescan_remove();
+ pci_hp_remove_devices(frozen_bus);
+ pci_unlock_rescan_remove();
++
++ /* The passed PE should no longer be used */
++ return true;
+ }
+ }
++ return false;
+ }
+
+ static void eeh_handle_special_event(void)
+@@ -982,7 +986,14 @@ static void eeh_handle_special_event(voi
+ */
+ if (rc == EEH_NEXT_ERR_FROZEN_PE ||
+ rc == EEH_NEXT_ERR_FENCED_PHB) {
+- eeh_handle_normal_event(pe);
++ /*
++ * eeh_handle_normal_event() can make the PE stale if it
++ * determines that the PE cannot possibly be recovered.
++ * Don't modify the PE state if that's the case.
++ */
++ if (eeh_handle_normal_event(pe))
++ continue;
++
+ eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
+ } else {
+ pci_lock_rescan_remove();
--- /dev/null
+From e889e96e98e8da97bd39e46b7253615eabe14397 Mon Sep 17 00:00:00 2001
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+Date: Tue, 11 Apr 2017 17:54:57 +1000
+Subject: powerpc/iommu: Do not call PageTransHuge() on tail pages
+
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+
+commit e889e96e98e8da97bd39e46b7253615eabe14397 upstream.
+
+The CMA page migration code does not support compound pages at the
+moment, so it performs a few tests before proceeding to the actual page
+migration.
+
+One of the tests - PageTransHuge() - contains VM_BUG_ON_PAGE(PageTail()),
+as it is designed to be called on head pages only. Since we also test for
+PageCompound(), which covers both PageTail() and PageHead(), we can
+simplify the check to just PageCompound() and thereby avoid the possible
+VM_BUG_ON_PAGE.
+
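+A toy model of the simplification (simplified stand-ins, not the kernel's
+page-flag implementations):
+
+ #include <stdbool.h>
+
+ struct page_model { bool head, tail; };
+
+ /* PageCompound() is true for head and tail pages alike... */
+ static bool page_compound(const struct page_model *p)
+ {
+         return p->head || p->tail;
+ }
+
+ /* ...and PageHuge()/PageTransHuge() can only be true for compound
+  * pages, so "huge || trans_huge || compound" reduces to "compound".
+  * Dropping the first two tests also avoids PageTransHuge()'s
+  * VM_BUG_ON_PAGE(PageTail()) when a tail page is passed in. */
+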
+Fixes: 2e5bbb5461f1 ("KVM: PPC: Book3S HV: Migrate pinned pages out of CMA")
+Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
+Acked-by: Balbir Singh <bsingharora@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/mmu_context_iommu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/mm/mmu_context_iommu.c
++++ b/arch/powerpc/mm/mmu_context_iommu.c
+@@ -81,7 +81,7 @@ struct page *new_iommu_non_cma_page(stru
+ gfp_t gfp_mask = GFP_USER;
+ struct page *new_page;
+
+- if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
++ if (PageCompound(page))
+ return NULL;
+
+ if (PageHighMem(page))
+@@ -100,7 +100,7 @@ static int mm_iommu_move_page_from_cma(s
+ LIST_HEAD(cma_migrate_pages);
+
+ /* Ignore huge pages for now */
+- if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
++ if (PageCompound(page))
+ return -EBUSY;
+
+ lru_add_drain();
--- /dev/null
+From 9765ad134a00a01cbcc69c78ff6defbfad209bc5 Mon Sep 17 00:00:00 2001
+From: David Gibson <david@gibson.dropbear.id.au>
+Date: Wed, 19 Apr 2017 16:38:26 +1000
+Subject: powerpc/mm: Ensure IRQs are off in switch_mm()
+
+From: David Gibson <david@gibson.dropbear.id.au>
+
+commit 9765ad134a00a01cbcc69c78ff6defbfad209bc5 upstream.
+
+powerpc expects IRQs to already be (soft) disabled when switch_mm() is
+called, as made clear in the commit message of 9c1e105238c4 ("powerpc: Allow
+perf_counters to access user memory at interrupt time").
+
+Aside from any race conditions that might exist between switch_mm() and an IRQ,
+there is also an unconditional hard_irq_disable() in switch_slb(). If that isn't
+followed at some point by an IRQ enable then interrupts will remain disabled
+until we return to userspace.
+
+It is true that when switch_mm() is called from the scheduler IRQs are off, but
+not when it's called by use_mm(). Looking closer we see that last year in commit
+f98db6013c55 ("sched/core: Add switch_mm_irqs_off() and use it in the scheduler")
+this was made more explicit by the addition of switch_mm_irqs_off() which is now
+called by the scheduler, vs switch_mm() which is used by use_mm().
+
+Arguably it is a bug in use_mm() to call switch_mm() in a different context than
+it expects, but fixing that will take time.
+
+This was discovered recently when vhost started throwing warnings such as:
+
+ BUG: sleeping function called from invalid context at kernel/mutex.c:578
+ in_atomic(): 0, irqs_disabled(): 1, pid: 10768, name: vhost-10760
+ no locks held by vhost-10760/10768.
+ irq event stamp: 10
+ hardirqs last enabled at (9): _raw_spin_unlock_irq+0x40/0x80
+ hardirqs last disabled at (10): switch_slb+0x2e4/0x490
+ softirqs last enabled at (0): copy_process+0x5e8/0x1260
+ softirqs last disabled at (0): (null)
+ Call Trace:
+ show_stack+0x88/0x390 (unreliable)
+ dump_stack+0x30/0x44
+ __might_sleep+0x1c4/0x2d0
+ mutex_lock_nested+0x74/0x5c0
+ cgroup_attach_task_all+0x5c/0x180
+ vhost_attach_cgroups_work+0x58/0x80 [vhost]
+ vhost_worker+0x24c/0x3d0 [vhost]
+ kthread+0xec/0x100
+ ret_from_kernel_thread+0x5c/0xd4
+
+Prior to commit 04b96e5528ca ("vhost: lockless enqueuing") (Aug 2016) the
+vhost_worker() would do a spin_unlock_irq() not long after calling use_mm(),
+which had the effect of re-enabling IRQs. Since that commit removed the
+locking in vhost_worker(), the body of the vhost_worker() loop now runs
+with interrupts off, causing the warnings.
+
+This patch addresses the problem by making the powerpc code mirror the x86 code,
+ie. we disable interrupts in switch_mm(), and optimise the scheduler case by
+defining switch_mm_irqs_off().
+
+Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
+[mpe: Flesh out/rewrite change log, add stable]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/mmu_context.h | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/include/asm/mmu_context.h
++++ b/arch/powerpc/include/asm/mmu_context.h
+@@ -70,8 +70,9 @@ extern void drop_cop(unsigned long acop,
+ * switch_mm is the entry point called from the architecture independent
+ * code in kernel/sched/core.c
+ */
+-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+- struct task_struct *tsk)
++static inline void switch_mm_irqs_off(struct mm_struct *prev,
++ struct mm_struct *next,
++ struct task_struct *tsk)
+ {
+ /* Mark this context has been used on the new CPU */
+ if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
+@@ -110,6 +111,18 @@ static inline void switch_mm(struct mm_s
+ switch_mmu_context(prev, next, tsk);
+ }
+
++static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
++ struct task_struct *tsk)
++{
++ unsigned long flags;
++
++ local_irq_save(flags);
++ switch_mm_irqs_off(prev, next, tsk);
++ local_irq_restore(flags);
++}
++#define switch_mm_irqs_off switch_mm_irqs_off
++
++
+ #define deactivate_mm(tsk,mm) do { } while (0)
+
+ /*
--- /dev/null
+From 68baf692c435339e6295cb470ea5545cbc28160e Mon Sep 17 00:00:00 2001
+From: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+Date: Mon, 17 Apr 2017 20:21:40 -0400
+Subject: powerpc/pseries: Fix of_node_put() underflow during DLPAR remove
+
+From: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+
+commit 68baf692c435339e6295cb470ea5545cbc28160e upstream.
+
+Historically struct device_node references were tracked using a kref embedded as
+a struct field. Commit 75b57ecf9d1d ("of: Make device nodes kobjects so they
+show up in sysfs") (Mar 2014) refactored device_nodes to be kobjects such that
+the device tree could be more simply exposed to userspace using sysfs.
+
+Commit 0829f6d1f69e ("of: device_node kobject lifecycle fixes") (Mar 2014)
+followed up these changes to better control the kobject lifecycle and in
+particular the reference counting via of_node_get(), of_node_put(), and
+of_node_init().
+
+A result of this second commit was that it introduced an of_node_put() call when
+a dynamic node is detached, in of_node_remove(), that removes the initial kobj
+reference created by of_node_init().
+
+Traditionally, as the original user of dynamic device nodes, the pseries
+code had assumed responsibility for releasing this final reference in its
+platform-specific DLPAR detach code.
+
+This patch fixes a refcount underflow introduced by commit 0829f6d1f69e,
+and recently exposed by the upstreaming of the refcount API.
+
+With this patch, messages like the following are no longer seen in the
+kernel log after DLPAR remove operations on CPUs and PCI devices.
+
+ rpadlpar_io: slot PHB 72 removed
+ refcount_t: underflow; use-after-free.
+ ------------[ cut here ]------------
+ WARNING: CPU: 5 PID: 3335 at lib/refcount.c:128 refcount_sub_and_test+0xf4/0x110
+
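+A toy refcount illustrating the underflow (a simplified stand-in for the
+kernel's kobject/refcount machinery):
+
+ #include <stdio.h>
+
+ struct node { int refs; };
+
+ static void node_put(struct node *n)
+ {
+         if (--n->refs < 0)
+                 printf("refcount_t: underflow; use-after-free.\n");
+ }
+
+ int main(void)
+ {
+         struct node n = { .refs = 1 };  /* initial reference from init */
+         node_put(&n);   /* detach drops the initial reference */
+         node_put(&n);   /* the extra put in the detach path underflows */
+         return 0;
+ }
+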
+Fixes: 0829f6d1f69e ("of: device_node kobject lifecycle fixes")
+Signed-off-by: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+[mpe: Make change log commit references more verbose]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/pseries/dlpar.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/arch/powerpc/platforms/pseries/dlpar.c
++++ b/arch/powerpc/platforms/pseries/dlpar.c
+@@ -288,7 +288,6 @@ int dlpar_detach_node(struct device_node
+ if (rc)
+ return rc;
+
+- of_node_put(dn); /* Must decrement the refcount */
+ return 0;
+ }
+
--- /dev/null
+From f48e91e87e67b56bef63393d1a02c6e22c1d7078 Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Mon, 8 May 2017 17:16:26 +1000
+Subject: powerpc/tm: Fix FP and VMX register corruption
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit f48e91e87e67b56bef63393d1a02c6e22c1d7078 upstream.
+
+In commit dc3106690b20 ("powerpc: tm: Always use fp_state and vr_state
+to store live registers"), a section of code was removed that copied
+the current state to checkpointed state. That code should not have been
+removed.
+
+When an FP (Floating Point) unavailable exception is taken inside a
+transaction, we need to abort the transaction. This is because at the
+time of the tbegin, the FP state is bogus, so the state stored in the
+checkpointed registers is incorrect. To fix this, we treclaim (to get
+the checkpointed GPRs) and then copy the thread_struct FP live state
+into the checkpointed state. We then trecheckpoint so that the FP state
+is correctly restored into the CPU.
+
+The copying of the FP registers from live to checkpointed is what was
+missing.
+
+This simplifies the logic slightly from the original patch.
+tm_reclaim_thread() will now always write the checkpointed FP
+state. Either the checkpointed FP state will be written as part of
+the actual treclaim (in tm.S), or it'll be a copy of the live
+state. Which one we use is based on MSR[FP] from userspace.
+
+Similarly for VMX.
+
+Fixes: dc3106690b20 ("powerpc: tm: Always use fp_state and vr_state to store live registers")
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Reviewed-by: cyrilbur@gmail.com
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/process.c | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -839,6 +839,25 @@ static void tm_reclaim_thread(struct thr
+ if (!MSR_TM_SUSPENDED(mfmsr()))
+ return;
+
++ /*
++ * If we are in a transaction and FP is off then we can't have
++ * used FP inside that transaction. Hence the checkpointed
++ * state is the same as the live state. We need to copy the
++ * live state to the checkpointed state so that when the
++ * transaction is restored, the checkpointed state is correct
++ * and the aborted transaction sees the correct state. We use
++ * ckpt_regs.msr here as that's what tm_reclaim will use to
++ * determine if it's going to write the checkpointed state or
++ * not. So either this will write the checkpointed registers,
++ * or reclaim will. Similarly for VMX.
++ */
++ if ((thr->ckpt_regs.msr & MSR_FP) == 0)
++ memcpy(&thr->ckfp_state, &thr->fp_state,
++ sizeof(struct thread_fp_state));
++ if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
++ memcpy(&thr->ckvr_state, &thr->vr_state,
++ sizeof(struct thread_vr_state));
++
+ giveup_all(container_of(thr, struct task_struct, thread));
+
+ tm_reclaim(thr, thr->ckpt_regs.msr, cause);
cx231xx-audio-fix-init-error-path.patch
cx231xx-audio-fix-null-deref-at-probe.patch
cx231xx-cards-fix-null-deref-at-probe.patch
+powerpc-mm-ensure-irqs-are-off-in-switch_mm.patch
+powerpc-eeh-avoid-use-after-free-in-eeh_handle_special_event.patch
+powerpc-book3s-mce-move-add_taint-later-in-virtual-mode.patch
+powerpc-pseries-fix-of_node_put-underflow-during-dlpar-remove.patch
+powerpc-iommu-do-not-call-pagetranshuge-on-tail-pages.patch
+powerpc-64e-fix-hang-when-debugging-programs-with-relocated-kernel.patch
+powerpc-tm-fix-fp-and-vmx-register-corruption.patch
+arm64-kvm-do-not-use-stack-protector-to-compile-el2-code.patch
+arm-kvm-do-not-use-stack-protector-to-compile-hyp-code.patch
+kvm-arm-arm64-vgic-v2-do-not-use-active-pending-state-for-a-hw-interrupt.patch
+kvm-arm-arm64-vgic-v3-do-not-use-active-pending-state-for-a-hw-interrupt.patch
+kvm-arm-plug-potential-guest-hardware-debug-leakage.patch
+arm-8662-1-module-split-core-and-init-plt-sections.patch
+arm-8670-1-v7m-do-not-corrupt-vector-table-around-v7m_invalidate_l1-call.patch
+arm-dts-at91-sama5d3_xplained-fix-adc-vref.patch
+arm-dts-at91-sama5d3_xplained-not-all-adc-channels-are-available.patch
+arm-dts-imx6sx-sdb-remove-opp-override.patch
+arm64-dts-hi6220-reset-the-mmc-hosts.patch
+arm64-xchg-hazard-against-entire-exchange-variable.patch
+arm64-ensure-extension-of-smp_store_release-value.patch
+arm64-armv8_deprecated-ensure-extension-of-addr.patch
+arm64-uaccess-ensure-extension-of-access_ok-addr.patch
+arm64-documentation-document-tagged-pointer-stack-constraints.patch
+staging-rtl8192e-rtl92e_fill_tx_desc-fix-write-to-mapped-out-memory.patch
+staging-rtl8192e-fix-2-byte-alignment-of-register-bssidr.patch
+staging-rtl8192e-rtl92e_get_eeprom_size-fix-read-size-of-eprom_cmd.patch
+staging-rtl8192e-getts-fix-invalid-tid-7-warning.patch
+iommu-vt-d-flush-the-iotlb-to-get-rid-of-the-initial-kdump-mappings.patch
+metag-uaccess-fix-access_ok.patch
+metag-uaccess-check-access_ok-in-strncpy_from_user.patch
+stackprotector-increase-the-per-task-stack-canary-s-random-range-from-32-bits-to-64-bits-on-64-bit-platforms.patch
+uwb-fix-device-quirk-on-big-endian-hosts.patch
+genirq-fix-chained-interrupt-data-ordering.patch
+nvme-unmap-cmb-and-remove-sysfs-file-in-reset-path.patch
+mips-loongson-3-select-mips_l1_cache_shift_6.patch
--- /dev/null
+From 5ea30e4e58040cfd6434c2f33dc3ea76e2c15b05 Mon Sep 17 00:00:00 2001
+From: Daniel Micay <danielmicay@gmail.com>
+Date: Thu, 4 May 2017 09:32:09 -0400
+Subject: stackprotector: Increase the per-task stack canary's random range from 32 bits to 64 bits on 64-bit platforms
+
+From: Daniel Micay <danielmicay@gmail.com>
+
+commit 5ea30e4e58040cfd6434c2f33dc3ea76e2c15b05 upstream.
+
+The stack canary is an 'unsigned long' and should be fully initialized to
+random data rather than only 32 bits of random data.
+
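+An illustration of the difference on an LP64 platform (hypothetical
+values; r32 stands in for get_random_int()):
+
+ #include <stdint.h>
+ #include <stdio.h>
+
+ int main(void)
+ {
+         uint32_t r32 = 0xdeadbeef;      /* 32 bits of randomness */
+         unsigned long canary = r32;     /* top 32 bits remain zero */
+
+         /* prints 0xdeadbeef: only half of the canary is random */
+         printf("canary = %#lx\n", canary);
+         return 0;
+ }
+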
+Signed-off-by: Daniel Micay <danielmicay@gmail.com>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Acked-by: Rik van Riel <riel@redhat.com>
+Acked-by: Kees Cook <keescook@chromium.org>
+Cc: Arjan van Ven <arjan@linux.intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: kernel-hardening@lists.openwall.com
+Link: http://lkml.kernel.org/r/20170504133209.3053-1-danielmicay@gmail.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/fork.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -521,7 +521,7 @@ static struct task_struct *dup_task_stru
+ set_task_stack_end_magic(tsk);
+
+ #ifdef CONFIG_CC_STACKPROTECTOR
+- tsk->stack_canary = get_random_int();
++ tsk->stack_canary = get_random_long();
+ #endif
+
+ /*
--- /dev/null
+From 867510bde14e7b7fc6dd0f50b48f6753cfbd227a Mon Sep 17 00:00:00 2001
+From: Malcolm Priestley <tvboxspy@gmail.com>
+Date: Thu, 11 May 2017 18:57:44 +0100
+Subject: staging: rtl8192e: fix 2 byte alignment of register BSSIDR.
+
+From: Malcolm Priestley <tvboxspy@gmail.com>
+
+commit 867510bde14e7b7fc6dd0f50b48f6753cfbd227a upstream.
+
+BSSIDR has two-byte alignment in the PCI ioremap, so correct the write
+by issuing the 16-bit access first.
+
+This fixes a problem where association fails because the BSSID filter
+is not set correctly.
+
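+A sketch of the access split (hypothetical helper, simplified to plain
+pointer stores): because BSSIDR itself is only 2-byte aligned, the first
+16 bits go to BSSIDR, which leaves BSSIDR + 2 4-byte aligned for the
+remaining 32-bit write:
+
+ #include <stdint.h>
+ #include <string.h>
+
+ /* Write a 6-byte BSSID to a register base with 2-byte alignment. */
+ static void write_bssid(volatile uint8_t *bssidr, const uint8_t *bssid)
+ {
+         uint16_t lo;
+         uint32_t hi;
+
+         memcpy(&lo, bssid, sizeof(lo));       /* bytes 0-1 */
+         memcpy(&hi, bssid + 2, sizeof(hi));   /* bytes 2-5 */
+
+         *(volatile uint16_t *)bssidr = lo;         /* 2-byte aligned */
+         *(volatile uint32_t *)(bssidr + 2) = hi;   /* now 4-byte aligned */
+ }
+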
+Signed-off-by: Malcolm Priestley <tvboxspy@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
++++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+@@ -97,8 +97,9 @@ void rtl92e_set_reg(struct net_device *d
+
+ switch (variable) {
+ case HW_VAR_BSSID:
+- rtl92e_writel(dev, BSSIDR, ((u32 *)(val))[0]);
+- rtl92e_writew(dev, BSSIDR+2, ((u16 *)(val+2))[0]);
++ /* BSSIDR 2 byte alignment */
++ rtl92e_writew(dev, BSSIDR, *(u16 *)val);
++ rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(val + 2));
+ break;
+
+ case HW_VAR_MEDIA_STATUS:
+@@ -963,8 +964,8 @@ static void _rtl92e_net_update(struct ne
+ rtl92e_config_rate(dev, &rate_config);
+ priv->dot11CurrentPreambleMode = PREAMBLE_AUTO;
+ priv->basic_rate = rate_config &= 0x15f;
+- rtl92e_writel(dev, BSSIDR, ((u32 *)net->bssid)[0]);
+- rtl92e_writew(dev, BSSIDR+4, ((u16 *)net->bssid)[2]);
++ rtl92e_writew(dev, BSSIDR, *(u16 *)net->bssid);
++ rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(net->bssid + 2));
+
+ if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
+ rtl92e_writew(dev, ATIMWND, 2);
--- /dev/null
+From 95d93e271d920dfda369d4740b1cc1061d41fe7f Mon Sep 17 00:00:00 2001
+From: Malcolm Priestley <tvboxspy@gmail.com>
+Date: Thu, 11 May 2017 18:57:46 +0100
+Subject: staging: rtl8192e: GetTs Fix invalid TID 7 warning.
+
+From: Malcolm Priestley <tvboxspy@gmail.com>
+
+commit 95d93e271d920dfda369d4740b1cc1061d41fe7f upstream.
+
+TID 7 is a valid value for QoS IEEE 802.11e.
+
+The switch statement that follows confirms that 7 is valid.
+
+Remove function IsACValid and use the default case to filter
+invalid TIDs.
+
+Signed-off-by: Malcolm Priestley <tvboxspy@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/rtl8192e/rtl819x_TSProc.c | 15 ++++-----------
+ 1 file changed, 4 insertions(+), 11 deletions(-)
+
+--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
++++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
+@@ -306,11 +306,6 @@ static void MakeTSEntry(struct ts_common
+ pTsCommonInfo->TClasNum = TCLAS_Num;
+ }
+
+-static bool IsACValid(unsigned int tid)
+-{
+- return tid < 7;
+-}
+-
+ bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
+ u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs)
+ {
+@@ -328,12 +323,6 @@ bool GetTs(struct rtllib_device *ieee, s
+ if (ieee->current_network.qos_data.supported == 0) {
+ UP = 0;
+ } else {
+- if (!IsACValid(TID)) {
+- netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n",
+- __func__, TID);
+- return false;
+- }
+-
+ switch (TID) {
+ case 0:
+ case 3:
+@@ -351,6 +340,10 @@ bool GetTs(struct rtllib_device *ieee, s
+ case 7:
+ UP = 7;
+ break;
++ default:
++ netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n",
++ __func__, TID);
++ return false;
+ }
+ }
+
--- /dev/null
+From baabd567f87be05330faa5140f72a91960e7405a Mon Sep 17 00:00:00 2001
+From: Malcolm Priestley <tvboxspy@gmail.com>
+Date: Thu, 11 May 2017 18:57:43 +0100
+Subject: staging: rtl8192e: rtl92e_fill_tx_desc fix write to mapped out memory.
+
+From: Malcolm Priestley <tvboxspy@gmail.com>
+
+commit baabd567f87be05330faa5140f72a91960e7405a upstream.
+
+The driver attempts to alter memory that is already mapped to the PCI
+device. This is because tx_fwinfo_8190pci points into skb->data.
+
+Move the pci_map_single() call to the point where the completed buffer
+is ready to be mapped, while pdesc is still empty, so the packet is
+dropped on a mapping error.
+
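+The streaming-DMA rule the fix follows, as a code fragment (names as in
+the driver; fill_descriptor() and queue_to_hardware() are hypothetical,
+while pci_map_single() and pci_dma_mapping_error() are the real API):
+
+ fill_descriptor(skb->data);     /* finish all CPU writes first */
+
+ mapping = pci_map_single(priv->pdev, skb->data, skb->len,
+                          PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(priv->pdev, mapping))
+         return;                 /* nothing queued yet: drop the packet */
+
+ queue_to_hardware(mapping);     /* the device owns the buffer now */
+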
+Signed-off-by: Malcolm Priestley <tvboxspy@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
++++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+@@ -1184,8 +1184,7 @@ void rtl92e_fill_tx_desc(struct net_dev
+ struct cb_desc *cb_desc, struct sk_buff *skb)
+ {
+ struct r8192_priv *priv = rtllib_priv(dev);
+- dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
+- PCI_DMA_TODEVICE);
++ dma_addr_t mapping;
+ struct tx_fwinfo_8190pci *pTxFwInfo;
+
+ pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data;
+@@ -1196,8 +1195,6 @@ void rtl92e_fill_tx_desc(struct net_dev
+ pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT,
+ pTxFwInfo->TxRate, cb_desc);
+
+- if (pci_dma_mapping_error(priv->pdev, mapping))
+- netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
+ if (cb_desc->bAMPDUEnable) {
+ pTxFwInfo->AllowAggregation = 1;
+ pTxFwInfo->RxMF = cb_desc->ampdu_factor;
+@@ -1232,6 +1229,14 @@ void rtl92e_fill_tx_desc(struct net_dev
+ }
+
+ memset((u8 *)pdesc, 0, 12);
++
++ mapping = pci_map_single(priv->pdev, skb->data, skb->len,
++ PCI_DMA_TODEVICE);
++ if (pci_dma_mapping_error(priv->pdev, mapping)) {
++ netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
++ return;
++ }
++
+ pdesc->LINIP = 0;
+ pdesc->CmdInit = 1;
+ pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
--- /dev/null
+From 90be652c9f157d44b9c2803f902a8839796c090d Mon Sep 17 00:00:00 2001
+From: Malcolm Priestley <tvboxspy@gmail.com>
+Date: Thu, 11 May 2017 18:57:45 +0100
+Subject: staging: rtl8192e: rtl92e_get_eeprom_size Fix read size of EPROM_CMD.
+
+From: Malcolm Priestley <tvboxspy@gmail.com>
+
+commit 90be652c9f157d44b9c2803f902a8839796c090d upstream.
+
+EPROM_CMD is 2-byte aligned in the PCI map, so reading it with
+rtl92e_readl() returns invalid data; use rtl92e_readw() instead.
+
+Without this fix, the device is unable to select the right EEPROM type.
+
+Signed-off-by: Malcolm Priestley <tvboxspy@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
++++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+@@ -627,7 +627,7 @@ void rtl92e_get_eeprom_size(struct net_d
+ struct r8192_priv *priv = rtllib_priv(dev);
+
+ RT_TRACE(COMP_INIT, "===========>%s()\n", __func__);
+- curCR = rtl92e_readl(dev, EPROM_CMD);
++ curCR = rtl92e_readw(dev, EPROM_CMD);
+ RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD,
+ curCR);
+ priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 :
--- /dev/null
+From 41318a2b82f5d5fe1fb408f6d6e0b22aa557111d Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Fri, 12 May 2017 12:06:32 +0200
+Subject: uwb: fix device quirk on big-endian hosts
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 41318a2b82f5d5fe1fb408f6d6e0b22aa557111d upstream.
+
+Add missing endianness conversion when using the USB device-descriptor
+idProduct field to apply a hardware quirk.
+
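+Why the raw compare only works on little-endian hosts, as a small
+self-contained illustration (le16_to_cpu_sketch() is a stand-in for the
+kernel's le16_to_cpu()):
+
+ #include <stdint.h>
+ #include <stdio.h>
+
+ /* USB descriptor fields are little-endian on the wire. */
+ static uint16_t le16_to_cpu_sketch(const uint8_t *p)
+ {
+         return (uint16_t)(p[0] | (p[1] << 8));  /* endian-independent */
+ }
+
+ int main(void)
+ {
+         uint8_t idProduct[2] = { 0xbe, 0xba }; /* 0xbabe on the wire */
+
+         /* prints 0xbabe on both little- and big-endian hosts */
+         printf("%#x\n", le16_to_cpu_sketch(idProduct));
+         return 0;
+ }
+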
+Fixes: 1ba47da52712 ("uwb: add the i1480 DFU driver")
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/uwb/i1480/dfu/usb.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/uwb/i1480/dfu/usb.c
++++ b/drivers/uwb/i1480/dfu/usb.c
+@@ -341,6 +341,7 @@ error_submit_ep1:
+ static
+ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
+ {
++ struct usb_device *udev = interface_to_usbdev(iface);
+ struct i1480_usb *i1480_usb;
+ struct i1480 *i1480;
+ struct device *dev = &iface->dev;
+@@ -352,8 +353,8 @@ int i1480_usb_probe(struct usb_interface
+ iface->cur_altsetting->desc.bInterfaceNumber);
+ goto error;
+ }
+- if (iface->num_altsetting > 1
+- && interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) {
++ if (iface->num_altsetting > 1 &&
++ le16_to_cpu(udev->descriptor.idProduct) == 0xbabe) {
+ /* Need altsetting #1 [HW QUIRK] or EP1 won't work */
+ result = usb_set_interface(interface_to_usbdev(iface), 0, 1);
+ if (result < 0)