git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 8 Nov 2019 13:18:55 +0000 (14:18 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 8 Nov 2019 13:18:55 +0000 (14:18 +0100)
added patches:
arm-8051-1-put_user-fix-possible-data-corruption-in-put_user.patch
arm-8478-2-arm-arm64-add-arm-smccc.patch
arm-8479-2-add-implementation-for-arm-smccc.patch
arm-8480-2-arm64-add-implementation-for-arm-smccc.patch
arm-8481-2-drivers-psci-replace-psci-firmware-calls.patch
arm-8789-1-signal-copy-registers-using-__copy_to_user.patch
arm-8791-1-vfp-use-__copy_to_user-when-saving-vfp-state.patch
arm-8792-1-oabi-compat-copy-oabi-events-using-__copy_to_user.patch
arm-8793-1-signal-replace-__put_user_error-with-__put_user.patch
arm-8794-1-uaccess-prevent-speculative-use-of-the-current-addr_limit.patch
arm-8795-1-spectre-v1.1-use-put_user-for-__put_user.patch
arm-8796-1-spectre-v1-v1.1-provide-helpers-for-address-sanitization.patch
arm-8810-1-vfp-fix-wrong-assignement-to-ufp_exc.patch
arm-add-more-cpu-part-numbers-for-cortex-and-brahma-b15-cpus.patch
arm-add-proc_vtable-and-proc_table-macros.patch
arm-arm64-kvm-advertise-smccc-v1.1.patch
arm-arm64-smccc-1.1-handle-function-result-as-parameters.patch
arm-arm64-smccc-1.1-make-return-values-unsigned-long.patch
arm-arm64-smccc-add-smccc-specific-return-codes.patch
arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch
arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch
arm-bugs-add-support-for-per-processor-bug-checking.patch
arm-bugs-hook-processor-bug-checking-into-smp-and-suspend-paths.patch
arm-bugs-prepare-processor-bug-infrastructure.patch
arm-clean-up-per-processor-check_bugs-method-call.patch
arm-ensure-that-processor-vtables-is-not-lost-after-boot.patch
arm-fix-the-cockup-in-the-previous-patch.patch
arm-make-lookup_processor_type-non-__init.patch
arm-move-system-register-accessors-to-asm-cp15.h.patch
arm-oabi-compat-copy-semops-using-__copy_from_user.patch
arm-signal-copy-registers-using-__copy_from_user.patch
arm-spectre-add-kconfig-symbol-for-cpus-vulnerable-to-spectre.patch
arm-spectre-v1-add-array_index_mask_nospec-implementation.patch
arm-spectre-v1-add-speculation-barrier-csdb-macros.patch
arm-spectre-v1-fix-syscall-entry.patch
arm-spectre-v1-mitigate-user-accesses.patch
arm-spectre-v1-use-get_user-for-__get_user.patch
arm-spectre-v2-add-cortex-a8-and-a15-validation-of-the-ibe-bit.patch
arm-spectre-v2-add-firmware-based-hardening.patch
arm-spectre-v2-harden-branch-predictor-on-context-switches.patch
arm-spectre-v2-harden-user-aborts-in-kernel-space.patch
arm-spectre-v2-per-cpu-vtables-to-work-around-big.little-systems.patch
arm-spectre-v2-warn-about-incorrect-context-switching-functions.patch
arm-split-out-processor-lookup.patch
arm-uaccess-remove-put_user-code-duplication.patch
arm-use-__inttype-in-get_user.patch
arm-vfp-use-__copy_from_user-when-restoring-vfp-state.patch
arm64-kvm-report-smccc_arch_workaround_1-bp-hardening-support.patch
dmaengine-qcom-bam_dma-fix-resource-leak.patch
firmware-psci-expose-psci-conduit.patch
firmware-psci-expose-smccc-version-through-psci_ops.patch

52 files changed:
queue-4.4/arm-8051-1-put_user-fix-possible-data-corruption-in-put_user.patch [new file with mode: 0644]
queue-4.4/arm-8478-2-arm-arm64-add-arm-smccc.patch [new file with mode: 0644]
queue-4.4/arm-8479-2-add-implementation-for-arm-smccc.patch [new file with mode: 0644]
queue-4.4/arm-8480-2-arm64-add-implementation-for-arm-smccc.patch [new file with mode: 0644]
queue-4.4/arm-8481-2-drivers-psci-replace-psci-firmware-calls.patch [new file with mode: 0644]
queue-4.4/arm-8789-1-signal-copy-registers-using-__copy_to_user.patch [new file with mode: 0644]
queue-4.4/arm-8791-1-vfp-use-__copy_to_user-when-saving-vfp-state.patch [new file with mode: 0644]
queue-4.4/arm-8792-1-oabi-compat-copy-oabi-events-using-__copy_to_user.patch [new file with mode: 0644]
queue-4.4/arm-8793-1-signal-replace-__put_user_error-with-__put_user.patch [new file with mode: 0644]
queue-4.4/arm-8794-1-uaccess-prevent-speculative-use-of-the-current-addr_limit.patch [new file with mode: 0644]
queue-4.4/arm-8795-1-spectre-v1.1-use-put_user-for-__put_user.patch [new file with mode: 0644]
queue-4.4/arm-8796-1-spectre-v1-v1.1-provide-helpers-for-address-sanitization.patch [new file with mode: 0644]
queue-4.4/arm-8810-1-vfp-fix-wrong-assignement-to-ufp_exc.patch [new file with mode: 0644]
queue-4.4/arm-add-more-cpu-part-numbers-for-cortex-and-brahma-b15-cpus.patch [new file with mode: 0644]
queue-4.4/arm-add-proc_vtable-and-proc_table-macros.patch [new file with mode: 0644]
queue-4.4/arm-arm64-kvm-advertise-smccc-v1.1.patch [new file with mode: 0644]
queue-4.4/arm-arm64-smccc-1.1-handle-function-result-as-parameters.patch [new file with mode: 0644]
queue-4.4/arm-arm64-smccc-1.1-make-return-values-unsigned-long.patch [new file with mode: 0644]
queue-4.4/arm-arm64-smccc-add-smccc-specific-return-codes.patch [new file with mode: 0644]
queue-4.4/arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch [new file with mode: 0644]
queue-4.4/arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch [new file with mode: 0644]
queue-4.4/arm-bugs-add-support-for-per-processor-bug-checking.patch [new file with mode: 0644]
queue-4.4/arm-bugs-hook-processor-bug-checking-into-smp-and-suspend-paths.patch [new file with mode: 0644]
queue-4.4/arm-bugs-prepare-processor-bug-infrastructure.patch [new file with mode: 0644]
queue-4.4/arm-clean-up-per-processor-check_bugs-method-call.patch [new file with mode: 0644]
queue-4.4/arm-ensure-that-processor-vtables-is-not-lost-after-boot.patch [new file with mode: 0644]
queue-4.4/arm-fix-the-cockup-in-the-previous-patch.patch [new file with mode: 0644]
queue-4.4/arm-make-lookup_processor_type-non-__init.patch [new file with mode: 0644]
queue-4.4/arm-move-system-register-accessors-to-asm-cp15.h.patch [new file with mode: 0644]
queue-4.4/arm-oabi-compat-copy-semops-using-__copy_from_user.patch [new file with mode: 0644]
queue-4.4/arm-signal-copy-registers-using-__copy_from_user.patch [new file with mode: 0644]
queue-4.4/arm-spectre-add-kconfig-symbol-for-cpus-vulnerable-to-spectre.patch [new file with mode: 0644]
queue-4.4/arm-spectre-v1-add-array_index_mask_nospec-implementation.patch [new file with mode: 0644]
queue-4.4/arm-spectre-v1-add-speculation-barrier-csdb-macros.patch [new file with mode: 0644]
queue-4.4/arm-spectre-v1-fix-syscall-entry.patch [new file with mode: 0644]
queue-4.4/arm-spectre-v1-mitigate-user-accesses.patch [new file with mode: 0644]
queue-4.4/arm-spectre-v1-use-get_user-for-__get_user.patch [new file with mode: 0644]
queue-4.4/arm-spectre-v2-add-cortex-a8-and-a15-validation-of-the-ibe-bit.patch [new file with mode: 0644]
queue-4.4/arm-spectre-v2-add-firmware-based-hardening.patch [new file with mode: 0644]
queue-4.4/arm-spectre-v2-harden-branch-predictor-on-context-switches.patch [new file with mode: 0644]
queue-4.4/arm-spectre-v2-harden-user-aborts-in-kernel-space.patch [new file with mode: 0644]
queue-4.4/arm-spectre-v2-per-cpu-vtables-to-work-around-big.little-systems.patch [new file with mode: 0644]
queue-4.4/arm-spectre-v2-warn-about-incorrect-context-switching-functions.patch [new file with mode: 0644]
queue-4.4/arm-split-out-processor-lookup.patch [new file with mode: 0644]
queue-4.4/arm-uaccess-remove-put_user-code-duplication.patch [new file with mode: 0644]
queue-4.4/arm-use-__inttype-in-get_user.patch [new file with mode: 0644]
queue-4.4/arm-vfp-use-__copy_from_user-when-restoring-vfp-state.patch [new file with mode: 0644]
queue-4.4/arm64-kvm-report-smccc_arch_workaround_1-bp-hardening-support.patch [new file with mode: 0644]
queue-4.4/dmaengine-qcom-bam_dma-fix-resource-leak.patch [new file with mode: 0644]
queue-4.4/firmware-psci-expose-psci-conduit.patch [new file with mode: 0644]
queue-4.4/firmware-psci-expose-smccc-version-through-psci_ops.patch [new file with mode: 0644]
queue-4.4/series

diff --git a/queue-4.4/arm-8051-1-put_user-fix-possible-data-corruption-in-put_user.patch b/queue-4.4/arm-8051-1-put_user-fix-possible-data-corruption-in-put_user.patch
new file mode 100644 (file)
index 0000000..6efa64e
--- /dev/null
@@ -0,0 +1,39 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:05 +0100
+Subject: ARM: 8051/1: put_user: fix possible data corruption in put_user
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Andrey Ryabinin <a.ryabinin@samsung.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-2-ardb@kernel.org>
+
+From: Andrey Ryabinin <a.ryabinin@samsung.com>
+
+Commit 537094b64b229bf3ad146042f83e74cf6abe59df upstream.
+
+According to the ARM procedure call standard, the r2 register is
+call-clobbered. So after the result of the x expression has been put
+into r2, any subsequent function call in p may overwrite r2. To fix
+this, the result of the p expression must be saved to a temporary
+variable before the x expression is assigned to __r2.
+
+Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
+Reviewed-by: Nicolas Pitre <nico@linaro.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/uaccess.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -251,7 +251,7 @@ extern int __put_user_8(void *, unsigned
+       ({                                                              \
+               unsigned long __limit = current_thread_info()->addr_limit - 1; \
+               const typeof(*(p)) __user *__tmp_p = (p);               \
+-              register typeof(*(p)) __r2 asm("r2") = (x);     \
++              register const typeof(*(p)) __r2 asm("r2") = (x);       \
+               register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
+               register unsigned long __l asm("r1") = __limit;         \
+               register int __e asm("r0");                             \
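
For illustration, a minimal sketch of the hazard this patch closes (not part
of the series; next_slot() is a hypothetical helper): with the old macro, the
value was loaded into r2 before the pointer expression was evaluated, and any
function call in that expression may clobber r2, which is caller-saved under
the AAPCS.

  #include <linux/uaccess.h>

  extern int __user *next_slot(void);   /* hypothetical; any function call */

  void example(int value)
  {
          /*
           * Pre-fix, put_user() bound 'value' to r2 first; the call to
           * next_slot() could then overwrite r2, corrupting the value
           * actually stored to user space.
           */
          put_user(value, next_slot());
  }
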
diff --git a/queue-4.4/arm-8478-2-arm-arm64-add-arm-smccc.patch b/queue-4.4/arm-8478-2-arm-arm64-add-arm-smccc.patch
new file mode 100644 (file)
index 0000000..2d0a44d
--- /dev/null
@@ -0,0 +1,150 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:06 +0100
+Subject: ARM: 8478/2: arm/arm64: add arm-smccc
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Jens Wiklander <jens.wiklander@linaro.org>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-3-ardb@kernel.org>
+
+From: Jens Wiklander <jens.wiklander@linaro.org>
+
+Commit 98dd64f34f47ce19b388d9015f767f48393a81eb upstream.
+
+Adds helpers to do SMC and HVC based on the ARM SMC Calling Convention.
+CONFIG_HAVE_ARM_SMCCC is enabled for architectures that may support the
+SMC or HVC instruction. It's the responsibility of the caller to know if
+the SMC instruction is supported by the platform.
+
+This patch doesn't provide an implementation of the declared functions.
+Later patches will bring in implementations and set
+CONFIG_HAVE_ARM_SMCCC for ARM and ARM64 respectively.
+
+Reviewed-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/Kconfig  |    3 +
+ include/linux/arm-smccc.h |  104 ++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 107 insertions(+)
+
+--- a/drivers/firmware/Kconfig
++++ b/drivers/firmware/Kconfig
+@@ -174,6 +174,9 @@ config QCOM_SCM_64
+       def_bool y
+       depends on QCOM_SCM && ARM64
++config HAVE_ARM_SMCCC
++      bool
++
+ source "drivers/firmware/broadcom/Kconfig"
+ source "drivers/firmware/google/Kconfig"
+ source "drivers/firmware/efi/Kconfig"
+--- /dev/null
++++ b/include/linux/arm-smccc.h
+@@ -0,0 +1,104 @@
++/*
++ * Copyright (c) 2015, Linaro Limited
++ *
++ * This software is licensed under the terms of the GNU General Public
++ * License version 2, as published by the Free Software Foundation, and
++ * may be copied, distributed, and modified under those terms.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ */
++#ifndef __LINUX_ARM_SMCCC_H
++#define __LINUX_ARM_SMCCC_H
++
++#include <linux/linkage.h>
++#include <linux/types.h>
++
++/*
++ * This file provides common defines for ARM SMC Calling Convention as
++ * specified in
++ * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
++ */
++
++#define ARM_SMCCC_STD_CALL            0
++#define ARM_SMCCC_FAST_CALL           1
++#define ARM_SMCCC_TYPE_SHIFT          31
++
++#define ARM_SMCCC_SMC_32              0
++#define ARM_SMCCC_SMC_64              1
++#define ARM_SMCCC_CALL_CONV_SHIFT     30
++
++#define ARM_SMCCC_OWNER_MASK          0x3F
++#define ARM_SMCCC_OWNER_SHIFT         24
++
++#define ARM_SMCCC_FUNC_MASK           0xFFFF
++
++#define ARM_SMCCC_IS_FAST_CALL(smc_val)       \
++      ((smc_val) & (ARM_SMCCC_FAST_CALL << ARM_SMCCC_TYPE_SHIFT))
++#define ARM_SMCCC_IS_64(smc_val) \
++      ((smc_val) & (ARM_SMCCC_SMC_64 << ARM_SMCCC_CALL_CONV_SHIFT))
++#define ARM_SMCCC_FUNC_NUM(smc_val)   ((smc_val) & ARM_SMCCC_FUNC_MASK)
++#define ARM_SMCCC_OWNER_NUM(smc_val) \
++      (((smc_val) >> ARM_SMCCC_OWNER_SHIFT) & ARM_SMCCC_OWNER_MASK)
++
++#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num) \
++      (((type) << ARM_SMCCC_TYPE_SHIFT) | \
++      ((calling_convention) << ARM_SMCCC_CALL_CONV_SHIFT) | \
++      (((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \
++      ((func_num) & ARM_SMCCC_FUNC_MASK))
++
++#define ARM_SMCCC_OWNER_ARCH          0
++#define ARM_SMCCC_OWNER_CPU           1
++#define ARM_SMCCC_OWNER_SIP           2
++#define ARM_SMCCC_OWNER_OEM           3
++#define ARM_SMCCC_OWNER_STANDARD      4
++#define ARM_SMCCC_OWNER_TRUSTED_APP   48
++#define ARM_SMCCC_OWNER_TRUSTED_APP_END       49
++#define ARM_SMCCC_OWNER_TRUSTED_OS    50
++#define ARM_SMCCC_OWNER_TRUSTED_OS_END        63
++
++/**
++ * struct arm_smccc_res - Result from SMC/HVC call
++ * @a0-a3 result values from registers 0 to 3
++ */
++struct arm_smccc_res {
++      unsigned long a0;
++      unsigned long a1;
++      unsigned long a2;
++      unsigned long a3;
++};
++
++/**
++ * arm_smccc_smc() - make SMC calls
++ * @a0-a7: arguments passed in registers 0 to 7
++ * @res: result values from registers 0 to 3
++ *
++ * This function is used to make SMC calls following SMC Calling Convention.
++ * The content of the supplied param are copied to registers 0 to 7 prior
++ * to the SMC instruction. The return values are updated with the content
++ * from register 0 to 3 on return from the SMC instruction.
++ */
++asmlinkage void arm_smccc_smc(unsigned long a0, unsigned long a1,
++                      unsigned long a2, unsigned long a3, unsigned long a4,
++                      unsigned long a5, unsigned long a6, unsigned long a7,
++                      struct arm_smccc_res *res);
++
++/**
++ * arm_smccc_hvc() - make HVC calls
++ * @a0-a7: arguments passed in registers 0 to 7
++ * @res: result values from registers 0 to 3
++ *
++ * This function is used to make HVC calls following SMC Calling
++ * Convention.  The content of the supplied param are copied to registers 0
++ * to 7 prior to the HVC instruction. The return values are updated with
++ * the content from register 0 to 3 on return from the HVC instruction.
++ */
++asmlinkage void arm_smccc_hvc(unsigned long a0, unsigned long a1,
++                      unsigned long a2, unsigned long a3, unsigned long a4,
++                      unsigned long a5, unsigned long a6, unsigned long a7,
++                      struct arm_smccc_res *res);
++
++#endif /*__LINUX_ARM_SMCCC_H*/
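
As a worked example of the encoding above (editorial sketch using only the
macros this patch introduces): a fast SMC32 call owned by the architecture
with function number 0x8000 encodes to 0x80008000, which happens to match
the SMCCC_ARCH_WORKAROUND_1 identifier used later in this series.

  #include <linux/arm-smccc.h>

  /*
   * (1 << ARM_SMCCC_TYPE_SHIFT) | (0 << ARM_SMCCC_CALL_CONV_SHIFT) |
   * (0 << ARM_SMCCC_OWNER_SHIFT) | 0x8000 == 0x80008000
   */
  #define EXAMPLE_FN_ID                                           \
          ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                 \
                             ARM_SMCCC_SMC_32,                    \
                             ARM_SMCCC_OWNER_ARCH, 0x8000)
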
diff --git a/queue-4.4/arm-8479-2-add-implementation-for-arm-smccc.patch b/queue-4.4/arm-8479-2-add-implementation-for-arm-smccc.patch
new file mode 100644 (file)
index 0000000..79a147f
--- /dev/null
@@ -0,0 +1,131 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:07 +0100
+Subject: ARM: 8479/2: add implementation for arm-smccc
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Jens Wiklander <jens.wiklander@linaro.org>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-4-ardb@kernel.org>
+
+From: Jens Wiklander <jens.wiklander@linaro.org>
+
+Commit b329f95d70f3f955093e9a2b18ac1ed3587a8f73 upstream.
+
+Adds an implementation for arm-smccc and enables CONFIG_HAVE_ARM_SMCCC for
+architectures that may support arm-smccc. It's the responsibility of the
+caller to know if the SMC instruction is supported by the platform.
+
+Reviewed-by: Lars Persson <lars.persson@axis.com>
+Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/Kconfig             |    1 
+ arch/arm/kernel/Makefile     |    2 +
+ arch/arm/kernel/armksyms.c   |    6 ++++
+ arch/arm/kernel/smccc-call.S |   62 +++++++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 71 insertions(+)
+
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -37,6 +37,7 @@ config ARM
+       select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32
+       select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
+       select HAVE_ARCH_TRACEHOOK
++      select HAVE_ARM_SMCCC if CPU_V7
+       select HAVE_BPF_JIT
+       select HAVE_CC_STACKPROTECTOR
+       select HAVE_CONTEXT_TRACKING
+--- a/arch/arm/kernel/Makefile
++++ b/arch/arm/kernel/Makefile
+@@ -91,4 +91,6 @@ obj-y                                += psci-call.o
+ obj-$(CONFIG_SMP)             += psci_smp.o
+ endif
++obj-$(CONFIG_HAVE_ARM_SMCCC)  += smccc-call.o
++
+ extra-y := $(head-y) vmlinux.lds
+--- a/arch/arm/kernel/armksyms.c
++++ b/arch/arm/kernel/armksyms.c
+@@ -16,6 +16,7 @@
+ #include <linux/syscalls.h>
+ #include <linux/uaccess.h>
+ #include <linux/io.h>
++#include <linux/arm-smccc.h>
+ #include <asm/checksum.h>
+ #include <asm/ftrace.h>
+@@ -175,3 +176,8 @@ EXPORT_SYMBOL(__gnu_mcount_nc);
+ EXPORT_SYMBOL(__pv_phys_pfn_offset);
+ EXPORT_SYMBOL(__pv_offset);
+ #endif
++
++#ifdef CONFIG_HAVE_ARM_SMCCC
++EXPORT_SYMBOL(arm_smccc_smc);
++EXPORT_SYMBOL(arm_smccc_hvc);
++#endif
+--- /dev/null
++++ b/arch/arm/kernel/smccc-call.S
+@@ -0,0 +1,62 @@
++/*
++ * Copyright (c) 2015, Linaro Limited
++ *
++ * This software is licensed under the terms of the GNU General Public
++ * License version 2, as published by the Free Software Foundation, and
++ * may be copied, distributed, and modified under those terms.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ */
++#include <linux/linkage.h>
++
++#include <asm/opcodes-sec.h>
++#include <asm/opcodes-virt.h>
++#include <asm/unwind.h>
++
++      /*
++       * Wrap c macros in asm macros to delay expansion until after the
++       * SMCCC asm macro is expanded.
++       */
++      .macro SMCCC_SMC
++      __SMC(0)
++      .endm
++
++      .macro SMCCC_HVC
++      __HVC(0)
++      .endm
++
++      .macro SMCCC instr
++UNWIND(       .fnstart)
++      mov     r12, sp
++      push    {r4-r7}
++UNWIND(       .save   {r4-r7})
++      ldm     r12, {r4-r7}
++      \instr
++      pop     {r4-r7}
++      ldr     r12, [sp, #(4 * 4)]
++      stm     r12, {r0-r3}
++      bx      lr
++UNWIND(       .fnend)
++      .endm
++
++/*
++ * void smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
++ *              unsigned long a3, unsigned long a4, unsigned long a5,
++ *              unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
++ */
++ENTRY(arm_smccc_smc)
++      SMCCC SMCCC_SMC
++ENDPROC(arm_smccc_smc)
++
++/*
++ * void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
++ *              unsigned long a3, unsigned long a4, unsigned long a5,
++ *              unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
++ */
++ENTRY(arm_smccc_hvc)
++      SMCCC SMCCC_HVC
++ENDPROC(arm_smccc_hvc)
diff --git a/queue-4.4/arm-8480-2-arm64-add-implementation-for-arm-smccc.patch b/queue-4.4/arm-8480-2-arm64-add-implementation-for-arm-smccc.patch
new file mode 100644 (file)
index 0000000..a2d4941
--- /dev/null
@@ -0,0 +1,130 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:08 +0100
+Subject: ARM: 8480/2: arm64: add implementation for arm-smccc
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Jens Wiklander <jens.wiklander@linaro.org>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-5-ardb@kernel.org>
+
+From: Jens Wiklander <jens.wiklander@linaro.org>
+
+Commit 14457459f9ca2ff8521686168ea179edc3a56a44 upstream.
+
+Adds an implementation for arm-smccc and enables CONFIG_HAVE_ARM_SMCCC.
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/Kconfig              |    1 
+ arch/arm64/kernel/Makefile      |    2 -
+ arch/arm64/kernel/arm64ksyms.c  |    5 ++++
+ arch/arm64/kernel/asm-offsets.c |    3 ++
+ arch/arm64/kernel/smccc-call.S  |   43 ++++++++++++++++++++++++++++++++++++++++
+ 5 files changed, 53 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -92,6 +92,7 @@ config ARM64
+       select SPARSE_IRQ
+       select SYSCTL_EXCEPTION_TRACE
+       select HAVE_CONTEXT_TRACKING
++      select HAVE_ARM_SMCCC
+       help
+         ARM 64-bit (AArch64) Linux support.
+--- a/arch/arm64/kernel/Makefile
++++ b/arch/arm64/kernel/Makefile
+@@ -17,7 +17,7 @@ arm64-obj-y          := debug-monitors.o entry.o
+                          hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o       \
+                          return_address.o cpuinfo.o cpu_errata.o              \
+                          cpufeature.o alternative.o cacheinfo.o               \
+-                         smp.o smp_spin_table.o topology.o
++                         smp.o smp_spin_table.o topology.o smccc-call.o
+ extra-$(CONFIG_EFI)                   := efi-entry.o
+--- a/arch/arm64/kernel/arm64ksyms.c
++++ b/arch/arm64/kernel/arm64ksyms.c
+@@ -26,6 +26,7 @@
+ #include <linux/syscalls.h>
+ #include <linux/uaccess.h>
+ #include <linux/io.h>
++#include <linux/arm-smccc.h>
+ #include <asm/checksum.h>
+@@ -68,3 +69,7 @@ EXPORT_SYMBOL(test_and_change_bit);
+ #ifdef CONFIG_FUNCTION_TRACER
+ EXPORT_SYMBOL(_mcount);
+ #endif
++
++      /* arm-smccc */
++EXPORT_SYMBOL(arm_smccc_smc);
++EXPORT_SYMBOL(arm_smccc_hvc);
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -28,6 +28,7 @@
+ #include <asm/suspend.h>
+ #include <asm/vdso_datapage.h>
+ #include <linux/kbuild.h>
++#include <linux/arm-smccc.h>
+ int main(void)
+ {
+@@ -162,5 +163,7 @@ int main(void)
+   DEFINE(SLEEP_SAVE_SP_PHYS,  offsetof(struct sleep_save_sp, save_ptr_stash_phys));
+   DEFINE(SLEEP_SAVE_SP_VIRT,  offsetof(struct sleep_save_sp, save_ptr_stash));
+ #endif
++  DEFINE(ARM_SMCCC_RES_X0_OFFS,       offsetof(struct arm_smccc_res, a0));
++  DEFINE(ARM_SMCCC_RES_X2_OFFS,       offsetof(struct arm_smccc_res, a2));
+   return 0;
+ }
+--- /dev/null
++++ b/arch/arm64/kernel/smccc-call.S
+@@ -0,0 +1,43 @@
++/*
++ * Copyright (c) 2015, Linaro Limited
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License Version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ */
++#include <linux/linkage.h>
++#include <asm/asm-offsets.h>
++
++      .macro SMCCC instr
++      .cfi_startproc
++      \instr  #0
++      ldr     x4, [sp]
++      stp     x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
++      stp     x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]
++      ret
++      .cfi_endproc
++      .endm
++
++/*
++ * void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
++ *              unsigned long a3, unsigned long a4, unsigned long a5,
++ *              unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
++ */
++ENTRY(arm_smccc_smc)
++      SMCCC   smc
++ENDPROC(arm_smccc_smc)
++
++/*
++ * void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
++ *              unsigned long a3, unsigned long a4, unsigned long a5,
++ *              unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
++ */
++ENTRY(arm_smccc_hvc)
++      SMCCC   hvc
++ENDPROC(arm_smccc_hvc)
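
A minimal caller-side sketch (editorial; the PSCI conversion in the next
patch does exactly this), assuming a platform whose firmware conduit is SMC:
the PSCI version query, function ID 0x84000000 (PSCI_0_2_FN_PSCI_VERSION),
can be issued through the new primitive with the result returned in a0.

  #include <linux/arm-smccc.h>

  static unsigned long query_psci_version(void)
  {
          struct arm_smccc_res res;

          /* a1-a7 are unused by PSCI_VERSION; results land in a0-a3. */
          arm_smccc_smc(0x84000000, 0, 0, 0, 0, 0, 0, 0, &res);
          return res.a0;
  }
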
diff --git a/queue-4.4/arm-8481-2-drivers-psci-replace-psci-firmware-calls.patch b/queue-4.4/arm-8481-2-drivers-psci-replace-psci-firmware-calls.patch
new file mode 100644 (file)
index 0000000..27d4b84
--- /dev/null
@@ -0,0 +1,176 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:09 +0100
+Subject: ARM: 8481/2: drivers: psci: replace psci firmware calls
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Jens Wiklander <jens.wiklander@linaro.org>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-6-ardb@kernel.org>
+
+From: Jens Wiklander <jens.wiklander@linaro.org>
+
+Commit e679660dbb8347f275fe5d83a5dd59c1fb6c8e63 upstream.
+
+Switch to use a generic interface for issuing SMC/HVC based on the ARM
+SMC Calling Convention. Removes the now unused psci-call.S.
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Tested-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/Kconfig              |    2 +-
+ arch/arm/kernel/Makefile      |    1 -
+ arch/arm/kernel/psci-call.S   |   31 -------------------------------
+ arch/arm64/kernel/Makefile    |    2 +-
+ arch/arm64/kernel/psci-call.S |   28 ----------------------------
+ drivers/firmware/psci.c       |   23 +++++++++++++++++++++--
+ 6 files changed, 23 insertions(+), 64 deletions(-)
+
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1482,7 +1482,7 @@ config HOTPLUG_CPU
+ config ARM_PSCI
+       bool "Support for the ARM Power State Coordination Interface (PSCI)"
+-      depends on CPU_V7
++      depends on HAVE_ARM_SMCCC
+       select ARM_PSCI_FW
+       help
+         Say Y here if you want Linux to communicate with system firmware
+--- a/arch/arm/kernel/Makefile
++++ b/arch/arm/kernel/Makefile
+@@ -87,7 +87,6 @@ obj-$(CONFIG_EARLY_PRINTK)   += early_prin
+ obj-$(CONFIG_ARM_VIRT_EXT)    += hyp-stub.o
+ ifeq ($(CONFIG_ARM_PSCI),y)
+-obj-y                         += psci-call.o
+ obj-$(CONFIG_SMP)             += psci_smp.o
+ endif
+--- a/arch/arm/kernel/psci-call.S
++++ /dev/null
+@@ -1,31 +0,0 @@
+-/*
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- *
+- * Copyright (C) 2015 ARM Limited
+- *
+- * Author: Mark Rutland <mark.rutland@arm.com>
+- */
+-
+-#include <linux/linkage.h>
+-
+-#include <asm/opcodes-sec.h>
+-#include <asm/opcodes-virt.h>
+-
+-/* int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
+-ENTRY(__invoke_psci_fn_hvc)
+-      __HVC(0)
+-      bx      lr
+-ENDPROC(__invoke_psci_fn_hvc)
+-
+-/* int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
+-ENTRY(__invoke_psci_fn_smc)
+-      __SMC(0)
+-      bx      lr
+-ENDPROC(__invoke_psci_fn_smc)
+--- a/arch/arm64/kernel/Makefile
++++ b/arch/arm64/kernel/Makefile
+@@ -14,7 +14,7 @@ CFLAGS_REMOVE_return_address.o = -pg
+ arm64-obj-y           := debug-monitors.o entry.o irq.o fpsimd.o              \
+                          entry-fpsimd.o process.o ptrace.o setup.o signal.o   \
+                          sys.o stacktrace.o time.o traps.o io.o vdso.o        \
+-                         hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o       \
++                         hyp-stub.o psci.o cpu_ops.o insn.o   \
+                          return_address.o cpuinfo.o cpu_errata.o              \
+                          cpufeature.o alternative.o cacheinfo.o               \
+                          smp.o smp_spin_table.o topology.o smccc-call.o
+--- a/arch/arm64/kernel/psci-call.S
++++ /dev/null
+@@ -1,28 +0,0 @@
+-/*
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- *
+- * Copyright (C) 2015 ARM Limited
+- *
+- * Author: Will Deacon <will.deacon@arm.com>
+- */
+-
+-#include <linux/linkage.h>
+-
+-/* int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
+-ENTRY(__invoke_psci_fn_hvc)
+-      hvc     #0
+-      ret
+-ENDPROC(__invoke_psci_fn_hvc)
+-
+-/* int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
+-ENTRY(__invoke_psci_fn_smc)
+-      smc     #0
+-      ret
+-ENDPROC(__invoke_psci_fn_smc)
+--- a/drivers/firmware/psci.c
++++ b/drivers/firmware/psci.c
+@@ -13,6 +13,7 @@
+ #define pr_fmt(fmt) "psci: " fmt
++#include <linux/arm-smccc.h>
+ #include <linux/errno.h>
+ #include <linux/linkage.h>
+ #include <linux/of.h>
+@@ -58,8 +59,6 @@ struct psci_operations psci_ops;
+ typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+                               unsigned long, unsigned long);
+-asmlinkage psci_fn __invoke_psci_fn_hvc;
+-asmlinkage psci_fn __invoke_psci_fn_smc;
+ static psci_fn *invoke_psci_fn;
+ enum psci_function {
+@@ -107,6 +106,26 @@ bool psci_power_state_is_valid(u32 state
+       return !(state & ~valid_mask);
+ }
++static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
++                      unsigned long arg0, unsigned long arg1,
++                      unsigned long arg2)
++{
++      struct arm_smccc_res res;
++
++      arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
++      return res.a0;
++}
++
++static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
++                      unsigned long arg0, unsigned long arg1,
++                      unsigned long arg2)
++{
++      struct arm_smccc_res res;
++
++      arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
++      return res.a0;
++}
++
+ static int psci_to_linux_errno(int errno)
+ {
+       switch (errno) {
diff --git a/queue-4.4/arm-8789-1-signal-copy-registers-using-__copy_to_user.patch b/queue-4.4/arm-8789-1-signal-copy-registers-using-__copy_to_user.patch
new file mode 100644 (file)
index 0000000..a831fd0
--- /dev/null
@@ -0,0 +1,87 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:40 +0100
+Subject: ARM: 8789/1: signal: copy registers using __copy_to_user()
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Julien Thierry <julien.thierry@arm.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-37-ardb@kernel.org>
+
+From: Julien Thierry <julien.thierry@arm.com>
+
+Commit 5ca451cf6ed04443774bbb7ee45332dafa42e99f upstream.
+
+When saving the ARM integer registers, use __copy_to_user() to
+copy them into the user signal frame, rather than __put_user_error().
+This has the benefit of disabling/enabling PAN once for the whole copy
+instead of once per write.
+
+Signed-off-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kernel/signal.c |   47 ++++++++++++++++++++++++++---------------------
+ 1 file changed, 26 insertions(+), 21 deletions(-)
+
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -256,30 +256,35 @@ static int
+ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
+ {
+       struct aux_sigframe __user *aux;
++      struct sigcontext context;
+       int err = 0;
+-      __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
+-      __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
+-      __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
+-      __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
+-      __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
+-      __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
+-      __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
+-      __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
+-      __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
+-      __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
+-      __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
+-      __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
+-      __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
+-      __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
+-      __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
+-      __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
+-      __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
++      context = (struct sigcontext) {
++              .arm_r0        = regs->ARM_r0,
++              .arm_r1        = regs->ARM_r1,
++              .arm_r2        = regs->ARM_r2,
++              .arm_r3        = regs->ARM_r3,
++              .arm_r4        = regs->ARM_r4,
++              .arm_r5        = regs->ARM_r5,
++              .arm_r6        = regs->ARM_r6,
++              .arm_r7        = regs->ARM_r7,
++              .arm_r8        = regs->ARM_r8,
++              .arm_r9        = regs->ARM_r9,
++              .arm_r10       = regs->ARM_r10,
++              .arm_fp        = regs->ARM_fp,
++              .arm_ip        = regs->ARM_ip,
++              .arm_sp        = regs->ARM_sp,
++              .arm_lr        = regs->ARM_lr,
++              .arm_pc        = regs->ARM_pc,
++              .arm_cpsr      = regs->ARM_cpsr,
+-      __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
+-      __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
+-      __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
+-      __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
++              .trap_no       = current->thread.trap_no,
++              .error_code    = current->thread.error_code,
++              .fault_address = current->thread.address,
++              .oldmask       = set->sig[0],
++      };
++
++      err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));
+       err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
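
The pattern applied here, sketched below with hypothetical names, recurs in
the next few patches: assemble the data in a kernel-space struct, then issue
one bulk __copy_to_user(), so PAN is disabled and re-enabled once per frame
rather than once per field.

  #include <linux/uaccess.h>

  struct frame { unsigned long a, b, c; };      /* hypothetical */

  static int save_frame(struct frame __user *uf, unsigned long a,
                        unsigned long b, unsigned long c)
  {
          struct frame kf = { .a = a, .b = b, .c = c };

          /* One uaccess window for the whole struct instead of three. */
          return __copy_to_user(uf, &kf, sizeof(kf)) ? -EFAULT : 0;
  }
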
diff --git a/queue-4.4/arm-8791-1-vfp-use-__copy_to_user-when-saving-vfp-state.patch b/queue-4.4/arm-8791-1-vfp-use-__copy_to_user-when-saving-vfp-state.patch
new file mode 100644 (file)
index 0000000..9339c2e
--- /dev/null
@@ -0,0 +1,116 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:41 +0100
+Subject: ARM: 8791/1: vfp: use __copy_to_user() when saving VFP state
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Julien Thierry <julien.thierry@arm.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-38-ardb@kernel.org>
+
+From: Julien Thierry <julien.thierry@arm.com>
+
+Commit 3aa2df6ec2ca6bc143a65351cca4266d03a8bc41 upstream.
+
+Use __copy_to_user() rather than __put_user_error() for individual
+members when saving VFP state.
+This has the benefit of disabling/enabling PAN once per copied struct
+instead of once per write.
+
+Signed-off-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/thread_info.h |    4 ++--
+ arch/arm/kernel/signal.c           |   13 +++++++------
+ arch/arm/vfp/vfpmodule.c           |   20 ++++++++------------
+ 3 files changed, 17 insertions(+), 20 deletions(-)
+
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -124,8 +124,8 @@ extern void vfp_flush_hwstate(struct thr
+ struct user_vfp;
+ struct user_vfp_exc;
+-extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
+-                                         struct user_vfp_exc __user *);
++extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
++                                         struct user_vfp_exc *);
+ extern int vfp_restore_user_hwstate(struct user_vfp *,
+                                   struct user_vfp_exc *);
+ #endif
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -94,17 +94,18 @@ static int restore_iwmmxt_context(struct
+ static int preserve_vfp_context(struct vfp_sigframe __user *frame)
+ {
+-      const unsigned long magic = VFP_MAGIC;
+-      const unsigned long size = VFP_STORAGE_SIZE;
++      struct vfp_sigframe kframe;
+       int err = 0;
+-      __put_user_error(magic, &frame->magic, err);
+-      __put_user_error(size, &frame->size, err);
++      memset(&kframe, 0, sizeof(kframe));
++      kframe.magic = VFP_MAGIC;
++      kframe.size = VFP_STORAGE_SIZE;
++      err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
+       if (err)
+-              return -EFAULT;
++              return err;
+-      return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
++      return __copy_to_user(frame, &kframe, sizeof(kframe));
+ }
+ static int restore_vfp_context(struct vfp_sigframe __user *auxp)
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -558,12 +558,11 @@ void vfp_flush_hwstate(struct thread_inf
+  * Save the current VFP state into the provided structures and prepare
+  * for entry into a new function (signal handler).
+  */
+-int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
+-                                  struct user_vfp_exc __user *ufp_exc)
++int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
++                                  struct user_vfp_exc *ufp_exc)
+ {
+       struct thread_info *thread = current_thread_info();
+       struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+-      int err = 0;
+       /* Ensure that the saved hwstate is up-to-date. */
+       vfp_sync_hwstate(thread);
+@@ -572,22 +571,19 @@ int vfp_preserve_user_clear_hwstate(stru
+        * Copy the floating point registers. There can be unused
+        * registers see asm/hwcap.h for details.
+        */
+-      err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
+-                            sizeof(hwstate->fpregs));
++      memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));
++
+       /*
+        * Copy the status and control register.
+        */
+-      __put_user_error(hwstate->fpscr, &ufp->fpscr, err);
++      ufp->fpscr = hwstate->fpscr;
+       /*
+        * Copy the exception registers.
+        */
+-      __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
+-      __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+-      __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+-
+-      if (err)
+-              return -EFAULT;
++      ufp_exc->fpexc = hwstate->fpexc;
++      ufp_exc->fpinst = hwstate->fpinst;
++      ufp_exc->fpinst2 = ufp_exc->fpinst2;
+       /* Ensure that VFP is disabled. */
+       vfp_flush_hwstate(thread);
diff --git a/queue-4.4/arm-8792-1-oabi-compat-copy-oabi-events-using-__copy_to_user.patch b/queue-4.4/arm-8792-1-oabi-compat-copy-oabi-events-using-__copy_to_user.patch
new file mode 100644 (file)
index 0000000..1df5f46
--- /dev/null
@@ -0,0 +1,52 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:42 +0100
+Subject: ARM: 8792/1: oabi-compat: copy oabi events using __copy_to_user()
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Julien Thierry <julien.thierry@arm.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-39-ardb@kernel.org>
+
+From: Julien Thierry <julien.thierry@arm.com>
+
+Commit 319508902600c2688e057750148487996396e9ca upstream.
+
+Copy events to user space using __copy_to_user() rather than copying
+members individually with __put_user_error().
+This has the benefit of disabling/enabling PAN once per event instead
+of once per event member.
+
+Signed-off-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kernel/sys_oabi-compat.c |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/kernel/sys_oabi-compat.c
++++ b/arch/arm/kernel/sys_oabi-compat.c
+@@ -276,6 +276,7 @@ asmlinkage long sys_oabi_epoll_wait(int
+                                   int maxevents, int timeout)
+ {
+       struct epoll_event *kbuf;
++      struct oabi_epoll_event e;
+       mm_segment_t fs;
+       long ret, err, i;
+@@ -294,8 +295,11 @@ asmlinkage long sys_oabi_epoll_wait(int
+       set_fs(fs);
+       err = 0;
+       for (i = 0; i < ret; i++) {
+-              __put_user_error(kbuf[i].events, &events->events, err);
+-              __put_user_error(kbuf[i].data,   &events->data,   err);
++              e.events = kbuf[i].events;
++              e.data = kbuf[i].data;
++              err = __copy_to_user(events, &e, sizeof(e));
++              if (err)
++                      break;
+               events++;
+       }
+       kfree(kbuf);
diff --git a/queue-4.4/arm-8793-1-signal-replace-__put_user_error-with-__put_user.patch b/queue-4.4/arm-8793-1-signal-replace-__put_user_error-with-__put_user.patch
new file mode 100644 (file)
index 0000000..d23b089
--- /dev/null
@@ -0,0 +1,57 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:43 +0100
+Subject: ARM: 8793/1: signal: replace __put_user_error with __put_user
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Julien Thierry <julien.thierry@arm.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-40-ardb@kernel.org>
+
+From: Julien Thierry <julien.thierry@arm.com>
+
+Commit 18ea66bd6e7a95bdc598223d72757190916af28b upstream.
+
+With Spectre-v1.1 mitigations, __put_user_error is pointless. In an attempt
+to remove it, replace its references in frame setups with __put_user.
+
+Signed-off-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kernel/signal.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -302,7 +302,7 @@ setup_sigframe(struct sigframe __user *s
+       if (err == 0)
+               err |= preserve_vfp_context(&aux->vfp);
+ #endif
+-      __put_user_error(0, &aux->end_magic, err);
++      err |= __put_user(0, &aux->end_magic);
+       return err;
+ }
+@@ -434,7 +434,7 @@ setup_frame(struct ksignal *ksig, sigset
+       /*
+        * Set uc.uc_flags to a value which sc.trap_no would never have.
+        */
+-      __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
++      err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
+       err |= setup_sigframe(frame, regs, set);
+       if (err == 0)
+@@ -454,8 +454,8 @@ setup_rt_frame(struct ksignal *ksig, sig
+       err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+-      __put_user_error(0, &frame->sig.uc.uc_flags, err);
+-      __put_user_error(NULL, &frame->sig.uc.uc_link, err);
++      err |= __put_user(0, &frame->sig.uc.uc_flags);
++      err |= __put_user(NULL, &frame->sig.uc.uc_link);
+       err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
+       err |= setup_sigframe(&frame->sig, regs, set);
diff --git a/queue-4.4/arm-8794-1-uaccess-prevent-speculative-use-of-the-current-addr_limit.patch b/queue-4.4/arm-8794-1-uaccess-prevent-speculative-use-of-the-current-addr_limit.patch
new file mode 100644 (file)
index 0000000..ddd997c
--- /dev/null
@@ -0,0 +1,51 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:44 +0100
+Subject: ARM: 8794/1: uaccess: Prevent speculative use of the current addr_limit
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Julien Thierry <julien.thierry@arm.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-41-ardb@kernel.org>
+
+From: Julien Thierry <julien.thierry@arm.com>
+
+Commit 621afc677465db231662ed126ae1f355bf8eac47 upstream.
+
+A mispredicted conditional call to set_fs could result in the wrong
+addr_limit being forwarded under speculation to a subsequent access_ok
+check, potentially forming part of a spectre-v1 attack using uaccess
+routines.
+
+This patch prevents this forwarding from taking place by putting heavy
+barriers in set_fs() after writing the addr_limit.
+
+Porting commit c2f0ad4fc089cff8 ("arm64: uaccess: Prevent speculative use
+of the current addr_limit").
+
+Signed-off-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/uaccess.h |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -99,6 +99,14 @@ extern int __put_user_bad(void);
+ static inline void set_fs(mm_segment_t fs)
+ {
+       current_thread_info()->addr_limit = fs;
++
++      /*
++       * Prevent a mispredicted conditional call to set_fs from forwarding
++       * the wrong address limit to access_ok under speculation.
++       */
++      dsb(nsh);
++      isb();
++
+       modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
+ }
diff --git a/queue-4.4/arm-8795-1-spectre-v1.1-use-put_user-for-__put_user.patch b/queue-4.4/arm-8795-1-spectre-v1.1-use-put_user-for-__put_user.patch
new file mode 100644 (file)
index 0000000..09e3823
--- /dev/null
@@ -0,0 +1,65 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:45 +0100
+Subject: ARM: 8795/1: spectre-v1.1: use put_user() for __put_user()
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Julien Thierry <julien.thierry@arm.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-42-ardb@kernel.org>
+
+From: Julien Thierry <julien.thierry@arm.com>
+
+Commit e3aa6243434fd9a82e84bb79ab1abd14f2d9a5a7 upstream.
+
+When Spectre mitigation is required, __put_user() needs to include
+check_uaccess. This is already the case for put_user(), so just make
+__put_user() an alias of put_user().
+
+Signed-off-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/uaccess.h |   15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -408,6 +408,14 @@ do {                                                                      \
+       __pu_err;                                                       \
+ })
++#ifdef CONFIG_CPU_SPECTRE
++/*
++ * When mitigating Spectre variant 1.1, all accessors need to include
++ * verification of the address space.
++ */
++#define __put_user(x, ptr) put_user(x, ptr)
++
++#else
+ #define __put_user(x, ptr)                                            \
+ ({                                                                    \
+       long __pu_err = 0;                                              \
+@@ -415,12 +423,6 @@ do {                                                                      \
+       __pu_err;                                                       \
+ })
+-#define __put_user_error(x, ptr, err)                                 \
+-({                                                                    \
+-      __put_user_switch((x), (ptr), (err), __put_user_nocheck);       \
+-      (void) 0;                                                       \
+-})
+-
+ #define __put_user_nocheck(x, __pu_ptr, __err, __size)                        \
+       do {                                                            \
+               unsigned long __pu_addr = (unsigned long)__pu_ptr;      \
+@@ -500,6 +502,7 @@ do {                                                                       \
+       : "r" (x), "i" (-EFAULT)                                \
+       : "cc")
++#endif /* !CONFIG_CPU_SPECTRE */
+ #ifdef CONFIG_MMU
+ extern unsigned long __must_check
diff --git a/queue-4.4/arm-8796-1-spectre-v1-v1.1-provide-helpers-for-address-sanitization.patch b/queue-4.4/arm-8796-1-spectre-v1-v1.1-provide-helpers-for-address-sanitization.patch
new file mode 100644 (file)
index 0000000..a080823
--- /dev/null
@@ -0,0 +1,100 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:46 +0100
+Subject: ARM: 8796/1: spectre-v1,v1.1: provide helpers for address sanitization
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Julien Thierry <julien.thierry@arm.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-43-ardb@kernel.org>
+
+From: Julien Thierry <julien.thierry@arm.com>
+
+Commit afaf6838f4bc896a711180b702b388b8cfa638fc upstream.
+
+Introduce C and asm helpers to sanitize user addresses, taking the
+address range they target into account.
+
+Use asm helper for existing sanitization in __copy_from_user().
+
+Signed-off-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/assembler.h |   11 +++++++++++
+ arch/arm/include/asm/uaccess.h   |   26 ++++++++++++++++++++++++++
+ arch/arm/lib/copy_from_user.S    |    6 +-----
+ 3 files changed, 38 insertions(+), 5 deletions(-)
+
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -461,6 +461,17 @@ THUMB(    orr     \reg , \reg , #PSR_T_BIT        )
+ #endif
+       .endm
++      .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
++#ifdef CONFIG_CPU_SPECTRE
++      sub     \tmp, \limit, #1
++      subs    \tmp, \tmp, \addr       @ tmp = limit - 1 - addr
++      addhs   \tmp, \tmp, #1          @ if (tmp >= 0) {
++      subhss  \tmp, \tmp, \size       @ tmp = limit - (addr + size) }
++      movlo   \addr, #0               @ if (tmp < 0) addr = NULL
++      csdb
++#endif
++      .endm
++
+       .macro  uaccess_disable, tmp, isb=1
+ #ifdef CONFIG_CPU_SW_DOMAIN_PAN
+       /*
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -138,6 +138,32 @@ static inline void set_fs(mm_segment_t f
+       __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+ /*
++ * Sanitise a uaccess pointer such that it becomes NULL if addr+size
++ * is above the current addr_limit.
++ */
++#define uaccess_mask_range_ptr(ptr, size)                     \
++      ((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
++static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
++                                                  size_t size)
++{
++      void __user *safe_ptr = (void __user *)ptr;
++      unsigned long tmp;
++
++      asm volatile(
++      "       sub     %1, %3, #1\n"
++      "       subs    %1, %1, %0\n"
++      "       addhs   %1, %1, #1\n"
++      "       subhss  %1, %1, %2\n"
++      "       movlo   %0, #0\n"
++      : "+r" (safe_ptr), "=&r" (tmp)
++      : "r" (size), "r" (current_thread_info()->addr_limit)
++      : "cc");
++
++      csdb();
++      return safe_ptr;
++}
++
++/*
+  * Single-value transfer routines.  They automatically use the right
+  * size if we just have the right pointer type.  Note that the functions
+  * which read from user space (*get_*) need to take care not to leak
+--- a/arch/arm/lib/copy_from_user.S
++++ b/arch/arm/lib/copy_from_user.S
+@@ -93,11 +93,7 @@ ENTRY(arm_copy_from_user)
+ #ifdef CONFIG_CPU_SPECTRE
+       get_thread_info r3
+       ldr     r3, [r3, #TI_ADDR_LIMIT]
+-      adds    ip, r1, r2      @ ip=addr+size
+-      sub     r3, r3, #1      @ addr_limit - 1
+-      cmpcc   ip, r3          @ if (addr+size > addr_limit - 1)
+-      movcs   r1, #0          @ addr = NULL
+-      csdb
++      uaccess_mask_range_ptr r1, r2, r3, ip
+ #endif
+ #include "copy_template.S"
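
The masking arithmetic can be checked with concrete numbers (editorial
worked example). Take addr_limit = 0x1000, addr = 0x0ff0 and size = 0x20,
so the access would overrun the limit:

  tmp = limit - 1 - addr  = 0x1000 - 1 - 0x0ff0 = 0x0f    (no borrow)
  tmp = tmp + 1           = 0x10
  tmp = tmp - size        = 0x10 - 0x20                   (borrow: tmp < 0)
  => addr is forced to NULL, and csdb() keeps speculation from using it.

With size = 0x10 instead, addr + size equals addr_limit, the final
subtraction yields 0 without borrow, and the pointer passes through
unmodified.
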
diff --git a/queue-4.4/arm-8810-1-vfp-fix-wrong-assignement-to-ufp_exc.patch b/queue-4.4/arm-8810-1-vfp-fix-wrong-assignement-to-ufp_exc.patch
new file mode 100644 (file)
index 0000000..6016d38
--- /dev/null
@@ -0,0 +1,42 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:47 +0100
+Subject: ARM: 8810/1: vfp: Fix wrong assignement to ufp_exc
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Julien Thierry <julien.thierry@arm.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-44-ardb@kernel.org>
+
+From: Julien Thierry <julien.thierry@arm.com>
+
+Commit 5df7a99bdd0de4a0480320264c44c04543c29d5a upstream.
+
+In vfp_preserve_user_clear_hwstate, ufp_exc->fpinst2 gets assigned to
+itself. It should actually be hwstate->fpinst2 that gets assigned to the
+ufp_exc field.
+
+Fixes commit 3aa2df6ec2ca6bc143a65351cca4266d03a8bc41 ("ARM: 8791/1:
+vfp: use __copy_to_user() when saving VFP state").
+
+Reported-by: David Binderman <dcb314@hotmail.com>
+Signed-off-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/vfp/vfpmodule.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -583,7 +583,7 @@ int vfp_preserve_user_clear_hwstate(stru
+        */
+       ufp_exc->fpexc = hwstate->fpexc;
+       ufp_exc->fpinst = hwstate->fpinst;
+-      ufp_exc->fpinst2 = ufp_exc->fpinst2;
++      ufp_exc->fpinst2 = hwstate->fpinst2;
+       /* Ensure that VFP is disabled. */
+       vfp_flush_hwstate(thread);
diff --git a/queue-4.4/arm-add-more-cpu-part-numbers-for-cortex-and-brahma-b15-cpus.patch b/queue-4.4/arm-add-more-cpu-part-numbers-for-cortex-and-brahma-b15-cpus.patch
new file mode 100644 (file)
index 0000000..29f7e05
--- /dev/null
@@ -0,0 +1,47 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:21 +0100
+Subject: ARM: add more CPU part numbers for Cortex and Brahma B15 CPUs
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-18-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit f5683e76f35b4ec5891031b6a29036efe0a1ff84 upstream.
+
+Add CPU part numbers for Cortex A53, A57, A72, A73, A75 and the
+Broadcom Brahma B15 CPU.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Acked-by: Florian Fainelli <f.fainelli@gmail.com>
+Boot-tested-by: Tony Lindgren <tony@atomide.com>
+Reviewed-by: Tony Lindgren <tony@atomide.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/cputype.h |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/arm/include/asm/cputype.h
++++ b/arch/arm/include/asm/cputype.h
+@@ -74,8 +74,16 @@
+ #define ARM_CPU_PART_CORTEX_A12               0x4100c0d0
+ #define ARM_CPU_PART_CORTEX_A17               0x4100c0e0
+ #define ARM_CPU_PART_CORTEX_A15               0x4100c0f0
++#define ARM_CPU_PART_CORTEX_A53               0x4100d030
++#define ARM_CPU_PART_CORTEX_A57               0x4100d070
++#define ARM_CPU_PART_CORTEX_A72               0x4100d080
++#define ARM_CPU_PART_CORTEX_A73               0x4100d090
++#define ARM_CPU_PART_CORTEX_A75               0x4100d0a0
+ #define ARM_CPU_PART_MASK             0xff00fff0
++/* Broadcom cores */
++#define ARM_CPU_PART_BRAHMA_B15               0x420000f0
++
+ #define ARM_CPU_XSCALE_ARCH_MASK      0xe000
+ #define ARM_CPU_XSCALE_ARCH_V1                0x2000
+ #define ARM_CPU_XSCALE_ARCH_V2                0x4000
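These constants are consumed by masking the MIDR and comparing against a part number. A hedged usage sketch, assuming the 4.4-era read_cpuid_part() helper (which returns the MIDR masked with ARM_CPU_PART_MASK):

    #include <asm/cputype.h>

    /* Sketch: detect one of the newly added parts at runtime. */
    static bool cpu_is_brahma_b15(void)
    {
            return read_cpuid_part() == ARM_CPU_PART_BRAHMA_B15;
    }

The later Spectre-v2 patches in this series key their per-CPU mitigations off exactly these comparisons.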
diff --git a/queue-4.4/arm-add-proc_vtable-and-proc_table-macros.patch b/queue-4.4/arm-add-proc_vtable-and-proc_table-macros.patch
new file mode 100644 (file)
index 0000000..179a4ed
--- /dev/null
@@ -0,0 +1,111 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:51 +0100
+Subject: ARM: add PROC_VTABLE and PROC_TABLE macros
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-48-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit e209950fdd065d2cc46e6338e47e52841b830cba upstream.
+
+Allow the way we access members of the processor vtable to be changed
+at compile time.  We will need to move to per-CPU vtables to fix the
+Spectre variant 2 issues on big.Little systems.
+
+However, we have a couple of calls that do not need the vtable
+treatment, and indeed cause a kernel warning due to the (later) use
+of smp_processor_id(), so also introduce the PROC_TABLE macro for
+these, which always uses CPU 0's function pointers.
+
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/proc-fns.h |   39 ++++++++++++++++++++++++++-------------
+ arch/arm/kernel/setup.c         |    4 +---
+ 2 files changed, 27 insertions(+), 16 deletions(-)
+
+--- a/arch/arm/include/asm/proc-fns.h
++++ b/arch/arm/include/asm/proc-fns.h
+@@ -23,7 +23,7 @@ struct mm_struct;
+ /*
+  * Don't change this structure - ASM code relies on it.
+  */
+-extern struct processor {
++struct processor {
+       /* MISC
+        * get data abort address/flags
+        */
+@@ -79,9 +79,13 @@ extern struct processor {
+       unsigned int suspend_size;
+       void (*do_suspend)(void *);
+       void (*do_resume)(void *);
+-} processor;
++};
+ #ifndef MULTI_CPU
++static inline void init_proc_vtable(const struct processor *p)
++{
++}
++
+ extern void cpu_proc_init(void);
+ extern void cpu_proc_fin(void);
+ extern int cpu_do_idle(void);
+@@ -98,18 +102,27 @@ extern void cpu_reset(unsigned long addr
+ extern void cpu_do_suspend(void *);
+ extern void cpu_do_resume(void *);
+ #else
+-#define cpu_proc_init                 processor._proc_init
+-#define cpu_check_bugs                        processor.check_bugs
+-#define cpu_proc_fin                  processor._proc_fin
+-#define cpu_reset                     processor.reset
+-#define cpu_do_idle                   processor._do_idle
+-#define cpu_dcache_clean_area         processor.dcache_clean_area
+-#define cpu_set_pte_ext                       processor.set_pte_ext
+-#define cpu_do_switch_mm              processor.switch_mm
+-/* These three are private to arch/arm/kernel/suspend.c */
+-#define cpu_do_suspend                        processor.do_suspend
+-#define cpu_do_resume                 processor.do_resume
++extern struct processor processor;
++#define PROC_VTABLE(f)                        processor.f
++#define PROC_TABLE(f)                 processor.f
++static inline void init_proc_vtable(const struct processor *p)
++{
++      processor = *p;
++}
++
++#define cpu_proc_init                 PROC_VTABLE(_proc_init)
++#define cpu_check_bugs                        PROC_VTABLE(check_bugs)
++#define cpu_proc_fin                  PROC_VTABLE(_proc_fin)
++#define cpu_reset                     PROC_VTABLE(reset)
++#define cpu_do_idle                   PROC_VTABLE(_do_idle)
++#define cpu_dcache_clean_area         PROC_TABLE(dcache_clean_area)
++#define cpu_set_pte_ext                       PROC_TABLE(set_pte_ext)
++#define cpu_do_switch_mm              PROC_VTABLE(switch_mm)
++
++/* These two are private to arch/arm/kernel/suspend.c */
++#define cpu_do_suspend                        PROC_VTABLE(do_suspend)
++#define cpu_do_resume                 PROC_VTABLE(do_resume)
+ #endif
+ extern void cpu_resume(void);
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -625,9 +625,7 @@ static void __init setup_processor(void)
+       cpu_name = list->cpu_name;
+       __cpu_architecture = __get_cpu_architecture();
+-#ifdef MULTI_CPU
+-      processor = *list->proc;
+-#endif
++      init_proc_vtable(list->proc);
+ #ifdef MULTI_TLB
+       cpu_tlb = *list->tlb;
+ #endif
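The split matters because of where the series is headed: once the vtable becomes per-CPU, PROC_VTABLE dispatch needs the current CPU's table, while PROC_TABLE stays pinned to CPU 0 so it remains safe in preemptible context. A hedged sketch of the later shape (simplified; the per-CPU vtable patch appears further on in this series):

    /* Sketch only: with per-CPU vtables, VTABLE dispatch would need
     * smp_processor_id(), which warns when preemption is enabled;
     * TABLE dispatch avoids that by always using CPU 0's copy. */
    #define PROC_VTABLE(f)  per_cpu(processor, smp_processor_id()).f
    #define PROC_TABLE(f)   per_cpu(processor, 0).f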
diff --git a/queue-4.4/arm-arm64-kvm-advertise-smccc-v1.1.patch b/queue-4.4/arm-arm64-kvm-advertise-smccc-v1.1.patch
new file mode 100644 (file)
index 0000000..3efbee2
--- /dev/null
@@ -0,0 +1,76 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:12 +0100
+Subject: arm/arm64: KVM: Advertise SMCCC v1.1
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Mark Rutland <mark.rutland@arm.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-9-ardb@kernel.org>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 09e6be12effdb33bf7210c8867bbd213b66a499e upstream.
+
+The new SMC Calling Convention (v1.1) allows for a reduced overhead
+when calling into the firmware, and provides a new feature discovery
+mechanism.
+
+Make it visible to KVM guests.
+
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+[v4.9: account for files moved to virt/ upstream]
+Signed-off-by: Mark Rutland <mark.rutland@arm.com> [v4.9 backport]
+Tested-by: Greg Hackmann <ghackmann@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+[ardb: restrict to include/linux/arm-smccc.h, drop KVM bits]
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/arm-smccc.h |   22 +++++++++++++++++++---
+ 1 file changed, 19 insertions(+), 3 deletions(-)
+
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -14,9 +14,6 @@
+ #ifndef __LINUX_ARM_SMCCC_H
+ #define __LINUX_ARM_SMCCC_H
+-#include <linux/linkage.h>
+-#include <linux/types.h>
+-
+ /*
+  * This file provides common defines for ARM SMC Calling Convention as
+  * specified in
+@@ -60,6 +57,24 @@
+ #define ARM_SMCCC_OWNER_TRUSTED_OS    50
+ #define ARM_SMCCC_OWNER_TRUSTED_OS_END        63
++#define ARM_SMCCC_VERSION_1_0         0x10000
++#define ARM_SMCCC_VERSION_1_1         0x10001
++
++#define ARM_SMCCC_VERSION_FUNC_ID                                     \
++      ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
++                         ARM_SMCCC_SMC_32,                            \
++                         0, 0)
++
++#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID                                       \
++      ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
++                         ARM_SMCCC_SMC_32,                            \
++                         0, 1)
++
++#ifndef __ASSEMBLY__
++
++#include <linux/linkage.h>
++#include <linux/types.h>
++
+ /**
+  * struct arm_smccc_res - Result from SMC/HVC call
+  * @a0-a3 result values from registers 0 to 3
+@@ -101,4 +116,5 @@ asmlinkage void arm_smccc_hvc(unsigned l
+                       unsigned long a5, unsigned long a6, unsigned long a7,
+                       struct arm_smccc_res *res);
++#endif /*__ASSEMBLY__*/
+ #endif /*__LINUX_ARM_SMCCC_H*/
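A caller can now probe the convention version before relying on v1.1 semantics. A hedged usage sketch (arm_smccc_1_1_smc() and struct arm_smccc_res are provided by other patches in this series):

    #include <linux/arm-smccc.h>

    /* Sketch: v1.0 firmware returns a negative NOT_SUPPORTED code here. */
    static bool smccc_is_v1_1(void)
    {
            struct arm_smccc_res res;

            arm_smccc_1_1_smc(ARM_SMCCC_VERSION_FUNC_ID, &res);
            return (long)res.a0 >= ARM_SMCCC_VERSION_1_1;
    }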
diff --git a/queue-4.4/arm-arm64-smccc-1.1-handle-function-result-as-parameters.patch b/queue-4.4/arm-arm64-smccc-1.1-handle-function-result-as-parameters.patch
new file mode 100644 (file)
index 0000000..908370b
--- /dev/null
@@ -0,0 +1,140 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:20 +0100
+Subject: arm/arm64: smccc-1.1: Handle function result as parameters
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Marc Zyngier <marc.zyngier@arm.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-17-ardb@kernel.org>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+[ Upstream commit 755a8bf5579d22eb5636685c516d8dede799e27b ]
+
+If someone has the silly idea to write something along those lines:
+
+       extern u64 foo(void);
+
+       void bar(struct arm_smccc_res *res)
+       {
+               arm_smccc_1_1_smc(0xbad, foo(), res);
+       }
+
+they are in for a surprise, as this gets compiled as:
+
+       0000000000000588 <bar>:
+        588:   a9be7bfd        stp     x29, x30, [sp, #-32]!
+        58c:   910003fd        mov     x29, sp
+        590:   f9000bf3        str     x19, [sp, #16]
+        594:   aa0003f3        mov     x19, x0
+        598:   aa1e03e0        mov     x0, x30
+        59c:   94000000        bl      0 <_mcount>
+        5a0:   94000000        bl      0 <foo>
+        5a4:   aa0003e1        mov     x1, x0
+        5a8:   d4000003        smc     #0x0
+        5ac:   b4000073        cbz     x19, 5b8 <bar+0x30>
+        5b0:   a9000660        stp     x0, x1, [x19]
+        5b4:   a9010e62        stp     x2, x3, [x19, #16]
+        5b8:   f9400bf3        ldr     x19, [sp, #16]
+        5bc:   a8c27bfd        ldp     x29, x30, [sp], #32
+        5c0:   d65f03c0        ret
+        5c4:   d503201f        nop
+
+The call to foo "overwrites" the x0 register for the return value,
+and we end up calling the wrong secure service.
+
+A solution is to evaluate all the parameters before assigning
+anything to specific registers, leading to the expected result:
+
+       0000000000000588 <bar>:
+        588:   a9be7bfd        stp     x29, x30, [sp, #-32]!
+        58c:   910003fd        mov     x29, sp
+        590:   f9000bf3        str     x19, [sp, #16]
+        594:   aa0003f3        mov     x19, x0
+        598:   aa1e03e0        mov     x0, x30
+        59c:   94000000        bl      0 <_mcount>
+        5a0:   94000000        bl      0 <foo>
+        5a4:   aa0003e1        mov     x1, x0
+        5a8:   d28175a0        mov     x0, #0xbad
+        5ac:   d4000003        smc     #0x0
+        5b0:   b4000073        cbz     x19, 5bc <bar+0x34>
+        5b4:   a9000660        stp     x0, x1, [x19]
+        5b8:   a9010e62        stp     x2, x3, [x19, #16]
+        5bc:   f9400bf3        ldr     x19, [sp, #16]
+        5c0:   a8c27bfd        ldp     x29, x30, [sp], #32
+        5c4:   d65f03c0        ret
+
+Reported-by: Julien Grall <julien.grall@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/arm-smccc.h |   30 ++++++++++++++++++++----------
+ 1 file changed, 20 insertions(+), 10 deletions(-)
+
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -173,41 +173,51 @@ asmlinkage void arm_smccc_hvc(unsigned l
+       register unsigned long r3 asm("r3")
+ #define __declare_arg_1(a0, a1, res)                                  \
++      typeof(a1) __a1 = a1;                                           \
+       struct arm_smccc_res   *___res = res;                           \
+       register unsigned long r0 asm("r0") = (u32)a0;                  \
+-      register unsigned long r1 asm("r1") = a1;                       \
++      register unsigned long r1 asm("r1") = __a1;                     \
+       register unsigned long r2 asm("r2");                            \
+       register unsigned long r3 asm("r3")
+ #define __declare_arg_2(a0, a1, a2, res)                              \
++      typeof(a1) __a1 = a1;                                           \
++      typeof(a2) __a2 = a2;                                           \
+       struct arm_smccc_res   *___res = res;                           \
+       register unsigned long r0 asm("r0") = (u32)a0;                  \
+-      register unsigned long r1 asm("r1") = a1;                       \
+-      register unsigned long r2 asm("r2") = a2;                       \
++      register unsigned long r1 asm("r1") = __a1;                     \
++      register unsigned long r2 asm("r2") = __a2;                     \
+       register unsigned long r3 asm("r3")
+ #define __declare_arg_3(a0, a1, a2, a3, res)                          \
++      typeof(a1) __a1 = a1;                                           \
++      typeof(a2) __a2 = a2;                                           \
++      typeof(a3) __a3 = a3;                                           \
+       struct arm_smccc_res   *___res = res;                           \
+       register unsigned long r0 asm("r0") = (u32)a0;                  \
+-      register unsigned long r1 asm("r1") = a1;                       \
+-      register unsigned long r2 asm("r2") = a2;                       \
+-      register unsigned long r3 asm("r3") = a3
++      register unsigned long r1 asm("r1") = __a1;                     \
++      register unsigned long r2 asm("r2") = __a2;                     \
++      register unsigned long r3 asm("r3") = __a3
+ #define __declare_arg_4(a0, a1, a2, a3, a4, res)                      \
++      typeof(a4) __a4 = a4;                                           \
+       __declare_arg_3(a0, a1, a2, a3, res);                           \
+-      register typeof(a4) r4 asm("r4") = a4
++      register unsigned long r4 asm("r4") = __a4
+ #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res)                  \
++      typeof(a5) __a5 = a5;                                           \
+       __declare_arg_4(a0, a1, a2, a3, a4, res);                       \
+-      register typeof(a5) r5 asm("r5") = a5
++      register unsigned long r5 asm("r5") = __a5
+ #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res)              \
++      typeof(a6) __a6 = a6;                                           \
+       __declare_arg_5(a0, a1, a2, a3, a4, a5, res);                   \
+-      register typeof(a6) r6 asm("r6") = a6
++      register unsigned long r6 asm("r6") = __a6
+ #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res)          \
++      typeof(a7) __a7 = a7;                                           \
+       __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res);               \
+-      register typeof(a7) r7 asm("r7") = a7
++      register unsigned long r7 asm("r7") = __a7
+ #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
+ #define __declare_args(count, ...)  ___declare_args(count, __VA_ARGS__)
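The hazard is not specific to SMCCC: any macro that binds arguments to fixed registers one at a time is exposed, because an argument whose evaluation involves a function call can clobber registers that are already live. A minimal sketch of the bug and the fix pattern (illustrative macro names):

    /* Broken: if a1 is foo(), the call clobbers r0 after r0 is bound. */
    #define BIND2_BROKEN(a0, a1)                            \
            register unsigned long r0 asm("r0") = (a0);     \
            register unsigned long r1 asm("r1") = (a1)

    /* Fixed, as in the patch: evaluate everything into plain temporaries
     * first, then bind the already-computed values to registers. */
    #define BIND2_FIXED(a0, a1)                             \
            unsigned long __b0 = (a0), __b1 = (a1);         \
            register unsigned long r0 asm("r0") = __b0;     \
            register unsigned long r1 asm("r1") = __b1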
diff --git a/queue-4.4/arm-arm64-smccc-1.1-make-return-values-unsigned-long.patch b/queue-4.4/arm-arm64-smccc-1.1-make-return-values-unsigned-long.patch
new file mode 100644 (file)
index 0000000..8a78ec1
--- /dev/null
@@ -0,0 +1,76 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:19 +0100
+Subject: arm/arm64: smccc-1.1: Make return values unsigned long
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Marc Zyngier <marc.zyngier@arm.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-16-ardb@kernel.org>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+[ Upstream commit 1d8f574708a3fb6f18c85486d0c5217df893c0cf ]
+
+An unfortunate consequence of having strong typing for the input
+values to the SMC call is that it also affects the type of the
+return values, limiting r0 to 32 bits and r{1,2,3} to whatever
+was passed as an input.
+
+Let's turn everything into "unsigned long", which satisfies the
+requirements of both architectures, and allows for the full
+range of return values.
+
+Reported-by: Julien Grall <julien.grall@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/arm-smccc.h |   20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -167,31 +167,31 @@ asmlinkage void arm_smccc_hvc(unsigned l
+ #define __declare_arg_0(a0, res)                                      \
+       struct arm_smccc_res   *___res = res;                           \
+-      register u32           r0 asm("r0") = a0;                       \
++      register unsigned long r0 asm("r0") = (u32)a0;                  \
+       register unsigned long r1 asm("r1");                            \
+       register unsigned long r2 asm("r2");                            \
+       register unsigned long r3 asm("r3")
+ #define __declare_arg_1(a0, a1, res)                                  \
+       struct arm_smccc_res   *___res = res;                           \
+-      register u32           r0 asm("r0") = a0;                       \
+-      register typeof(a1)    r1 asm("r1") = a1;                       \
++      register unsigned long r0 asm("r0") = (u32)a0;                  \
++      register unsigned long r1 asm("r1") = a1;                       \
+       register unsigned long r2 asm("r2");                            \
+       register unsigned long r3 asm("r3")
+ #define __declare_arg_2(a0, a1, a2, res)                              \
+       struct arm_smccc_res   *___res = res;                           \
+-      register u32           r0 asm("r0") = a0;                       \
+-      register typeof(a1)    r1 asm("r1") = a1;                       \
+-      register typeof(a2)    r2 asm("r2") = a2;                       \
++      register unsigned long r0 asm("r0") = (u32)a0;                  \
++      register unsigned long r1 asm("r1") = a1;                       \
++      register unsigned long r2 asm("r2") = a2;                       \
+       register unsigned long r3 asm("r3")
+ #define __declare_arg_3(a0, a1, a2, a3, res)                          \
+       struct arm_smccc_res   *___res = res;                           \
+-      register u32           r0 asm("r0") = a0;                       \
+-      register typeof(a1)    r1 asm("r1") = a1;                       \
+-      register typeof(a2)    r2 asm("r2") = a2;                       \
+-      register typeof(a3)    r3 asm("r3") = a3
++      register unsigned long r0 asm("r0") = (u32)a0;                  \
++      register unsigned long r1 asm("r1") = a1;                       \
++      register unsigned long r2 asm("r2") = a2;                       \
++      register unsigned long r3 asm("r3") = a3
+ #define __declare_arg_4(a0, a1, a2, a3, a4, res)                      \
+       __declare_arg_3(a0, a1, a2, a3, res);                           \
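To see why the typed declarations also constrained the outputs: the same register variables appear in the asm "+r" output list, so their declared type is the type read back after the call. A hedged sketch of the truncation (not kernel code):

    /* Sketch: r1 doubles as input and output. If a1 is a u32, the value
     * read back is a u32 too, truncating a 64-bit result on arm64. */
    #define CALL1(a1)                                       \
    ({                                                      \
            register typeof(a1) r1 asm("r1") = (a1);        \
            asm volatile("hvc #0" : "+r" (r1));             \
            r1;                                             \
    })

Declaring the registers unsigned long keeps the full register width on both architectures, which is exactly what this patch does.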
diff --git a/queue-4.4/arm-arm64-smccc-add-smccc-specific-return-codes.patch b/queue-4.4/arm-arm64-smccc-add-smccc-specific-return-codes.patch
new file mode 100644 (file)
index 0000000..7142295
--- /dev/null
@@ -0,0 +1,44 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:18 +0100
+Subject: arm/arm64: smccc: Add SMCCC-specific return codes
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Marc Zyngier <marc.zyngier@arm.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-15-ardb@kernel.org>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit eff0e9e1078ea7dc1d794dc50e31baef984c46d7 upstream.
+
+We've so far used the PSCI return codes for SMCCC because they
+were extremely similar. But with the new ARM DEN 0070A specification,
+"NOT_REQUIRED" (-2) is clashing with PSCI's "PSCI_RET_INVALID_PARAMS".
+
+Let's bite the bullet and add SMCCC specific return codes. Users
+can be repainted as and when required.
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/arm-smccc.h |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -264,5 +264,10 @@ asmlinkage void arm_smccc_hvc(unsigned l
+  */
+ #define arm_smccc_1_1_hvc(...)        __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
++/* Return codes defined in ARM DEN 0070A */
++#define SMCCC_RET_SUCCESS                     0
++#define SMCCC_RET_NOT_SUPPORTED                       -1
++#define SMCCC_RET_NOT_REQUIRED                        -2
++
+ #endif /*__ASSEMBLY__*/
+ #endif /*__LINUX_ARM_SMCCC_H*/
diff --git a/queue-4.4/arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch b/queue-4.4/arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch
new file mode 100644 (file)
index 0000000..44327e3
--- /dev/null
@@ -0,0 +1,180 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:17 +0100
+Subject: arm/arm64: smccc: Implement SMCCC v1.1 inline primitive
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Mark Rutland <mark.rutland@arm.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-14-ardb@kernel.org>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit f2d3b2e8759a5833df6f022e42df2d581e6d843c upstream.
+
+One of the major improvements of SMCCC v1.1 is that it only clobbers
+the first 4 registers, on both 32-bit and 64-bit. This means that it
+becomes very easy to provide an inline version of the SMC call
+primitive, and avoid performing a function call to stash the
+registers that would otherwise be clobbered by SMCCC v1.0.
+
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mark Rutland <mark.rutland@arm.com> [v4.9 backport]
+Tested-by: Greg Hackmann <ghackmann@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/arm-smccc.h |  141 ++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 141 insertions(+)
+
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -123,5 +123,146 @@ asmlinkage void arm_smccc_hvc(unsigned l
+                       unsigned long a5, unsigned long a6, unsigned long a7,
+                       struct arm_smccc_res *res);
++/* SMCCC v1.1 implementation madness follows */
++#ifdef CONFIG_ARM64
++
++#define SMCCC_SMC_INST        "smc    #0"
++#define SMCCC_HVC_INST        "hvc    #0"
++
++#elif defined(CONFIG_ARM)
++#include <asm/opcodes-sec.h>
++#include <asm/opcodes-virt.h>
++
++#define SMCCC_SMC_INST        __SMC(0)
++#define SMCCC_HVC_INST        __HVC(0)
++
++#endif
++
++#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
++
++#define __count_args(...)                                             \
++      ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
++
++#define __constraint_write_0                                          \
++      "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3)
++#define __constraint_write_1                                          \
++      "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3)
++#define __constraint_write_2                                          \
++      "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3)
++#define __constraint_write_3                                          \
++      "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3)
++#define __constraint_write_4  __constraint_write_3
++#define __constraint_write_5  __constraint_write_4
++#define __constraint_write_6  __constraint_write_5
++#define __constraint_write_7  __constraint_write_6
++
++#define __constraint_read_0
++#define __constraint_read_1
++#define __constraint_read_2
++#define __constraint_read_3
++#define __constraint_read_4   "r" (r4)
++#define __constraint_read_5   __constraint_read_4, "r" (r5)
++#define __constraint_read_6   __constraint_read_5, "r" (r6)
++#define __constraint_read_7   __constraint_read_6, "r" (r7)
++
++#define __declare_arg_0(a0, res)                                      \
++      struct arm_smccc_res   *___res = res;                           \
++      register u32           r0 asm("r0") = a0;                       \
++      register unsigned long r1 asm("r1");                            \
++      register unsigned long r2 asm("r2");                            \
++      register unsigned long r3 asm("r3")
++
++#define __declare_arg_1(a0, a1, res)                                  \
++      struct arm_smccc_res   *___res = res;                           \
++      register u32           r0 asm("r0") = a0;                       \
++      register typeof(a1)    r1 asm("r1") = a1;                       \
++      register unsigned long r2 asm("r2");                            \
++      register unsigned long r3 asm("r3")
++
++#define __declare_arg_2(a0, a1, a2, res)                              \
++      struct arm_smccc_res   *___res = res;                           \
++      register u32           r0 asm("r0") = a0;                       \
++      register typeof(a1)    r1 asm("r1") = a1;                       \
++      register typeof(a2)    r2 asm("r2") = a2;                       \
++      register unsigned long r3 asm("r3")
++
++#define __declare_arg_3(a0, a1, a2, a3, res)                          \
++      struct arm_smccc_res   *___res = res;                           \
++      register u32           r0 asm("r0") = a0;                       \
++      register typeof(a1)    r1 asm("r1") = a1;                       \
++      register typeof(a2)    r2 asm("r2") = a2;                       \
++      register typeof(a3)    r3 asm("r3") = a3
++
++#define __declare_arg_4(a0, a1, a2, a3, a4, res)                      \
++      __declare_arg_3(a0, a1, a2, a3, res);                           \
++      register typeof(a4) r4 asm("r4") = a4
++
++#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res)                  \
++      __declare_arg_4(a0, a1, a2, a3, a4, res);                       \
++      register typeof(a5) r5 asm("r5") = a5
++
++#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res)              \
++      __declare_arg_5(a0, a1, a2, a3, a4, a5, res);                   \
++      register typeof(a6) r6 asm("r6") = a6
++
++#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res)          \
++      __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res);               \
++      register typeof(a7) r7 asm("r7") = a7
++
++#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
++#define __declare_args(count, ...)  ___declare_args(count, __VA_ARGS__)
++
++#define ___constraints(count)                                         \
++      : __constraint_write_ ## count                                  \
++      : __constraint_read_ ## count                                   \
++      : "memory"
++#define __constraints(count)  ___constraints(count)
++
++/*
++ * We have an output list that is not necessarily used, and GCC feels
++ * entitled to optimise the whole sequence away. "volatile" is what
++ * makes it stick.
++ */
++#define __arm_smccc_1_1(inst, ...)                                    \
++      do {                                                            \
++              __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
++              asm volatile(inst "\n"                                  \
++                           __constraints(__count_args(__VA_ARGS__))); \
++              if (___res)                                             \
++                      *___res = (typeof(*___res)){r0, r1, r2, r3};    \
++      } while (0)
++
++/*
++ * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call
++ *
++ * This is a variadic macro taking one to eight source arguments, and
++ * an optional return structure.
++ *
++ * @a0-a7: arguments passed in registers 0 to 7
++ * @res: result values from registers 0 to 3
++ *
++ * This macro is used to make SMC calls following SMC Calling Convention v1.1.
++ * The contents of the supplied parameters are copied to registers 0 to 7
++ * prior to the SMC instruction. If res is not NULL, the return values are
++ * updated with the contents of registers 0 to 3 on return from the SMC
++ * instruction.
++ */
++#define arm_smccc_1_1_smc(...)        __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__)
++
++/*
++ * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call
++ *
++ * This is a variadic macro taking one to eight source arguments, and
++ * an optional return structure.
++ *
++ * @a0-a7: arguments passed in registers 0 to 7
++ * @res: result values from registers 0 to 3
++ *
++ * This macro is used to make HVC calls following SMC Calling Convention v1.1.
++ * The contents of the supplied parameters are copied to registers 0 to 7
++ * prior to the HVC instruction. If res is not NULL, the return values are
++ * updated with the contents of registers 0 to 3 on return from the HVC
++ * instruction.
++ */
++#define arm_smccc_1_1_hvc(...)        __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
++
+ #endif /*__ASSEMBLY__*/
+ #endif /*__LINUX_ARM_SMCCC_H*/
diff --git a/queue-4.4/arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch b/queue-4.4/arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch
new file mode 100644 (file)
index 0000000..0720fc4
--- /dev/null
@@ -0,0 +1,57 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:16 +0100
+Subject: arm/arm64: smccc: Make function identifiers an unsigned quantity
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Mark Rutland <mark.rutland@arm.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-13-ardb@kernel.org>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit ded4c39e93f3b72968fdb79baba27f3b83dad34c upstream.
+
+Function identifiers are a 32-bit, unsigned quantity, but we never
+tell the compiler so, resulting in the following:
+
+ 4ac:   b26187e0        mov     x0, #0xffffffff80000001
+
+We thus rely on the firmware narrowing it for us, which is not
+always a reasonable expectation.
+
+Cc: stable@vger.kernel.org
+Reported-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mark Rutland <mark.rutland@arm.com> [v4.9 backport]
+Tested-by: Greg Hackmann <ghackmann@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/arm-smccc.h |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -14,14 +14,16 @@
+ #ifndef __LINUX_ARM_SMCCC_H
+ #define __LINUX_ARM_SMCCC_H
++#include <uapi/linux/const.h>
++
+ /*
+  * This file provides common defines for ARM SMC Calling Convention as
+  * specified in
+  * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+  */
+-#define ARM_SMCCC_STD_CALL            0
+-#define ARM_SMCCC_FAST_CALL           1
++#define ARM_SMCCC_STD_CALL            _AC(0,U)
++#define ARM_SMCCC_FAST_CALL           _AC(1,U)
+ #define ARM_SMCCC_TYPE_SHIFT          31
+ #define ARM_SMCCC_SMC_32              0
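The underlying C subtlety: the function-ID constants were built from shifts of plain (signed) int, and a negative int sign-extends when widened into a 64-bit register argument. A standalone sketch:

    unsigned long bad  = (1 << 31) | 1;   /* (1 << 31) is a negative int in
                                             practice: widens to
                                             0xffffffff80000001 */
    unsigned long good = (1U << 31) | 1;  /* unsigned: stays 0x80000001 */

The _AC(1,U) spelling gives the constant a U suffix in C while leaving it bare for assembly, which is why it is used here rather than a plain 1U.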
diff --git a/queue-4.4/arm-bugs-add-support-for-per-processor-bug-checking.patch b/queue-4.4/arm-bugs-add-support-for-per-processor-bug-checking.patch
new file mode 100644 (file)
index 0000000..3a3a184
--- /dev/null
@@ -0,0 +1,80 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:24 +0100
+Subject: ARM: bugs: add support for per-processor bug checking
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-21-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit 9d3a04925deeabb97c8e26d940b501a2873e8af3 upstream.
+
+Add support for per-processor bug checking - each processor function
+descriptor gains a function pointer for this check, which must not be
+an __init function.  If non-NULL, this will be called whenever a CPU
+enters the kernel via whichever path (boot CPU, secondary CPU startup,
+CPU resuming, etc.)
+
+This allows processor specific bug checks to validate that workaround
+bits are properly enabled by firmware via all entry paths to the kernel.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Boot-tested-by: Tony Lindgren <tony@atomide.com>
+Reviewed-by: Tony Lindgren <tony@atomide.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/proc-fns.h |    4 ++++
+ arch/arm/kernel/bugs.c          |    4 ++++
+ arch/arm/mm/proc-macros.S       |    3 ++-
+ 3 files changed, 10 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/proc-fns.h
++++ b/arch/arm/include/asm/proc-fns.h
+@@ -37,6 +37,10 @@ extern struct processor {
+        */
+       void (*_proc_init)(void);
+       /*
++       * Check for processor bugs
++       */
++      void (*check_bugs)(void);
++      /*
+        * Disable any processor specifics
+        */
+       void (*_proc_fin)(void);
+--- a/arch/arm/kernel/bugs.c
++++ b/arch/arm/kernel/bugs.c
+@@ -5,6 +5,10 @@
+ void check_other_bugs(void)
+ {
++#ifdef MULTI_CPU
++      if (processor.check_bugs)
++              processor.check_bugs();
++#endif
+ }
+ void __init check_bugs(void)
+--- a/arch/arm/mm/proc-macros.S
++++ b/arch/arm/mm/proc-macros.S
+@@ -258,13 +258,14 @@
+       mcr     p15, 0, ip, c7, c10, 4          @ data write barrier
+       .endm
+-.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0
++.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
+       .type   \name\()_processor_functions, #object
+       .align 2
+ ENTRY(\name\()_processor_functions)
+       .word   \dabort
+       .word   \pabort
+       .word   cpu_\name\()_proc_init
++      .word   \bugs
+       .word   cpu_\name\()_proc_fin
+       .word   cpu_\name\()_reset
+       .word   cpu_\name\()_do_idle
diff --git a/queue-4.4/arm-bugs-hook-processor-bug-checking-into-smp-and-suspend-paths.patch b/queue-4.4/arm-bugs-hook-processor-bug-checking-into-smp-and-suspend-paths.patch
new file mode 100644 (file)
index 0000000..0d3b74c
--- /dev/null
@@ -0,0 +1,99 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:23 +0100
+Subject: ARM: bugs: hook processor bug checking into SMP and suspend paths
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-20-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit 26602161b5ba795928a5a719fe1d5d9f2ab5c3ef upstream.
+
+Check for CPU bugs when secondary processors are being brought online,
+and also when CPUs are resuming from a low power mode.  This gives an
+opportunity to check that processor specific bug workarounds are
+correctly enabled for all paths by which a CPU re-enters the kernel.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Boot-tested-by: Tony Lindgren <tony@atomide.com>
+Reviewed-by: Tony Lindgren <tony@atomide.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/bugs.h |    2 ++
+ arch/arm/kernel/bugs.c      |    5 +++++
+ arch/arm/kernel/smp.c       |    4 ++++
+ arch/arm/kernel/suspend.c   |    2 ++
+ 4 files changed, 13 insertions(+)
+
+--- a/arch/arm/include/asm/bugs.h
++++ b/arch/arm/include/asm/bugs.h
+@@ -14,8 +14,10 @@ extern void check_writebuffer_bugs(void)
+ #ifdef CONFIG_MMU
+ extern void check_bugs(void);
++extern void check_other_bugs(void);
+ #else
+ #define check_bugs() do { } while (0)
++#define check_other_bugs() do { } while (0)
+ #endif
+ #endif
+--- a/arch/arm/kernel/bugs.c
++++ b/arch/arm/kernel/bugs.c
+@@ -3,7 +3,12 @@
+ #include <asm/bugs.h>
+ #include <asm/proc-fns.h>
++void check_other_bugs(void)
++{
++}
++
+ void __init check_bugs(void)
+ {
+       check_writebuffer_bugs();
++      check_other_bugs();
+ }
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -29,6 +29,7 @@
+ #include <linux/irq_work.h>
+ #include <linux/atomic.h>
++#include <asm/bugs.h>
+ #include <asm/smp.h>
+ #include <asm/cacheflush.h>
+ #include <asm/cpu.h>
+@@ -396,6 +397,9 @@ asmlinkage void secondary_start_kernel(v
+        * before we continue - which happens after __cpu_up returns.
+        */
+       set_cpu_online(cpu, true);
++
++      check_other_bugs();
++
+       complete(&cpu_running);
+       local_irq_enable();
+--- a/arch/arm/kernel/suspend.c
++++ b/arch/arm/kernel/suspend.c
+@@ -1,6 +1,7 @@
+ #include <linux/init.h>
+ #include <linux/slab.h>
++#include <asm/bugs.h>
+ #include <asm/cacheflush.h>
+ #include <asm/idmap.h>
+ #include <asm/pgalloc.h>
+@@ -34,6 +35,7 @@ int cpu_suspend(unsigned long arg, int (
+               cpu_switch_mm(mm->pgd, mm);
+               local_flush_bp_all();
+               local_flush_tlb_all();
++              check_other_bugs();
+       }
+       return ret;
diff --git a/queue-4.4/arm-bugs-prepare-processor-bug-infrastructure.patch b/queue-4.4/arm-bugs-prepare-processor-bug-infrastructure.patch
new file mode 100644 (file)
index 0000000..a23cacd
--- /dev/null
@@ -0,0 +1,67 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:22 +0100
+Subject: ARM: bugs: prepare processor bug infrastructure
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-19-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit a5b9177f69329314721aa7022b7e69dab23fa1f0 upstream.
+
+Prepare the processor bug infrastructure so that it can be expanded to
+check for per-processor bugs.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Boot-tested-by: Tony Lindgren <tony@atomide.com>
+Reviewed-by: Tony Lindgren <tony@atomide.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/bugs.h |    4 ++--
+ arch/arm/kernel/Makefile    |    1 +
+ arch/arm/kernel/bugs.c      |    9 +++++++++
+ 3 files changed, 12 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/include/asm/bugs.h
++++ b/arch/arm/include/asm/bugs.h
+@@ -10,10 +10,10 @@
+ #ifndef __ASM_BUGS_H
+ #define __ASM_BUGS_H
+-#ifdef CONFIG_MMU
+ extern void check_writebuffer_bugs(void);
+-#define check_bugs() check_writebuffer_bugs()
++#ifdef CONFIG_MMU
++extern void check_bugs(void);
+ #else
+ #define check_bugs() do { } while (0)
+ #endif
+--- a/arch/arm/kernel/Makefile
++++ b/arch/arm/kernel/Makefile
+@@ -30,6 +30,7 @@ else
+ obj-y         += entry-armv.o
+ endif
++obj-$(CONFIG_MMU)             += bugs.o
+ obj-$(CONFIG_CPU_IDLE)                += cpuidle.o
+ obj-$(CONFIG_ISA_DMA_API)     += dma.o
+ obj-$(CONFIG_FIQ)             += fiq.o fiqasm.o
+--- /dev/null
++++ b/arch/arm/kernel/bugs.c
+@@ -0,0 +1,9 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <linux/init.h>
++#include <asm/bugs.h>
++#include <asm/proc-fns.h>
++
++void __init check_bugs(void)
++{
++      check_writebuffer_bugs();
++}
diff --git a/queue-4.4/arm-clean-up-per-processor-check_bugs-method-call.patch b/queue-4.4/arm-clean-up-per-processor-check_bugs-method-call.patch
new file mode 100644 (file)
index 0000000..d57f2f8
--- /dev/null
@@ -0,0 +1,51 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:50 +0100
+Subject: ARM: clean up per-processor check_bugs method call
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-47-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit 945aceb1db8885d3a35790cf2e810f681db52756 upstream.
+
+Call the per-processor type check_bugs() method in the same way as we
+do other per-processor functions - move the "processor." detail into
+proc-fns.h.
+
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/proc-fns.h |    1 +
+ arch/arm/kernel/bugs.c          |    4 ++--
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/include/asm/proc-fns.h
++++ b/arch/arm/include/asm/proc-fns.h
+@@ -99,6 +99,7 @@ extern void cpu_do_suspend(void *);
+ extern void cpu_do_resume(void *);
+ #else
+ #define cpu_proc_init                 processor._proc_init
++#define cpu_check_bugs                        processor.check_bugs
+ #define cpu_proc_fin                  processor._proc_fin
+ #define cpu_reset                     processor.reset
+ #define cpu_do_idle                   processor._do_idle
+--- a/arch/arm/kernel/bugs.c
++++ b/arch/arm/kernel/bugs.c
+@@ -6,8 +6,8 @@
+ void check_other_bugs(void)
+ {
+ #ifdef MULTI_CPU
+-      if (processor.check_bugs)
+-              processor.check_bugs();
++      if (cpu_check_bugs)
++              cpu_check_bugs();
+ #endif
+ }
diff --git a/queue-4.4/arm-ensure-that-processor-vtables-is-not-lost-after-boot.patch b/queue-4.4/arm-ensure-that-processor-vtables-is-not-lost-after-boot.patch
new file mode 100644 (file)
index 0000000..39e75f6
--- /dev/null
@@ -0,0 +1,58 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:53 +0100
+Subject: ARM: ensure that processor vtables is not lost after boot
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-50-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit 3a4d0c2172bcf15b7a3d9d498b2b355f9864286b upstream.
+
+Marek Szyprowski reported problems with CPU hotplug in current kernels.
+This was tracked down to the processor vtables being located in an
+init section, and therefore discarded after kernel boot, despite being
+required after boot to properly initialise the non-boot CPUs.
+
+Arrange for these tables to end up in .rodata when required.
+
+Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Tested-by: Krzysztof Kozlowski <krzk@kernel.org>
+Fixes: 383fb3ee8024 ("ARM: spectre-v2: per-CPU vtables to work around big.Little systems")
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/mm/proc-macros.S |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/arch/arm/mm/proc-macros.S
++++ b/arch/arm/mm/proc-macros.S
+@@ -259,6 +259,13 @@
+       .endm
+ .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
++/*
++ * If we are building for big.Little with branch predictor hardening,
++ * we need the processor function tables to remain available after boot.
++ */
++#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++      .section ".rodata"
++#endif
+       .type   \name\()_processor_functions, #object
+       .align 2
+ ENTRY(\name\()_processor_functions)
+@@ -294,6 +301,9 @@ ENTRY(\name\()_processor_functions)
+       .endif
+       .size   \name\()_processor_functions, . - \name\()_processor_functions
++#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++      .previous
++#endif
+ .endm
+ .macro define_cache_functions name:req
diff --git a/queue-4.4/arm-fix-the-cockup-in-the-previous-patch.patch b/queue-4.4/arm-fix-the-cockup-in-the-previous-patch.patch
new file mode 100644 (file)
index 0000000..445492d
--- /dev/null
@@ -0,0 +1,51 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:54 +0100
+Subject: ARM: fix the cockup in the previous patch
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-51-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit d6951f582cc50ba0ad22ef46b599740966599b14 upstream.
+
+The intention in the previous patch was to only place the processor
+tables in the .rodata section if big.Little was being built and we
+wanted the branch target hardening, but instead (due to the way it
+was tested) it ended up always placing the tables into the .rodata
+section.
+
+Although harmless, let's correct this anyway.
+
+Fixes: 3a4d0c2172bc ("ARM: ensure that processor vtables is not lost after boot")
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/mm/proc-macros.S |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/mm/proc-macros.S
++++ b/arch/arm/mm/proc-macros.S
+@@ -263,7 +263,7 @@
+  * If we are building for big.Little with branch predictor hardening,
+  * we need the processor function tables to remain available after boot.
+  */
+-#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+       .section ".rodata"
+ #endif
+       .type   \name\()_processor_functions, #object
+@@ -301,7 +301,7 @@ ENTRY(\name\()_processor_functions)
+       .endif
+       .size   \name\()_processor_functions, . - \name\()_processor_functions
+-#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+       .previous
+ #endif
+ .endm
diff --git a/queue-4.4/arm-make-lookup_processor_type-non-__init.patch b/queue-4.4/arm-make-lookup_processor_type-non-__init.patch
new file mode 100644 (file)
index 0000000..871bc6c
--- /dev/null
@@ -0,0 +1,48 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:48 +0100
+Subject: ARM: make lookup_processor_type() non-__init
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-45-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit 899a42f836678a595f7d2bc36a5a0c2b03d08cbc upstream.
+
+Move lookup_processor_type() out of the __init section so it is callable
+from (e.g.) the secondary startup code during hotplug.
+
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kernel/head-common.S |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/kernel/head-common.S
++++ b/arch/arm/kernel/head-common.S
+@@ -122,6 +122,9 @@ __mmap_switched_data:
+       .long   init_thread_union + THREAD_START_SP @ sp
+       .size   __mmap_switched_data, . - __mmap_switched_data
++      __FINIT
++      .text
++
+ /*
+  * This provides a C-API version of __lookup_processor_type
+  */
+@@ -133,9 +136,6 @@ ENTRY(lookup_processor_type)
+       ldmfd   sp!, {r4 - r6, r9, pc}
+ ENDPROC(lookup_processor_type)
+-      __FINIT
+-      .text
+-
+ /*
+  * Read processor ID register (CP#15, CR0), and look up in the linker-built
+  * supported processor list.  Note that we can't use the absolute addresses
diff --git a/queue-4.4/arm-move-system-register-accessors-to-asm-cp15.h.patch b/queue-4.4/arm-move-system-register-accessors-to-asm-cp15.h.patch
new file mode 100644 (file)
index 0000000..7af6ba4
--- /dev/null
@@ -0,0 +1,143 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:11 +0100
+Subject: ARM: Move system register accessors to asm/cp15.h
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Vladimir Murzin <vladimir.murzin@arm.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-8-ardb@kernel.org>
+
+From: Vladimir Murzin <vladimir.murzin@arm.com>
+
+Commit 4f2546384150e78cad8045e59a9587fabcd9f9fe upstream.
+
+Headers linux/irqchip/arm-gic-v3.h and arch/arm/include/asm/kvm_hyp.h
+are included in virt/kvm/arm/hyp/vgic-v3-sr.c and both define macros
+called __ACCESS_CP15 and __ACCESS_CP15_64 which obviously creates a
+conflict. These macros were introduced independently for GIC and KVM
+and, in fact, do the same thing.
+
+As an option we could add prefixes to the KVM and GIC versions of the
+macros so they won't clash, but that would introduce code duplication.
+Alternatively, we could keep the macros in, say, the GIC header and
+include it from the KVM one (or vice versa), but such a dependency
+would be no nicer.
+
+So we follow the arm64 way (it handles this via sysreg.h) and move the
+single set of macros to asm/cp15.h.
+
+Cc: Russell King <rmk+kernel@armlinux.org.uk>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/arch_gicv3.h |   27 +++++++++++----------------
+ arch/arm/include/asm/cp15.h       |   15 +++++++++++++++
+ 2 files changed, 26 insertions(+), 16 deletions(-)
+
+--- a/arch/arm/include/asm/arch_gicv3.h
++++ b/arch/arm/include/asm/arch_gicv3.h
+@@ -22,9 +22,7 @@
+ #include <linux/io.h>
+ #include <asm/barrier.h>
+-
+-#define __ACCESS_CP15(CRn, Op1, CRm, Op2)     p15, Op1, %0, CRn, CRm, Op2
+-#define __ACCESS_CP15_64(Op1, CRm)            p15, Op1, %Q0, %R0, CRm
++#include <asm/cp15.h>
+ #define ICC_EOIR1                     __ACCESS_CP15(c12, 0, c12, 1)
+ #define ICC_DIR                               __ACCESS_CP15(c12, 0, c11, 1)
+@@ -102,58 +100,55 @@
+ static inline void gic_write_eoir(u32 irq)
+ {
+-      asm volatile("mcr " __stringify(ICC_EOIR1) : : "r" (irq));
++      write_sysreg(irq, ICC_EOIR1);
+       isb();
+ }
+ static inline void gic_write_dir(u32 val)
+ {
+-      asm volatile("mcr " __stringify(ICC_DIR) : : "r" (val));
++      write_sysreg(val, ICC_DIR);
+       isb();
+ }
+ static inline u32 gic_read_iar(void)
+ {
+-      u32 irqstat;
++      u32 irqstat = read_sysreg(ICC_IAR1);
+-      asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat));
+       dsb(sy);
++
+       return irqstat;
+ }
+ static inline void gic_write_pmr(u32 val)
+ {
+-      asm volatile("mcr " __stringify(ICC_PMR) : : "r" (val));
++      write_sysreg(val, ICC_PMR);
+ }
+ static inline void gic_write_ctlr(u32 val)
+ {
+-      asm volatile("mcr " __stringify(ICC_CTLR) : : "r" (val));
++      write_sysreg(val, ICC_CTLR);
+       isb();
+ }
+ static inline void gic_write_grpen1(u32 val)
+ {
+-      asm volatile("mcr " __stringify(ICC_IGRPEN1) : : "r" (val));
++      write_sysreg(val, ICC_IGRPEN1);
+       isb();
+ }
+ static inline void gic_write_sgi1r(u64 val)
+ {
+-      asm volatile("mcrr " __stringify(ICC_SGI1R) : : "r" (val));
++      write_sysreg(val, ICC_SGI1R);
+ }
+ static inline u32 gic_read_sre(void)
+ {
+-      u32 val;
+-
+-      asm volatile("mrc " __stringify(ICC_SRE) : "=r" (val));
+-      return val;
++      return read_sysreg(ICC_SRE);
+ }
+ static inline void gic_write_sre(u32 val)
+ {
+-      asm volatile("mcr " __stringify(ICC_SRE) : : "r" (val));
++      write_sysreg(val, ICC_SRE);
+       isb();
+ }
+--- a/arch/arm/include/asm/cp15.h
++++ b/arch/arm/include/asm/cp15.h
+@@ -49,6 +49,21 @@
+ #ifdef CONFIG_CPU_CP15
++#define __ACCESS_CP15(CRn, Op1, CRm, Op2)     \
++      "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
++#define __ACCESS_CP15_64(Op1, CRm)            \
++      "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
++
++#define __read_sysreg(r, w, c, t) ({                          \
++      t __val;                                                \
++      asm volatile(r " " c : "=r" (__val));                   \
++      __val;                                                  \
++})
++#define read_sysreg(...)              __read_sysreg(__VA_ARGS__)
++
++#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
++#define write_sysreg(v, ...)          __write_sysreg(v, __VA_ARGS__)
++
+ extern unsigned long cr_alignment;    /* defined in entry-armv.S */
+ static inline unsigned long get_cr(void)
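
A note for readers following the series: the two layers added above compose by
plain token pasting, so the converted accessors expand back to exactly the
mrc/mcr forms they replace. A sketch of the preprocessor output, assuming
ICC_SRE is defined as __ACCESS_CP15(c12, 0, c12, 5) elsewhere in
arch_gicv3.h (only ICC_EOIR1 and ICC_DIR are visible in this excerpt):

    u32 sre = read_sysreg(ICC_SRE);
    /* becomes:
     *   ({ u32 __val;
     *      asm volatile("mrc" " " "p15, 0, %0, c12, c12, 5" : "=r" (__val));
     *      __val; })
     */

    write_sysreg(sre | 1, ICC_SRE);
    /* becomes:
     *   asm volatile("mcr" " " "p15, 0, %0, c12, c12, 5" : : "r" ((u32)(sre | 1)));
     */

The unused "mcr" (or "mrc") argument in each helper is deliberate: both macros
take the same four-element list supplied by __ACCESS_CP15(), so one register
definition serves reads and writes alike.
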
diff --git a/queue-4.4/arm-oabi-compat-copy-semops-using-__copy_from_user.patch b/queue-4.4/arm-oabi-compat-copy-semops-using-__copy_from_user.patch
new file mode 100644
index 0000000..bd3be68
--- /dev/null
+++ b/queue-4.4/arm-oabi-compat-copy-semops-using-__copy_from_user.patch
@@ -0,0 +1,51 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:36 +0100
+Subject: ARM: oabi-compat: copy semops using __copy_from_user()
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-33-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit 8c8484a1c18e3231648f5ba7cc5ffb7fd70b3ca4 upstream.
+
+__get_user_error() is used as a fast accessor to make copying structure
+members as efficient as possible.  However, with software PAN and the
+recent Spectre variant 1, the efficiency is reduced as these are no
+longer fast accessors.
+
+In the case of software PAN, it has to switch the domain register around
+each access, and with Spectre variant 1, it would have to repeat the
+access_ok() check for each access.
+
+Rather than using __get_user_error() to copy each semops element member,
+copy each semops element in full using __copy_from_user().
+
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kernel/sys_oabi-compat.c |    8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/kernel/sys_oabi-compat.c
++++ b/arch/arm/kernel/sys_oabi-compat.c
+@@ -328,9 +328,11 @@ asmlinkage long sys_oabi_semtimedop(int
+               return -ENOMEM;
+       err = 0;
+       for (i = 0; i < nsops; i++) {
+-              __get_user_error(sops[i].sem_num, &tsops->sem_num, err);
+-              __get_user_error(sops[i].sem_op,  &tsops->sem_op,  err);
+-              __get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
++              struct oabi_sembuf osb;
++              err |= __copy_from_user(&osb, tsops, sizeof(osb));
++              sops[i].sem_num = osb.sem_num;
++              sops[i].sem_op = osb.sem_op;
++              sops[i].sem_flg = osb.sem_flg;
+               tsops++;
+       }
+       if (timeout) {
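
The shape of this change is the same one the next patch applies to the
sigcontext registers: one bulk copy per element, then a plain field unpack, so
the software PAN domain switch (and, later in this series, the pointer
sanitization) is paid once per element instead of once per field. A standalone
sketch of the idea, with memcpy() standing in for __copy_from_user() and the
oabi_sembuf layout reproduced from sys_oabi-compat.c rather than from this
excerpt (illustrative only, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct oabi_sembuf {            /* OABI layout, per sys_oabi-compat.c */
            uint16_t sem_num;
            int16_t  sem_op;
            int16_t  sem_flg;
            uint16_t __pad;
    };

    struct native_sembuf {          /* simplified stand-in for struct sembuf */
            uint16_t sem_num;
            int16_t  sem_op;
            int16_t  sem_flg;
    };

    int main(void)
    {
            struct oabi_sembuf user[2] = { { 0, +1, 0, 0 }, { 1, -1, 0, 0 } };
            struct native_sembuf sops[2];

            for (int i = 0; i < 2; i++) {
                    struct oabi_sembuf osb;

                    /* one bulk copy per element, then unpack the fields */
                    memcpy(&osb, &user[i], sizeof(osb));
                    sops[i].sem_num = osb.sem_num;
                    sops[i].sem_op  = osb.sem_op;
                    sops[i].sem_flg = osb.sem_flg;
            }

            printf("sops[1]: num=%u op=%d\n", sops[1].sem_num, sops[1].sem_op);
            return 0;
    }
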
diff --git a/queue-4.4/arm-signal-copy-registers-using-__copy_from_user.patch b/queue-4.4/arm-signal-copy-registers-using-__copy_from_user.patch
new file mode 100644
index 0000000..a78e1fb
--- /dev/null
+++ b/queue-4.4/arm-signal-copy-registers-using-__copy_from_user.patch
@@ -0,0 +1,88 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:34 +0100
+Subject: ARM: signal: copy registers using __copy_from_user()
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-31-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit c32cd419d6650e42b9cdebb83c672ec945e6bd7e upstream.
+
+__get_user_error() is used as a fast accessor to make copying structure
+members in the signal handling path as efficient as possible.  However,
+with software PAN and the recent Spectre variant 1, the efficiency is
+reduced as these are no longer fast accessors.
+
+In the case of software PAN, it has to switch the domain register around
+each access, and with Spectre variant 1, it would have to repeat the
+access_ok() check for each access.
+
+It becomes much more efficient to use __copy_from_user() instead, so
+let's use this for the ARM integer registers.
+
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kernel/signal.c |   38 +++++++++++++++++++++-----------------
+ 1 file changed, 21 insertions(+), 17 deletions(-)
+
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -141,6 +141,7 @@ struct rt_sigframe {
+ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
+ {
++      struct sigcontext context;
+       struct aux_sigframe __user *aux;
+       sigset_t set;
+       int err;
+@@ -149,23 +150,26 @@ static int restore_sigframe(struct pt_re
+       if (err == 0)
+               set_current_blocked(&set);
+-      __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
+-      __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
+-      __get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
+-      __get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
+-      __get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
+-      __get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
+-      __get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
+-      __get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
+-      __get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
+-      __get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
+-      __get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
+-      __get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
+-      __get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
+-      __get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
+-      __get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
+-      __get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
+-      __get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
++      err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
++      if (err == 0) {
++              regs->ARM_r0 = context.arm_r0;
++              regs->ARM_r1 = context.arm_r1;
++              regs->ARM_r2 = context.arm_r2;
++              regs->ARM_r3 = context.arm_r3;
++              regs->ARM_r4 = context.arm_r4;
++              regs->ARM_r5 = context.arm_r5;
++              regs->ARM_r6 = context.arm_r6;
++              regs->ARM_r7 = context.arm_r7;
++              regs->ARM_r8 = context.arm_r8;
++              regs->ARM_r9 = context.arm_r9;
++              regs->ARM_r10 = context.arm_r10;
++              regs->ARM_fp = context.arm_fp;
++              regs->ARM_ip = context.arm_ip;
++              regs->ARM_sp = context.arm_sp;
++              regs->ARM_lr = context.arm_lr;
++              regs->ARM_pc = context.arm_pc;
++              regs->ARM_cpsr = context.arm_cpsr;
++      }
+       err |= !valid_user_regs(regs);
diff --git a/queue-4.4/arm-spectre-add-kconfig-symbol-for-cpus-vulnerable-to-spectre.patch b/queue-4.4/arm-spectre-add-kconfig-symbol-for-cpus-vulnerable-to-spectre.patch
new file mode 100644
index 0000000..3ab95d2
--- /dev/null
+++ b/queue-4.4/arm-spectre-add-kconfig-symbol-for-cpus-vulnerable-to-spectre.patch
@@ -0,0 +1,48 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:25 +0100
+Subject: ARM: spectre: add Kconfig symbol for CPUs vulnerable to Spectre
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-22-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit c58d237d0852a57fde9bc2c310972e8f4e3d155d upstream.
+
+Add a Kconfig symbol for CPUs which are vulnerable to the Spectre
+attacks.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Boot-tested-by: Tony Lindgren <tony@atomide.com>
+Reviewed-by: Tony Lindgren <tony@atomide.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/mm/Kconfig |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -396,6 +396,7 @@ config CPU_V7
+       select CPU_CP15_MPU if !MMU
+       select CPU_HAS_ASID if MMU
+       select CPU_PABRT_V7
++      select CPU_SPECTRE if MMU
+       select CPU_TLB_V7 if MMU
+ # ARMv7M
+@@ -793,6 +794,9 @@ config CPU_BPREDICT_DISABLE
+       help
+         Say Y here to disable branch prediction.  If unsure, say N.
++config CPU_SPECTRE
++      bool
++
+ config TLS_REG_EMUL
+       bool
+       select NEED_KUSER_HELPERS
diff --git a/queue-4.4/arm-spectre-v1-add-array_index_mask_nospec-implementation.patch b/queue-4.4/arm-spectre-v1-add-array_index_mask_nospec-implementation.patch
new file mode 100644
index 0000000..4f07b8e
--- /dev/null
+++ b/queue-4.4/arm-spectre-v1-add-array_index_mask_nospec-implementation.patch
@@ -0,0 +1,56 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:32 +0100
+Subject: ARM: spectre-v1: add array_index_mask_nospec() implementation
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-29-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit 1d4238c56f9816ce0f9c8dbe42d7f2ad81cb6613 upstream.
+
+Add an implementation of the array_index_mask_nospec() function for
+mitigating Spectre variant 1 throughout the kernel.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Boot-tested-by: Tony Lindgren <tony@atomide.com>
+Reviewed-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/barrier.h |   21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+--- a/arch/arm/include/asm/barrier.h
++++ b/arch/arm/include/asm/barrier.h
+@@ -108,5 +108,26 @@ do {                                                                      \
+ #define smp_mb__before_atomic()       smp_mb()
+ #define smp_mb__after_atomic()        smp_mb()
++#ifdef CONFIG_CPU_SPECTRE
++static inline unsigned long array_index_mask_nospec(unsigned long idx,
++                                                  unsigned long sz)
++{
++      unsigned long mask;
++
++      asm volatile(
++              "cmp    %1, %2\n"
++      "       sbc     %0, %1, %1\n"
++      CSDB
++      : "=r" (mask)
++      : "r" (idx), "Ir" (sz)
++      : "cc");
++
++      return mask;
++}
++#define array_index_mask_nospec array_index_mask_nospec
++#endif
++
++#include <asm-generic/barrier.h>
++
+ #endif /* !__ASSEMBLY__ */
+ #endif /* __ASM_BARRIER_H */
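
To see what the cmp/sbc/CSDB sequence computes, here is a runnable, portable C
model of the same mask together with the clamping idiom that the
array_index_nospec() helper builds on top of it. The shift-based formula below
is the portable form (the kernel's generic fallback uses the same expression);
the ARM version in the patch produces the identical all-ones/zero mask with
cmp/sbc, without a branch, and then orders it with CSDB:

    #include <stdio.h>

    /* All-ones when idx < sz, zero otherwise. Relies on arithmetic
     * right shift of a negative value, as the generic kernel helper
     * does. */
    static unsigned long index_mask(unsigned long idx, unsigned long sz)
    {
            return ~(long)(idx | (sz - 1UL - idx)) >> (8 * sizeof(long) - 1);
    }

    int main(void)
    {
            unsigned long table[4] = { 10, 11, 12, 13 };

            for (unsigned long idx = 0; idx < 8; idx++) {
                    unsigned long safe = idx & index_mask(idx, 4);

                    /* out-of-range indexes collapse to slot 0, so even
                     * a misspeculated load stays inside the array */
                    printf("idx=%lu -> table[%lu] = %lu\n",
                           idx, safe, table[safe]);
            }
            return 0;
    }
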
diff --git a/queue-4.4/arm-spectre-v1-add-speculation-barrier-csdb-macros.patch b/queue-4.4/arm-spectre-v1-add-speculation-barrier-csdb-macros.patch
new file mode 100644
index 0000000..e4778f0
--- /dev/null
+++ b/queue-4.4/arm-spectre-v1-add-speculation-barrier-csdb-macros.patch
@@ -0,0 +1,73 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:31 +0100
+Subject: ARM: spectre-v1: add speculation barrier (csdb) macros
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-28-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit a78d156587931a2c3b354534aa772febf6c9e855 upstream.
+
+Add assembly and C macros for the new CSDB instruction.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Boot-tested-by: Tony Lindgren <tony@atomide.com>
+Reviewed-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/assembler.h |    8 ++++++++
+ arch/arm/include/asm/barrier.h   |   13 +++++++++++++
+ 2 files changed, 21 insertions(+)
+
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -441,6 +441,14 @@ THUMB(    orr     \reg , \reg , #PSR_T_BIT        )
+       .size \name , . - \name
+       .endm
++      .macro  csdb
++#ifdef CONFIG_THUMB2_KERNEL
++      .inst.w 0xf3af8014
++#else
++      .inst   0xe320f014
++#endif
++      .endm
++
+       .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+ #ifndef CONFIG_CPU_USE_DOMAINS
+       adds    \tmp, \addr, #\size - 1
+--- a/arch/arm/include/asm/barrier.h
++++ b/arch/arm/include/asm/barrier.h
+@@ -18,6 +18,12 @@
+ #define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
+ #define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
+ #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
++#ifdef CONFIG_THUMB2_KERNEL
++#define CSDB  ".inst.w 0xf3af8014"
++#else
++#define CSDB  ".inst  0xe320f014"
++#endif
++#define csdb() __asm__ __volatile__(CSDB : : : "memory")
+ #elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
+ #define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+                                   : : "r" (0) : "memory")
+@@ -38,6 +44,13 @@
+ #define dmb(x) __asm__ __volatile__ ("" : : : "memory")
+ #endif
++#ifndef CSDB
++#define CSDB
++#endif
++#ifndef csdb
++#define csdb()
++#endif
++
+ #ifdef CONFIG_ARM_HEAVY_MB
+ extern void (*soc_mb)(void);
+ extern void arm_heavy_mb(void);
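
Both encodings are the same instruction: CSDB sits in the hint (NOP-compatible)
space, which is why it is emitted with .inst rather than a mnemonic older
assemblers would reject, and why it executes harmlessly on unaffected cores.
The C-side macro is meant to be used as in the sketch below, the pattern the
syscall and uaccess patches later in the series follow; where the barrier is
not defined, csdb() compiles away entirely (kernel context assumed, names
illustrative):

    /* Sketch: clamp a value with conditional logic, then fence so that
     * dependent loads cannot run ahead using the unclamped value. */
    static unsigned int load_clamped(const unsigned int *table,
                                     unsigned int idx, unsigned int limit)
    {
            if (idx >= limit)
                    idx = 0;        /* clamp rather than branch away */
            csdb();                 /* no speculative load below this
                                     * point may use the bad idx */
            return table[idx];
    }
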
diff --git a/queue-4.4/arm-spectre-v1-fix-syscall-entry.patch b/queue-4.4/arm-spectre-v1-fix-syscall-entry.patch
new file mode 100644
index 0000000..a8bfdc2
--- /dev/null
+++ b/queue-4.4/arm-spectre-v1-fix-syscall-entry.patch
@@ -0,0 +1,104 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:33 +0100
+Subject: ARM: spectre-v1: fix syscall entry
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-30-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit 10573ae547c85b2c61417ff1a106cffbfceada35 upstream.
+
+Prevent speculation at the syscall table decoding by clamping the index
+used to zero on invalid system call numbers, and using the csdb
+speculation barrier.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Boot-tested-by: Tony Lindgren <tony@atomide.com>
+Reviewed-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/kernel/entry-common.S |   18 +++++++-----------
+ arch/arm/kernel/entry-header.S |   25 +++++++++++++++++++++++++
+ 2 files changed, 32 insertions(+), 11 deletions(-)
+
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -223,9 +223,7 @@ local_restart:
+       tst     r10, #_TIF_SYSCALL_WORK         @ are we tracing syscalls?
+       bne     __sys_trace
+-      cmp     scno, #NR_syscalls              @ check upper syscall limit
+-      badr    lr, ret_fast_syscall            @ return address
+-      ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine
++      invoke_syscall tbl, scno, r10, ret_fast_syscall
+       add     r1, sp, #S_OFF
+ 2:    cmp     scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
+@@ -258,14 +256,8 @@ __sys_trace:
+       mov     r1, scno
+       add     r0, sp, #S_OFF
+       bl      syscall_trace_enter
+-
+-      badr    lr, __sys_trace_return          @ return address
+-      mov     scno, r0                        @ syscall number (possibly new)
+-      add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
+-      cmp     scno, #NR_syscalls              @ check upper syscall limit
+-      ldmccia r1, {r0 - r6}                   @ have to reload r0 - r6
+-      stmccia sp, {r4, r5}                    @ and update the stack args
+-      ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine
++      mov     scno, r0
++      invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
+       cmp     scno, #-1                       @ skip the syscall?
+       bne     2b
+       add     sp, sp, #S_OFF                  @ restore stack
+@@ -317,6 +309,10 @@ sys_syscall:
+               bic     scno, r0, #__NR_OABI_SYSCALL_BASE
+               cmp     scno, #__NR_syscall - __NR_SYSCALL_BASE
+               cmpne   scno, #NR_syscalls      @ check range
++#ifdef CONFIG_CPU_SPECTRE
++              movhs   scno, #0
++              csdb
++#endif
+               stmloia sp, {r5, r6}            @ shuffle args
+               movlo   r0, r1
+               movlo   r1, r2
+--- a/arch/arm/kernel/entry-header.S
++++ b/arch/arm/kernel/entry-header.S
+@@ -373,6 +373,31 @@
+ #endif
+       .endm
++      .macro  invoke_syscall, table, nr, tmp, ret, reload=0
++#ifdef CONFIG_CPU_SPECTRE
++      mov     \tmp, \nr
++      cmp     \tmp, #NR_syscalls              @ check upper syscall limit
++      movcs   \tmp, #0
++      csdb
++      badr    lr, \ret                        @ return address
++      .if     \reload
++      add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
++      ldmccia r1, {r0 - r6}                   @ reload r0-r6
++      stmccia sp, {r4, r5}                    @ update stack arguments
++      .endif
++      ldrcc   pc, [\table, \tmp, lsl #2]      @ call sys_* routine
++#else
++      cmp     \nr, #NR_syscalls               @ check upper syscall limit
++      badr    lr, \ret                        @ return address
++      .if     \reload
++      add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
++      ldmccia r1, {r0 - r6}                   @ reload r0-r6
++      stmccia sp, {r4, r5}                    @ update stack arguments
++      .endif
++      ldrcc   pc, [\table, \nr, lsl #2]       @ call sys_* routine
++#endif
++      .endm
++
+ /*
+  * These are the registers used in the syscall handler, and allow us to
+  * have in theory up to 7 arguments to a function - r0 to r6.
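
The invoke_syscall macro is easier to follow as C. A runnable model of the
CONFIG_CPU_SPECTRE path follows (table and handlers are hypothetical; note
that, as in the assembly, an out-of-range number never dispatches
architecturally, and the clamp to zero only matters to misspeculated
execution, which a csdb at the marked point fences on ARM):

    #include <stdio.h>

    #define NR_CALLS 3

    typedef long (*call_fn)(long);

    static long sys_a(long x) { return x + 1; }
    static long sys_b(long x) { return x * 2; }
    static long sys_c(long x) { return -x;    }

    static call_fn table[NR_CALLS] = { sys_a, sys_b, sys_c };

    static long invoke(unsigned long nr, long arg)
    {
            unsigned long idx = nr >= NR_CALLS ? 0 : nr;  /* movcs \tmp, #0 */

            /* csdb() goes here on ARM: a misspeculated dispatch can at
             * worst read table[0], never memory beyond the table */
            if (nr < NR_CALLS)      /* ldrcc pc, [...]: only in-range
                                     * numbers actually dispatch */
                    return table[idx](arg);
            return -38;             /* -ENOSYS, as a stand-in */
    }

    int main(void)
    {
            printf("%ld %ld %ld\n", invoke(0, 41), invoke(2, 7), invoke(99, 7));
            return 0;
    }
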
diff --git a/queue-4.4/arm-spectre-v1-mitigate-user-accesses.patch b/queue-4.4/arm-spectre-v1-mitigate-user-accesses.patch
new file mode 100644
index 0000000..94991c7
--- /dev/null
+++ b/queue-4.4/arm-spectre-v1-mitigate-user-accesses.patch
@@ -0,0 +1,80 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:39 +0100
+Subject: ARM: spectre-v1: mitigate user accesses
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-36-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit a3c0f84765bb429ba0fd23de1c57b5e1591c9389 upstream.
+
+Spectre variant 1 attacks are about this sequence of pseudo-code:
+
+       index = load(user-manipulated pointer);
+       access(base + index * stride);
+
+In order for the cache side-channel to work, the access() must be made
+to memory where userspace can detect whether cache lines have been
+loaded.  On 32-bit ARM, this must be either user accessible memory, or
+a kernel mapping of that same user accessible memory.
+
+The problem occurs when the load() speculatively loads privileged data,
+and the subsequent access() is made to user accessible memory.
+
+Any load() which makes use of a user-manipulated pointer is a potential
+problem if the data it has loaded is used in a subsequent access.  This
+also applies to the access() if the data loaded by that access is used
+by a subsequent access.
+
+Harden the get_user() accessors against Spectre attacks by forcing out
+of bounds addresses to a NULL pointer.  This prevents get_user() being
+used as the load() step above.  As a side effect, put_user() will also
+be affected even though it isn't implicated.
+
+Also harden copy_from_user() by redoing the bounds check within the
+arm_copy_from_user() code, and NULLing the pointer if out of bounds.
+
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/assembler.h |    4 ++++
+ arch/arm/lib/copy_from_user.S    |    9 +++++++++
+ 2 files changed, 13 insertions(+)
+
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -454,6 +454,10 @@ THUMB(    orr     \reg , \reg , #PSR_T_BIT        )
+       adds    \tmp, \addr, #\size - 1
+       sbcccs  \tmp, \tmp, \limit
+       bcs     \bad
++#ifdef CONFIG_CPU_SPECTRE
++      movcs   \addr, #0
++      csdb
++#endif
+ #endif
+       .endm
+--- a/arch/arm/lib/copy_from_user.S
++++ b/arch/arm/lib/copy_from_user.S
+@@ -90,6 +90,15 @@
+       .text
+ ENTRY(arm_copy_from_user)
++#ifdef CONFIG_CPU_SPECTRE
++      get_thread_info r3
++      ldr     r3, [r3, #TI_ADDR_LIMIT]
++      adds    ip, r1, r2      @ ip=addr+size
++      sub     r3, r3, #1      @ addr_limit - 1
++      cmpcc   ip, r3          @ if (addr+size > addr_limit - 1)
++      movcs   r1, #0          @ addr = NULL
++      csdb
++#endif
+ #include "copy_template.S"
diff --git a/queue-4.4/arm-spectre-v1-use-get_user-for-__get_user.patch b/queue-4.4/arm-spectre-v1-use-get_user-for-__get_user.patch
new file mode 100644
index 0000000..1916ad6
--- /dev/null
+++ b/queue-4.4/arm-spectre-v1-use-get_user-for-__get_user.patch
@@ -0,0 +1,71 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:38 +0100
+Subject: ARM: spectre-v1: use get_user() for __get_user()
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-35-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit b1cd0a14806321721aae45f5446ed83a3647c914 upstream.
+
+Fixing __get_user() for spectre variant 1 is not sane: we would have to
+add address space bounds checking in order to validate that the location
+should be accessed, and then zero the address if found to be invalid.
+
+Since __get_user() is supposed to avoid the bounds check, and this is
+exactly what get_user() does, there's no point having two different
+implementations that are doing the same thing.  So, when the Spectre
+workarounds are required, make __get_user() an alias of get_user().
+
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/uaccess.h |   17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -288,6 +288,16 @@ static inline void set_fs(mm_segment_t f
+ #define user_addr_max() \
+       (segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())
++#ifdef CONFIG_CPU_SPECTRE
++/*
++ * When mitigating Spectre variant 1, it is not worth fixing the non-
++ * verifying accessors, because we need to add verification of the
++ * address space there.  Force these to use the standard get_user()
++ * version instead.
++ */
++#define __get_user(x, ptr) get_user(x, ptr)
++#else
++
+ /*
+  * The "__xxx" versions of the user access functions do not verify the
+  * address space - it must have been done previously with a separate
+@@ -304,12 +314,6 @@ static inline void set_fs(mm_segment_t f
+       __gu_err;                                                       \
+ })
+-#define __get_user_error(x, ptr, err)                                 \
+-({                                                                    \
+-      __get_user_err((x), (ptr), err);                                \
+-      (void) 0;                                                       \
+-})
+-
+ #define __get_user_err(x, ptr, err)                                   \
+ do {                                                                  \
+       unsigned long __gu_addr = (unsigned long)(ptr);                 \
+@@ -369,6 +373,7 @@ do {                                                                       \
+ #define __get_user_asm_word(x, addr, err)                     \
+       __get_user_asm(x, addr, err, ldr)
++#endif
+ #define __put_user_switch(x, ptr, __err, __fn)                                \
diff --git a/queue-4.4/arm-spectre-v2-add-cortex-a8-and-a15-validation-of-the-ibe-bit.patch b/queue-4.4/arm-spectre-v2-add-cortex-a8-and-a15-validation-of-the-ibe-bit.patch
new file mode 100644
index 0000000..b96f779
--- /dev/null
+++ b/queue-4.4/arm-spectre-v2-add-cortex-a8-and-a15-validation-of-the-ibe-bit.patch
@@ -0,0 +1,103 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:27 +0100
+Subject: ARM: spectre-v2: add Cortex A8 and A15 validation of the IBE bit
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-24-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit e388b80288aade31135aca23d32eee93dd106795 upstream.
+
+When the branch predictor hardening is enabled, firmware must have set
+the IBE bit in the auxiliary control register.  If this bit has not
+been set, the Spectre workarounds will not be functional.
+
+Add validation that this bit is set, and print a warning at alert level
+if this is not the case.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Boot-tested-by: Tony Lindgren <tony@atomide.com>
+Reviewed-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/mm/Makefile       |    2 +-
+ arch/arm/mm/proc-v7-bugs.c |   36 ++++++++++++++++++++++++++++++++++++
+ arch/arm/mm/proc-v7.S      |    4 ++--
+ 3 files changed, 39 insertions(+), 3 deletions(-)
+
+--- a/arch/arm/mm/Makefile
++++ b/arch/arm/mm/Makefile
+@@ -92,7 +92,7 @@ obj-$(CONFIG_CPU_MOHAWK)     += proc-mohawk.
+ obj-$(CONFIG_CPU_FEROCEON)    += proc-feroceon.o
+ obj-$(CONFIG_CPU_V6)          += proc-v6.o
+ obj-$(CONFIG_CPU_V6K)         += proc-v6.o
+-obj-$(CONFIG_CPU_V7)          += proc-v7.o
++obj-$(CONFIG_CPU_V7)          += proc-v7.o proc-v7-bugs.o
+ obj-$(CONFIG_CPU_V7M)         += proc-v7m.o
+ AFLAGS_proc-v6.o      :=-Wa,-march=armv6
+--- /dev/null
++++ b/arch/arm/mm/proc-v7-bugs.c
+@@ -0,0 +1,36 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <linux/kernel.h>
++#include <linux/smp.h>
++
++static __maybe_unused void cpu_v7_check_auxcr_set(bool *warned,
++                                                u32 mask, const char *msg)
++{
++      u32 aux_cr;
++
++      asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));
++
++      if ((aux_cr & mask) != mask) {
++              if (!*warned)
++                      pr_err("CPU%u: %s", smp_processor_id(), msg);
++              *warned = true;
++      }
++}
++
++static DEFINE_PER_CPU(bool, spectre_warned);
++
++static void check_spectre_auxcr(bool *warned, u32 bit)
++{
++      if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
++              cpu_v7_check_auxcr_set(warned, bit,
++                                     "Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
++}
++
++void cpu_v7_ca8_ibe(void)
++{
++      check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6));
++}
++
++void cpu_v7_ca15_ibe(void)
++{
++      check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0));
++}
+--- a/arch/arm/mm/proc-v7.S
++++ b/arch/arm/mm/proc-v7.S
+@@ -511,7 +511,7 @@ __v7_setup_stack:
+       globl_equ       cpu_ca8_do_suspend,     cpu_v7_do_suspend
+       globl_equ       cpu_ca8_do_resume,      cpu_v7_do_resume
+ #endif
+-      define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
++      define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca8_ibe
+       @ Cortex-A9 - needs more registers preserved across suspend/resume
+       @ and bpiall switch_mm for hardening
+@@ -544,7 +544,7 @@ __v7_setup_stack:
+       globl_equ       cpu_ca15_suspend_size,  cpu_v7_suspend_size
+       globl_equ       cpu_ca15_do_suspend,    cpu_v7_do_suspend
+       globl_equ       cpu_ca15_do_resume,     cpu_v7_do_resume
+-      define_processor_functions ca15, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
++      define_processor_functions ca15, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca15_ibe
+ #ifdef CONFIG_CPU_PJ4B
+       define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+ #endif
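
For reference, the register being validated is the Auxiliary Control Register
(ACTLR, CP15 c1/c0/1); per the two callers above, the IBE bit is bit 6 on
Cortex-A8 and bit 0 on Cortex-A15. A condensed sketch of the check (privileged
context assumed, since ACTLR is generally not readable from user mode; BIT()
and pr_err() as in the patch):

    u32 aux_cr;

    asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));   /* read ACTLR */
    if (!(aux_cr & BIT(6)))     /* BIT(0) on the Cortex-A15 variant */
            pr_err("CPU%u: Spectre v2: firmware did not set IBE\n",
                   smp_processor_id());
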
diff --git a/queue-4.4/arm-spectre-v2-add-firmware-based-hardening.patch b/queue-4.4/arm-spectre-v2-add-firmware-based-hardening.patch
new file mode 100644
index 0000000..bc25199
--- /dev/null
+++ b/queue-4.4/arm-spectre-v2-add-firmware-based-hardening.patch
@@ -0,0 +1,157 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:29 +0100
+Subject: ARM: spectre-v2: add firmware based hardening
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-26-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit 10115105cb3aa17b5da1cb726ae8dd5f6854bd93 upstream.
+
+Add firmware based hardening for cores that require more complex
+handling in firmware.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Boot-tested-by: Tony Lindgren <tony@atomide.com>
+Reviewed-by: Tony Lindgren <tony@atomide.com>
+Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/mm/proc-v7-bugs.c |   60 +++++++++++++++++++++++++++++++++++++++++++++
+ arch/arm/mm/proc-v7.S      |   21 +++++++++++++++
+ 2 files changed, 81 insertions(+)
+
+--- a/arch/arm/mm/proc-v7-bugs.c
++++ b/arch/arm/mm/proc-v7-bugs.c
+@@ -1,14 +1,20 @@
+ // SPDX-License-Identifier: GPL-2.0
++#include <linux/arm-smccc.h>
+ #include <linux/kernel.h>
++#include <linux/psci.h>
+ #include <linux/smp.h>
+ #include <asm/cp15.h>
+ #include <asm/cputype.h>
++#include <asm/proc-fns.h>
+ #include <asm/system_misc.h>
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
++extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
++extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
++
+ static void harden_branch_predictor_bpiall(void)
+ {
+       write_sysreg(0, BPIALL);
+@@ -19,6 +25,16 @@ static void harden_branch_predictor_icia
+       write_sysreg(0, ICIALLU);
+ }
++static void __maybe_unused call_smc_arch_workaround_1(void)
++{
++      arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
++}
++
++static void __maybe_unused call_hvc_arch_workaround_1(void)
++{
++      arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
++}
++
+ static void cpu_v7_spectre_init(void)
+ {
+       const char *spectre_v2_method = NULL;
+@@ -45,7 +61,51 @@ static void cpu_v7_spectre_init(void)
+                       harden_branch_predictor_iciallu;
+               spectre_v2_method = "ICIALLU";
+               break;
++
++#ifdef CONFIG_ARM_PSCI
++      default:
++              /* Other ARM CPUs require no workaround */
++              if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
++                      break;
++              /* fallthrough */
++              /* Cortex A57/A72 require firmware workaround */
++      case ARM_CPU_PART_CORTEX_A57:
++      case ARM_CPU_PART_CORTEX_A72: {
++              struct arm_smccc_res res;
++
++              if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
++                      break;
++
++              switch (psci_ops.conduit) {
++              case PSCI_CONDUIT_HVC:
++                      arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++                                        ARM_SMCCC_ARCH_WORKAROUND_1, &res);
++                      if ((int)res.a0 != 0)
++                              break;
++                      per_cpu(harden_branch_predictor_fn, cpu) =
++                              call_hvc_arch_workaround_1;
++                      processor.switch_mm = cpu_v7_hvc_switch_mm;
++                      spectre_v2_method = "hypervisor";
++                      break;
++
++              case PSCI_CONDUIT_SMC:
++                      arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++                                        ARM_SMCCC_ARCH_WORKAROUND_1, &res);
++                      if ((int)res.a0 != 0)
++                              break;
++                      per_cpu(harden_branch_predictor_fn, cpu) =
++                              call_smc_arch_workaround_1;
++                      processor.switch_mm = cpu_v7_smc_switch_mm;
++                      spectre_v2_method = "firmware";
++                      break;
++
++              default:
++                      break;
++              }
+       }
++#endif
++      }
++
+       if (spectre_v2_method)
+               pr_info("CPU%u: Spectre v2: using %s workaround\n",
+                       smp_processor_id(), spectre_v2_method);
+--- a/arch/arm/mm/proc-v7.S
++++ b/arch/arm/mm/proc-v7.S
+@@ -9,6 +9,7 @@
+  *
+  *  This is the "shell" of the ARMv7 processor support.
+  */
++#include <linux/arm-smccc.h>
+ #include <linux/init.h>
+ #include <linux/linkage.h>
+ #include <asm/assembler.h>
+@@ -87,6 +88,26 @@ ENTRY(cpu_v7_dcache_clean_area)
+       ret     lr
+ ENDPROC(cpu_v7_dcache_clean_area)
++#ifdef CONFIG_ARM_PSCI
++      .arch_extension sec
++ENTRY(cpu_v7_smc_switch_mm)
++      stmfd   sp!, {r0 - r3}
++      movw    r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
++      movt    r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
++      smc     #0
++      ldmfd   sp!, {r0 - r3}
++      b       cpu_v7_switch_mm
++ENDPROC(cpu_v7_smc_switch_mm)
++      .arch_extension virt
++ENTRY(cpu_v7_hvc_switch_mm)
++      stmfd   sp!, {r0 - r3}
++      movw    r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
++      movt    r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
++      hvc     #0
++      ldmfd   sp!, {r0 - r3}
++      b       cpu_v7_switch_mm
++ENDPROC(cpu_v7_hvc_switch_mm)
++#endif
+ ENTRY(cpu_v7_iciallu_switch_mm)
+       mov     r3, #0
+       mcr     p15, 0, r3, c7, c5, 0           @ ICIALLU
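
Condensed, the probe-and-enable sequence this patch wires up looks like the
sketch below (HVC conduit shown, the SMC leg is symmetrical; constants come
from linux/arm-smccc.h). SMCCC 1.0 offers no ARCH_FEATURES query, which is why
the version check comes first:

    struct arm_smccc_res res;

    if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
            return;                 /* cannot probe: do nothing */

    arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                      ARM_SMCCC_ARCH_WORKAROUND_1, &res);
    if ((int)res.a0 != 0)
            return;                 /* workaround not implemented */

    /* from now on, invalidate the predictor state via firmware on
     * context switches (and, per the earlier patch, user aborts): */
    arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
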
diff --git a/queue-4.4/arm-spectre-v2-harden-branch-predictor-on-context-switches.patch b/queue-4.4/arm-spectre-v2-harden-branch-predictor-on-context-switches.patch
new file mode 100644
index 0000000..ee52f7b
--- /dev/null
+++ b/queue-4.4/arm-spectre-v2-harden-branch-predictor-on-context-switches.patch
@@ -0,0 +1,271 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:26 +0100
+Subject: ARM: spectre-v2: harden branch predictor on context switches
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-23-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit 06c23f5ffe7ad45b908d0fff604dae08a7e334b9 upstream.
+
+Required manual merge of arch/arm/mm/proc-v7.S.
+
+Harden the branch predictor against Spectre v2 attacks on context
+switches for ARMv7 and later CPUs.  We do this by:
+
+Cortex A9, A12, A17, A73, A75: invalidating the BTB.
+Cortex A15, Brahma B15: invalidating the instruction cache.
+
+Cortex A57 and Cortex A72 are not addressed in this patch.
+
+Cortex R7 and Cortex R8 are also not addressed as we do not enforce
+memory protection on these cores.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Boot-tested-by: Tony Lindgren <tony@atomide.com>
+Reviewed-by: Tony Lindgren <tony@atomide.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/mm/Kconfig          |   19 ++++++
+ arch/arm/mm/proc-v7-2level.S |    6 --
+ arch/arm/mm/proc-v7.S        |  125 +++++++++++++++++++++++++++++++++----------
+ 3 files changed, 115 insertions(+), 35 deletions(-)
+
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -797,6 +797,25 @@ config CPU_BPREDICT_DISABLE
+ config CPU_SPECTRE
+       bool
++config HARDEN_BRANCH_PREDICTOR
++      bool "Harden the branch predictor against aliasing attacks" if EXPERT
++      depends on CPU_SPECTRE
++      default y
++      help
++         Speculation attacks against some high-performance processors rely
++         on being able to manipulate the branch predictor for a victim
++         context by executing aliasing branches in the attacker context.
++         Such attacks can be partially mitigated against by clearing
++         internal branch predictor state and limiting the prediction
++         logic in some situations.
++
++         This config option will take CPU-specific actions to harden
++         the branch predictor against aliasing attacks and may rely on
++         specific instruction sequences or control bits being set by
++         the system firmware.
++
++         If unsure, say Y.
++
+ config TLS_REG_EMUL
+       bool
+       select NEED_KUSER_HELPERS
+--- a/arch/arm/mm/proc-v7-2level.S
++++ b/arch/arm/mm/proc-v7-2level.S
+@@ -41,11 +41,6 @@
+  *    even on Cortex-A8 revisions not affected by 430973.
+  *    If IBE is not set, the flush BTAC/BTB won't do anything.
+  */
+-ENTRY(cpu_ca8_switch_mm)
+-#ifdef CONFIG_MMU
+-      mov     r2, #0
+-      mcr     p15, 0, r2, c7, c5, 6           @ flush BTAC/BTB
+-#endif
+ ENTRY(cpu_v7_switch_mm)
+ #ifdef CONFIG_MMU
+       mmid    r1, r1                          @ get mm->context.id
+@@ -66,7 +61,6 @@ ENTRY(cpu_v7_switch_mm)
+ #endif
+       bx      lr
+ ENDPROC(cpu_v7_switch_mm)
+-ENDPROC(cpu_ca8_switch_mm)
+ /*
+  *    cpu_v7_set_pte_ext(ptep, pte)
+--- a/arch/arm/mm/proc-v7.S
++++ b/arch/arm/mm/proc-v7.S
+@@ -87,6 +87,17 @@ ENTRY(cpu_v7_dcache_clean_area)
+       ret     lr
+ ENDPROC(cpu_v7_dcache_clean_area)
++ENTRY(cpu_v7_iciallu_switch_mm)
++      mov     r3, #0
++      mcr     p15, 0, r3, c7, c5, 0           @ ICIALLU
++      b       cpu_v7_switch_mm
++ENDPROC(cpu_v7_iciallu_switch_mm)
++ENTRY(cpu_v7_bpiall_switch_mm)
++      mov     r3, #0
++      mcr     p15, 0, r3, c7, c5, 6           @ flush BTAC/BTB
++      b       cpu_v7_switch_mm
++ENDPROC(cpu_v7_bpiall_switch_mm)
++
+       string  cpu_v7_name, "ARMv7 Processor"
+       .align
+@@ -152,31 +163,6 @@ ENTRY(cpu_v7_do_resume)
+ ENDPROC(cpu_v7_do_resume)
+ #endif
+-/*
+- * Cortex-A8
+- */
+-      globl_equ       cpu_ca8_proc_init,      cpu_v7_proc_init
+-      globl_equ       cpu_ca8_proc_fin,       cpu_v7_proc_fin
+-      globl_equ       cpu_ca8_reset,          cpu_v7_reset
+-      globl_equ       cpu_ca8_do_idle,        cpu_v7_do_idle
+-      globl_equ       cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
+-      globl_equ       cpu_ca8_set_pte_ext,    cpu_v7_set_pte_ext
+-      globl_equ       cpu_ca8_suspend_size,   cpu_v7_suspend_size
+-#ifdef CONFIG_ARM_CPU_SUSPEND
+-      globl_equ       cpu_ca8_do_suspend,     cpu_v7_do_suspend
+-      globl_equ       cpu_ca8_do_resume,      cpu_v7_do_resume
+-#endif
+-
+-/*
+- * Cortex-A9 processor functions
+- */
+-      globl_equ       cpu_ca9mp_proc_init,    cpu_v7_proc_init
+-      globl_equ       cpu_ca9mp_proc_fin,     cpu_v7_proc_fin
+-      globl_equ       cpu_ca9mp_reset,        cpu_v7_reset
+-      globl_equ       cpu_ca9mp_do_idle,      cpu_v7_do_idle
+-      globl_equ       cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
+-      globl_equ       cpu_ca9mp_switch_mm,    cpu_v7_switch_mm
+-      globl_equ       cpu_ca9mp_set_pte_ext,  cpu_v7_set_pte_ext
+ .globl        cpu_ca9mp_suspend_size
+ .equ  cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2
+ #ifdef CONFIG_ARM_CPU_SUSPEND
+@@ -490,10 +476,75 @@ __v7_setup_stack:
+       @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+       define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
++
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++      @ generic v7 bpiall on context switch
++      globl_equ       cpu_v7_bpiall_proc_init,        cpu_v7_proc_init
++      globl_equ       cpu_v7_bpiall_proc_fin,         cpu_v7_proc_fin
++      globl_equ       cpu_v7_bpiall_reset,            cpu_v7_reset
++      globl_equ       cpu_v7_bpiall_do_idle,          cpu_v7_do_idle
++      globl_equ       cpu_v7_bpiall_dcache_clean_area, cpu_v7_dcache_clean_area
++      globl_equ       cpu_v7_bpiall_set_pte_ext,      cpu_v7_set_pte_ext
++      globl_equ       cpu_v7_bpiall_suspend_size,     cpu_v7_suspend_size
++#ifdef CONFIG_ARM_CPU_SUSPEND
++      globl_equ       cpu_v7_bpiall_do_suspend,       cpu_v7_do_suspend
++      globl_equ       cpu_v7_bpiall_do_resume,        cpu_v7_do_resume
++#endif
++      define_processor_functions v7_bpiall, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
++
++#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_bpiall_processor_functions
++#else
++#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_processor_functions
++#endif
++
+ #ifndef CONFIG_ARM_LPAE
++      @ Cortex-A8 - always needs bpiall switch_mm implementation
++      globl_equ       cpu_ca8_proc_init,      cpu_v7_proc_init
++      globl_equ       cpu_ca8_proc_fin,       cpu_v7_proc_fin
++      globl_equ       cpu_ca8_reset,          cpu_v7_reset
++      globl_equ       cpu_ca8_do_idle,        cpu_v7_do_idle
++      globl_equ       cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
++      globl_equ       cpu_ca8_set_pte_ext,    cpu_v7_set_pte_ext
++      globl_equ       cpu_ca8_switch_mm,      cpu_v7_bpiall_switch_mm
++      globl_equ       cpu_ca8_suspend_size,   cpu_v7_suspend_size
++#ifdef CONFIG_ARM_CPU_SUSPEND
++      globl_equ       cpu_ca8_do_suspend,     cpu_v7_do_suspend
++      globl_equ       cpu_ca8_do_resume,      cpu_v7_do_resume
++#endif
+       define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
++
++      @ Cortex-A9 - needs more registers preserved across suspend/resume
++      @ and bpiall switch_mm for hardening
++      globl_equ       cpu_ca9mp_proc_init,    cpu_v7_proc_init
++      globl_equ       cpu_ca9mp_proc_fin,     cpu_v7_proc_fin
++      globl_equ       cpu_ca9mp_reset,        cpu_v7_reset
++      globl_equ       cpu_ca9mp_do_idle,      cpu_v7_do_idle
++      globl_equ       cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++      globl_equ       cpu_ca9mp_switch_mm,    cpu_v7_bpiall_switch_mm
++#else
++      globl_equ       cpu_ca9mp_switch_mm,    cpu_v7_switch_mm
++#endif
++      globl_equ       cpu_ca9mp_set_pte_ext,  cpu_v7_set_pte_ext
+       define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+ #endif
++
++      @ Cortex-A15 - needs iciallu switch_mm for hardening
++      globl_equ       cpu_ca15_proc_init,     cpu_v7_proc_init
++      globl_equ       cpu_ca15_proc_fin,      cpu_v7_proc_fin
++      globl_equ       cpu_ca15_reset,         cpu_v7_reset
++      globl_equ       cpu_ca15_do_idle,       cpu_v7_do_idle
++      globl_equ       cpu_ca15_dcache_clean_area, cpu_v7_dcache_clean_area
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++      globl_equ       cpu_ca15_switch_mm,     cpu_v7_iciallu_switch_mm
++#else
++      globl_equ       cpu_ca15_switch_mm,     cpu_v7_switch_mm
++#endif
++      globl_equ       cpu_ca15_set_pte_ext,   cpu_v7_set_pte_ext
++      globl_equ       cpu_ca15_suspend_size,  cpu_v7_suspend_size
++      globl_equ       cpu_ca15_do_suspend,    cpu_v7_do_suspend
++      globl_equ       cpu_ca15_do_resume,     cpu_v7_do_resume
++      define_processor_functions ca15, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+ #ifdef CONFIG_CPU_PJ4B
+       define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+ #endif
+@@ -600,7 +651,7 @@ __v7_ca7mp_proc_info:
+ __v7_ca12mp_proc_info:
+       .long   0x410fc0d0
+       .long   0xff0ffff0
+-      __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup
++      __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
+       .size   __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
+       /*
+@@ -610,7 +661,7 @@ __v7_ca12mp_proc_info:
+ __v7_ca15mp_proc_info:
+       .long   0x410fc0f0
+       .long   0xff0ffff0
+-      __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup
++      __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup, proc_fns = ca15_processor_functions
+       .size   __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
+       /*
+@@ -620,7 +671,7 @@ __v7_ca15mp_proc_info:
+ __v7_b15mp_proc_info:
+       .long   0x420f00f0
+       .long   0xff0ffff0
+-      __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup
++      __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, proc_fns = ca15_processor_functions
+       .size   __v7_b15mp_proc_info, . - __v7_b15mp_proc_info
+       /*
+@@ -630,9 +681,25 @@ __v7_b15mp_proc_info:
+ __v7_ca17mp_proc_info:
+       .long   0x410fc0e0
+       .long   0xff0ffff0
+-      __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup
++      __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
+       .size   __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info
++      /* ARM Ltd. Cortex A73 processor */
++      .type   __v7_ca73_proc_info, #object
++__v7_ca73_proc_info:
++      .long   0x410fd090
++      .long   0xff0ffff0
++      __v7_proc __v7_ca73_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
++      .size   __v7_ca73_proc_info, . - __v7_ca73_proc_info
++
++      /* ARM Ltd. Cortex A75 processor */
++      .type   __v7_ca75_proc_info, #object
++__v7_ca75_proc_info:
++      .long   0x410fd0a0
++      .long   0xff0ffff0
++      __v7_proc __v7_ca75_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
++      .size   __v7_ca75_proc_info, . - __v7_ca75_proc_info
++
+       /*
+        * Qualcomm Inc. Krait processors.
+        */
diff --git a/queue-4.4/arm-spectre-v2-harden-user-aborts-in-kernel-space.patch b/queue-4.4/arm-spectre-v2-harden-user-aborts-in-kernel-space.patch
new file mode 100644
index 0000000..aca93bb
--- /dev/null
+++ b/queue-4.4/arm-spectre-v2-harden-user-aborts-in-kernel-space.patch
@@ -0,0 +1,228 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:28 +0100
+Subject: ARM: spectre-v2: harden user aborts in kernel space
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-25-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit f5fe12b1eaee220ce62ff9afb8b90929c396595f upstream.
+
+In order to prevent aliasing attacks on the branch predictor,
+invalidate the BTB or instruction cache on CPUs that are known to be
+affected when taking an abort on an address that is outside of the user
+task limit:
+
+Cortex A8, A9, A12, A17, A73, A75: flush BTB.
+Cortex A15, Brahma B15: invalidate icache.
+
+If the IBE bit is not set, then there is little point in enabling the
+workaround.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Boot-tested-by: Tony Lindgren <tony@atomide.com>
+Reviewed-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/cp15.h        |    3 +
+ arch/arm/include/asm/system_misc.h |   15 +++++++
+ arch/arm/mm/fault.c                |    3 +
+ arch/arm/mm/proc-v7-bugs.c         |   73 ++++++++++++++++++++++++++++++++++---
+ arch/arm/mm/proc-v7.S              |    8 ++--
+ 5 files changed, 94 insertions(+), 8 deletions(-)
+
+--- a/arch/arm/include/asm/cp15.h
++++ b/arch/arm/include/asm/cp15.h
+@@ -64,6 +64,9 @@
+ #define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
+ #define write_sysreg(v, ...)          __write_sysreg(v, __VA_ARGS__)
++#define BPIALL                                __ACCESS_CP15(c7, 0, c5, 6)
++#define ICIALLU                               __ACCESS_CP15(c7, 0, c5, 0)
++
+ extern unsigned long cr_alignment;    /* defined in entry-armv.S */
+ static inline unsigned long get_cr(void)
+--- a/arch/arm/include/asm/system_misc.h
++++ b/arch/arm/include/asm/system_misc.h
+@@ -7,6 +7,7 @@
+ #include <linux/linkage.h>
+ #include <linux/irqflags.h>
+ #include <linux/reboot.h>
++#include <linux/percpu.h>
+ extern void cpu_init(void);
+@@ -14,6 +15,20 @@ void soft_restart(unsigned long);
+ extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+ extern void (*arm_pm_idle)(void);
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++typedef void (*harden_branch_predictor_fn_t)(void);
++DECLARE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
++static inline void harden_branch_predictor(void)
++{
++      harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn,
++                                                smp_processor_id());
++      if (fn)
++              fn();
++}
++#else
++#define harden_branch_predictor() do { } while (0)
++#endif
++
+ #define UDBG_UNDEFINED        (1 << 0)
+ #define UDBG_SYSCALL  (1 << 1)
+ #define UDBG_BADABORT (1 << 2)
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -163,6 +163,9 @@ __do_user_fault(struct task_struct *tsk,
+ {
+       struct siginfo si;
++      if (addr > TASK_SIZE)
++              harden_branch_predictor();
++
+ #ifdef CONFIG_DEBUG_USER
+       if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
+           ((user_debug & UDBG_BUS)  && (sig == SIGBUS))) {
+--- a/arch/arm/mm/proc-v7-bugs.c
++++ b/arch/arm/mm/proc-v7-bugs.c
+@@ -2,7 +2,61 @@
+ #include <linux/kernel.h>
+ #include <linux/smp.h>
+-static __maybe_unused void cpu_v7_check_auxcr_set(bool *warned,
++#include <asm/cp15.h>
++#include <asm/cputype.h>
++#include <asm/system_misc.h>
++
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
++
++static void harden_branch_predictor_bpiall(void)
++{
++      write_sysreg(0, BPIALL);
++}
++
++static void harden_branch_predictor_iciallu(void)
++{
++      write_sysreg(0, ICIALLU);
++}
++
++static void cpu_v7_spectre_init(void)
++{
++      const char *spectre_v2_method = NULL;
++      int cpu = smp_processor_id();
++
++      if (per_cpu(harden_branch_predictor_fn, cpu))
++              return;
++
++      switch (read_cpuid_part()) {
++      case ARM_CPU_PART_CORTEX_A8:
++      case ARM_CPU_PART_CORTEX_A9:
++      case ARM_CPU_PART_CORTEX_A12:
++      case ARM_CPU_PART_CORTEX_A17:
++      case ARM_CPU_PART_CORTEX_A73:
++      case ARM_CPU_PART_CORTEX_A75:
++              per_cpu(harden_branch_predictor_fn, cpu) =
++                      harden_branch_predictor_bpiall;
++              spectre_v2_method = "BPIALL";
++              break;
++
++      case ARM_CPU_PART_CORTEX_A15:
++      case ARM_CPU_PART_BRAHMA_B15:
++              per_cpu(harden_branch_predictor_fn, cpu) =
++                      harden_branch_predictor_iciallu;
++              spectre_v2_method = "ICIALLU";
++              break;
++      }
++      if (spectre_v2_method)
++              pr_info("CPU%u: Spectre v2: using %s workaround\n",
++                      smp_processor_id(), spectre_v2_method);
++}
++#else
++static void cpu_v7_spectre_init(void)
++{
++}
++#endif
++
++static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
+                                                 u32 mask, const char *msg)
+ {
+       u32 aux_cr;
+@@ -13,24 +67,33 @@ static __maybe_unused void cpu_v7_check_
+               if (!*warned)
+                       pr_err("CPU%u: %s", smp_processor_id(), msg);
+               *warned = true;
++              return false;
+       }
++      return true;
+ }
+ static DEFINE_PER_CPU(bool, spectre_warned);
+-static void check_spectre_auxcr(bool *warned, u32 bit)
++static bool check_spectre_auxcr(bool *warned, u32 bit)
+ {
+-      if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
++      return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) &&
+               cpu_v7_check_auxcr_set(warned, bit,
+                                      "Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
+ }
+ void cpu_v7_ca8_ibe(void)
+ {
+-      check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6));
++      if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
++              cpu_v7_spectre_init();
+ }
+ void cpu_v7_ca15_ibe(void)
+ {
+-      check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0));
++      if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
++              cpu_v7_spectre_init();
++}
++
++void cpu_v7_bugs_init(void)
++{
++      cpu_v7_spectre_init();
+ }
+--- a/arch/arm/mm/proc-v7.S
++++ b/arch/arm/mm/proc-v7.S
+@@ -474,8 +474,10 @@ __v7_setup_stack:
+       __INITDATA
++      .weak cpu_v7_bugs_init
++
+       @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+-      define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
++      define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       @ generic v7 bpiall on context switch
+@@ -490,7 +492,7 @@ __v7_setup_stack:
+       globl_equ       cpu_v7_bpiall_do_suspend,       cpu_v7_do_suspend
+       globl_equ       cpu_v7_bpiall_do_resume,        cpu_v7_do_resume
+ #endif
+-      define_processor_functions v7_bpiall, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
++      define_processor_functions v7_bpiall, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
+ #define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_bpiall_processor_functions
+ #else
+@@ -526,7 +528,7 @@ __v7_setup_stack:
+       globl_equ       cpu_ca9mp_switch_mm,    cpu_v7_switch_mm
+ #endif
+       globl_equ       cpu_ca9mp_set_pte_ext,  cpu_v7_set_pte_ext
+-      define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
++      define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
+ #endif
+       @ Cortex-A15 - needs iciallu switch_mm for hardening
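
The proc-v7.S hunk above relies on a weak default hook: every define_processor_functions entry points its bugs slot at cpu_v7_bugs_init, declared .weak so a variant-specific strong definition can win at link time. A minimal user-space sketch of that linkage trick, assuming GCC/Clang on an ELF target; all names here are invented for illustration, not kernel symbols:

#include <stdio.h>

/* Generic fallback, used when no variant supplies its own hook. */
void default_bugs_init(void) { puts("generic: no per-part workaround"); }

/* Weak alias: a strong cpu_bugs_init() defined elsewhere overrides this,
 * just as a proc variant can override the weak cpu_v7_bugs_init above. */
void cpu_bugs_init(void) __attribute__((weak, alias("default_bugs_init")));

int main(void)
{
        cpu_bugs_init();  /* resolves to the strong definition if one is linked */
        return 0;
}
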
diff --git a/queue-4.4/arm-spectre-v2-per-cpu-vtables-to-work-around-big.little-systems.patch b/queue-4.4/arm-spectre-v2-per-cpu-vtables-to-work-around-big.little-systems.patch
new file mode 100644
index 0000000..87ed066
--- /dev/null
@@ -0,0 +1,218 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:52 +0100
+Subject: ARM: spectre-v2: per-CPU vtables to work around big.Little systems
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-49-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit 383fb3ee8024d596f488d2dbaf45e572897acbdb upstream.
+
+In big.Little systems, some CPUs require the Spectre workarounds in
+paths such as the context switch, but other CPUs do not.  In order
+to handle these differences, we need per-CPU vtables.
+
+We are unable to use the kernel's per-CPU variables to support this
+as per-CPU is not initialised at times when we need access to the
+vtables, so we have to use an array indexed by logical CPU number.
+
+We use an array-of-pointers to avoid having function pointers in
+the kernel's read/write .data section.
+
+Note: Added include of linux/slab.h in arch/arm/smp.c.
+
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/proc-fns.h |   23 +++++++++++++++++++++++
+ arch/arm/kernel/setup.c         |    5 +++++
+ arch/arm/kernel/smp.c           |   32 ++++++++++++++++++++++++++++++++
+ arch/arm/mm/proc-v7-bugs.c      |   17 ++---------------
+ 4 files changed, 62 insertions(+), 15 deletions(-)
+
+--- a/arch/arm/include/asm/proc-fns.h
++++ b/arch/arm/include/asm/proc-fns.h
+@@ -104,12 +104,35 @@ extern void cpu_do_resume(void *);
+ #else
+ extern struct processor processor;
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++#include <linux/smp.h>
++/*
++ * This can't be a per-cpu variable because we need to access it before
++ * per-cpu has been initialised.  We have a couple of functions that are
++ * called in a pre-emptible context, and so can't use smp_processor_id()
++ * there, hence PROC_TABLE().  We insist in init_proc_vtable() that the
++ * function pointers for these are identical across all CPUs.
++ */
++extern struct processor *cpu_vtable[];
++#define PROC_VTABLE(f)                        cpu_vtable[smp_processor_id()]->f
++#define PROC_TABLE(f)                 cpu_vtable[0]->f
++static inline void init_proc_vtable(const struct processor *p)
++{
++      unsigned int cpu = smp_processor_id();
++      *cpu_vtable[cpu] = *p;
++      WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
++                   cpu_vtable[0]->dcache_clean_area);
++      WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
++                   cpu_vtable[0]->set_pte_ext);
++}
++#else
+ #define PROC_VTABLE(f)                        processor.f
+ #define PROC_TABLE(f)                 processor.f
+ static inline void init_proc_vtable(const struct processor *p)
+ {
+       processor = *p;
+ }
++#endif
+ #define cpu_proc_init                 PROC_VTABLE(_proc_init)
+ #define cpu_check_bugs                        PROC_VTABLE(check_bugs)
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -113,6 +113,11 @@ EXPORT_SYMBOL(elf_hwcap2);
+ #ifdef MULTI_CPU
+ struct processor processor __read_mostly;
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++struct processor *cpu_vtable[NR_CPUS] = {
++      [0] = &processor,
++};
++#endif
+ #endif
+ #ifdef MULTI_TLB
+ struct cpu_tlb_fns cpu_tlb __read_mostly;
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -27,6 +27,7 @@
+ #include <linux/completion.h>
+ #include <linux/cpufreq.h>
+ #include <linux/irq_work.h>
++#include <linux/slab.h>
+ #include <linux/atomic.h>
+ #include <asm/bugs.h>
+@@ -40,6 +41,7 @@
+ #include <asm/mmu_context.h>
+ #include <asm/pgtable.h>
+ #include <asm/pgalloc.h>
++#include <asm/procinfo.h>
+ #include <asm/processor.h>
+ #include <asm/sections.h>
+ #include <asm/tlbflush.h>
+@@ -96,6 +98,30 @@ static unsigned long get_arch_pgd(pgd_t
+ #endif
+ }
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++static int secondary_biglittle_prepare(unsigned int cpu)
++{
++      if (!cpu_vtable[cpu])
++              cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
++
++      return cpu_vtable[cpu] ? 0 : -ENOMEM;
++}
++
++static void secondary_biglittle_init(void)
++{
++      init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
++}
++#else
++static int secondary_biglittle_prepare(unsigned int cpu)
++{
++      return 0;
++}
++
++static void secondary_biglittle_init(void)
++{
++}
++#endif
++
+ int __cpu_up(unsigned int cpu, struct task_struct *idle)
+ {
+       int ret;
+@@ -103,6 +129,10 @@ int __cpu_up(unsigned int cpu, struct ta
+       if (!smp_ops.smp_boot_secondary)
+               return -ENOSYS;
++      ret = secondary_biglittle_prepare(cpu);
++      if (ret)
++              return ret;
++
+       /*
+        * We need to tell the secondary core where to find
+        * its stack and the page tables.
+@@ -354,6 +384,8 @@ asmlinkage void secondary_start_kernel(v
+       struct mm_struct *mm = &init_mm;
+       unsigned int cpu;
++      secondary_biglittle_init();
++
+       /*
+        * The identity mapping is uncached (strongly ordered), so
+        * switch away from it before attempting any exclusive accesses.
+--- a/arch/arm/mm/proc-v7-bugs.c
++++ b/arch/arm/mm/proc-v7-bugs.c
+@@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void)
+       case ARM_CPU_PART_CORTEX_A17:
+       case ARM_CPU_PART_CORTEX_A73:
+       case ARM_CPU_PART_CORTEX_A75:
+-              if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
+-                      goto bl_error;
+               per_cpu(harden_branch_predictor_fn, cpu) =
+                       harden_branch_predictor_bpiall;
+               spectre_v2_method = "BPIALL";
+@@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void)
+       case ARM_CPU_PART_CORTEX_A15:
+       case ARM_CPU_PART_BRAHMA_B15:
+-              if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
+-                      goto bl_error;
+               per_cpu(harden_branch_predictor_fn, cpu) =
+                       harden_branch_predictor_iciallu;
+               spectre_v2_method = "ICIALLU";
+@@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void)
+                                         ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+                       if ((int)res.a0 != 0)
+                               break;
+-                      if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
+-                              goto bl_error;
+                       per_cpu(harden_branch_predictor_fn, cpu) =
+                               call_hvc_arch_workaround_1;
+-                      processor.switch_mm = cpu_v7_hvc_switch_mm;
++                      cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
+                       spectre_v2_method = "hypervisor";
+                       break;
+@@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void)
+                                         ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+                       if ((int)res.a0 != 0)
+                               break;
+-                      if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
+-                              goto bl_error;
+                       per_cpu(harden_branch_predictor_fn, cpu) =
+                               call_smc_arch_workaround_1;
+-                      processor.switch_mm = cpu_v7_smc_switch_mm;
++                      cpu_do_switch_mm = cpu_v7_smc_switch_mm;
+                       spectre_v2_method = "firmware";
+                       break;
+@@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void)
+       if (spectre_v2_method)
+               pr_info("CPU%u: Spectre v2: using %s workaround\n",
+                       smp_processor_id(), spectre_v2_method);
+-      return;
+-
+-bl_error:
+-      pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
+-              cpu);
+ }
+ #else
+ static void cpu_v7_spectre_init(void)
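
The shape introduced by this patch is worth seeing in isolation: a plain array of pointers indexed by logical CPU, with slot 0 statically bound to the boot-time struct and secondary slots allocated before bring-up. A compilable user-space model of that layout, a sketch only; the names and the two switch_mm stand-ins are invented, and kzalloc/smp details are elided:

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct processor { void (*switch_mm)(void); };

static void generic_switch_mm(void)  { puts("generic switch_mm"); }
static void hardened_switch_mm(void) { puts("hardened switch_mm"); }

static struct processor boot_proc = { .switch_mm = generic_switch_mm };
static struct processor *cpu_vtable[NR_CPUS] = { [0] = &boot_proc };

/* Allocate a secondary slot up front, as secondary_biglittle_prepare() does. */
static int secondary_prepare(unsigned int cpu)
{
        if (!cpu_vtable[cpu])
                cpu_vtable[cpu] = calloc(1, sizeof(*cpu_vtable[cpu]));
        return cpu_vtable[cpu] ? 0 : -1;
}

int main(void)
{
        if (secondary_prepare(1))
                return 1;
        *cpu_vtable[1] = boot_proc;                     /* init_proc_vtable() analogue */
        cpu_vtable[1]->switch_mm = hardened_switch_mm;  /* per-CPU override */
        cpu_vtable[0]->switch_mm();
        cpu_vtable[1]->switch_mm();
        return 0;
}
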
diff --git a/queue-4.4/arm-spectre-v2-warn-about-incorrect-context-switching-functions.patch b/queue-4.4/arm-spectre-v2-warn-about-incorrect-context-switching-functions.patch
new file mode 100644
index 0000000..2b52807
--- /dev/null
@@ -0,0 +1,87 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:30 +0100
+Subject: ARM: spectre-v2: warn about incorrect context switching functions
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-27-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit c44f366ea7c85e1be27d08f2f0880f4120698125 upstream.
+
+Warn at error level if the context switching function is not what we
+are expecting.  This can happen with big.Little systems, which we
+currently do not support.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Boot-tested-by: Tony Lindgren <tony@atomide.com>
+Reviewed-by: Tony Lindgren <tony@atomide.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/mm/proc-v7-bugs.c |   15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/arch/arm/mm/proc-v7-bugs.c
++++ b/arch/arm/mm/proc-v7-bugs.c
+@@ -12,6 +12,8 @@
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
++extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
++extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+ extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+ extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+@@ -50,6 +52,8 @@ static void cpu_v7_spectre_init(void)
+       case ARM_CPU_PART_CORTEX_A17:
+       case ARM_CPU_PART_CORTEX_A73:
+       case ARM_CPU_PART_CORTEX_A75:
++              if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
++                      goto bl_error;
+               per_cpu(harden_branch_predictor_fn, cpu) =
+                       harden_branch_predictor_bpiall;
+               spectre_v2_method = "BPIALL";
+@@ -57,6 +61,8 @@ static void cpu_v7_spectre_init(void)
+       case ARM_CPU_PART_CORTEX_A15:
+       case ARM_CPU_PART_BRAHMA_B15:
++              if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
++                      goto bl_error;
+               per_cpu(harden_branch_predictor_fn, cpu) =
+                       harden_branch_predictor_iciallu;
+               spectre_v2_method = "ICIALLU";
+@@ -82,6 +88,8 @@ static void cpu_v7_spectre_init(void)
+                                         ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+                       if ((int)res.a0 != 0)
+                               break;
++                      if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
++                              goto bl_error;
+                       per_cpu(harden_branch_predictor_fn, cpu) =
+                               call_hvc_arch_workaround_1;
+                       processor.switch_mm = cpu_v7_hvc_switch_mm;
+@@ -93,6 +101,8 @@ static void cpu_v7_spectre_init(void)
+                                         ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+                       if ((int)res.a0 != 0)
+                               break;
++                      if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
++                              goto bl_error;
+                       per_cpu(harden_branch_predictor_fn, cpu) =
+                               call_smc_arch_workaround_1;
+                       processor.switch_mm = cpu_v7_smc_switch_mm;
+@@ -109,6 +119,11 @@ static void cpu_v7_spectre_init(void)
+       if (spectre_v2_method)
+               pr_info("CPU%u: Spectre v2: using %s workaround\n",
+                       smp_processor_id(), spectre_v2_method);
++      return;
++
++bl_error:
++      pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
++              cpu);
+ }
+ #else
+ static void cpu_v7_spectre_init(void)
diff --git a/queue-4.4/arm-split-out-processor-lookup.patch b/queue-4.4/arm-split-out-processor-lookup.patch
new file mode 100644
index 0000000..fb8addf
--- /dev/null
@@ -0,0 +1,90 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:49 +0100
+Subject: ARM: split out processor lookup
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-46-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit 65987a8553061515b5851b472081aedb9837a391 upstream.
+
+Split out the lookup of the processor type and associated error handling
+from the rest of setup_processor() - we will need to use this in the
+secondary CPU bringup path for big.Little Spectre variant 2 mitigation.
+
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Reviewed-by: Julien Thierry <julien.thierry@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/cputype.h |    1 +
+ arch/arm/kernel/setup.c        |   31 +++++++++++++++++++------------
+ 2 files changed, 20 insertions(+), 12 deletions(-)
+
+--- a/arch/arm/include/asm/cputype.h
++++ b/arch/arm/include/asm/cputype.h
+@@ -93,6 +93,7 @@
+ #define ARM_CPU_PART_SCORPION         0x510002d0
+ extern unsigned int processor_id;
++struct proc_info_list *lookup_processor(u32 midr);
+ #ifdef CONFIG_CPU_CP15
+ #define read_cpuid(reg)                                                       \
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -599,22 +599,29 @@ static void __init smp_build_mpidr_hash(
+ }
+ #endif
+-static void __init setup_processor(void)
++/*
++ * locate processor in the list of supported processor types.  The linker
++ * builds this table for us from the entries in arch/arm/mm/proc-*.S
++ */
++struct proc_info_list *lookup_processor(u32 midr)
+ {
+-      struct proc_info_list *list;
++      struct proc_info_list *list = lookup_processor_type(midr);
+-      /*
+-       * locate processor in the list of supported processor
+-       * types.  The linker builds this table for us from the
+-       * entries in arch/arm/mm/proc-*.S
+-       */
+-      list = lookup_processor_type(read_cpuid_id());
+       if (!list) {
+-              pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
+-                     read_cpuid_id());
+-              while (1);
++              pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
++                     smp_processor_id(), midr);
++              while (1)
++              /* can't use cpu_relax() here as it may require MMU setup */;
+       }
++      return list;
++}
++
++static void __init setup_processor(void)
++{
++      unsigned int midr = read_cpuid_id();
++      struct proc_info_list *list = lookup_processor(midr);
++
+       cpu_name = list->cpu_name;
+       __cpu_architecture = __get_cpu_architecture();
+@@ -632,7 +639,7 @@ static void __init setup_processor(void)
+ #endif
+       pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
+-              cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
++              list->cpu_name, midr, midr & 15,
+               proc_arch[cpu_architecture()], get_cr());
+       snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
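
Under the hood, lookup_processor() is a mask-and-match walk over a linker-built table keyed on the MIDR, which is what makes it reusable from the secondary bring-up path. A self-contained user-space model of the matching step; the table entries below are abridged, illustrative values, not the real proc_info_list:

#include <stdio.h>
#include <stdint.h>

struct proc_info { uint32_t val, mask; const char *name; };

static const struct proc_info procs[] = {
        { 0x410fc0f0, 0xff0ffff0, "Cortex-A15" },
        { 0x410fc070, 0xff0ffff0, "Cortex-A7"  },
};

static const struct proc_info *lookup_processor(uint32_t midr)
{
        for (unsigned int i = 0; i < sizeof(procs) / sizeof(procs[0]); i++)
                if ((midr & procs[i].mask) == procs[i].val)
                        return &procs[i];
        return NULL;  /* the kernel halts here; this sketch just reports */
}

int main(void)
{
        const struct proc_info *p = lookup_processor(0x412fc0f2);
        printf("%s\n", p ? p->name : "unknown");
        return 0;
}
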
diff --git a/queue-4.4/arm-uaccess-remove-put_user-code-duplication.patch b/queue-4.4/arm-uaccess-remove-put_user-code-duplication.patch
new file mode 100644 (file)
index 0000000..6252392
--- /dev/null
@@ -0,0 +1,170 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:10 +0100
+Subject: ARM: uaccess: remove put_user() code duplication
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Russell King <rmk+kernel@arm.linux.org.uk>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-7-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+Commit 9f73bd8bb445e0cbe4bcef6d4cfc788f1e184007 upstream.
+
+Remove the code duplication between put_user() and __put_user().  The
+code which selected the implementation based upon the pointer size, and
+declared the local variable to hold the value to be put are common to
+both implementations.
+
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/uaccess.h |  106 ++++++++++++++++++-----------------------
+ 1 file changed, 49 insertions(+), 57 deletions(-)
+
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -238,49 +238,23 @@ extern int __put_user_2(void *, unsigned
+ extern int __put_user_4(void *, unsigned int);
+ extern int __put_user_8(void *, unsigned long long);
+-#define __put_user_x(__r2, __p, __e, __l, __s)                                \
+-         __asm__ __volatile__ (                                       \
+-              __asmeq("%0", "r0") __asmeq("%2", "r2")                 \
+-              __asmeq("%3", "r1")                                     \
+-              "bl     __put_user_" #__s                               \
+-              : "=&r" (__e)                                           \
+-              : "0" (__p), "r" (__r2), "r" (__l)                      \
+-              : "ip", "lr", "cc")
+-
+-#define __put_user_check(x, p)                                                \
++#define __put_user_check(__pu_val, __ptr, __err, __s)                 \
+       ({                                                              \
+               unsigned long __limit = current_thread_info()->addr_limit - 1; \
+-              const typeof(*(p)) __user *__tmp_p = (p);               \
+-              register const typeof(*(p)) __r2 asm("r2") = (x);       \
+-              register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
++              register typeof(__pu_val) __r2 asm("r2") = __pu_val;    \
++              register const void __user *__p asm("r0") = __ptr;      \
+               register unsigned long __l asm("r1") = __limit;         \
+               register int __e asm("r0");                             \
+-              unsigned int __ua_flags = uaccess_save_and_enable();    \
+-              switch (sizeof(*(__p))) {                               \
+-              case 1:                                                 \
+-                      __put_user_x(__r2, __p, __e, __l, 1);           \
+-                      break;                                          \
+-              case 2:                                                 \
+-                      __put_user_x(__r2, __p, __e, __l, 2);           \
+-                      break;                                          \
+-              case 4:                                                 \
+-                      __put_user_x(__r2, __p, __e, __l, 4);           \
+-                      break;                                          \
+-              case 8:                                                 \
+-                      __put_user_x(__r2, __p, __e, __l, 8);           \
+-                      break;                                          \
+-              default: __e = __put_user_bad(); break;                 \
+-              }                                                       \
+-              uaccess_restore(__ua_flags);                            \
+-              __e;                                                    \
++              __asm__ __volatile__ (                                  \
++                      __asmeq("%0", "r0") __asmeq("%2", "r2")         \
++                      __asmeq("%3", "r1")                             \
++                      "bl     __put_user_" #__s                       \
++                      : "=&r" (__e)                                   \
++                      : "0" (__p), "r" (__r2), "r" (__l)              \
++                      : "ip", "lr", "cc");                            \
++              __err = __e;                                            \
+       })
+-#define put_user(x, p)                                                        \
+-      ({                                                              \
+-              might_fault();                                          \
+-              __put_user_check(x, p);                                 \
+-       })
+-
+ #else /* CONFIG_MMU */
+ /*
+@@ -298,7 +272,7 @@ static inline void set_fs(mm_segment_t f
+ }
+ #define get_user(x, p)        __get_user(x, p)
+-#define put_user(x, p)        __put_user(x, p)
++#define __put_user_check __put_user_nocheck
+ #endif /* CONFIG_MMU */
+@@ -389,36 +363,54 @@ do {                                                                     \
+ #define __get_user_asm_word(x, addr, err)                     \
+       __get_user_asm(x, addr, err, ldr)
++
++#define __put_user_switch(x, ptr, __err, __fn)                                \
++      do {                                                            \
++              const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);      \
++              __typeof__(*(ptr)) __pu_val = (x);                      \
++              unsigned int __ua_flags;                                \
++              might_fault();                                          \
++              __ua_flags = uaccess_save_and_enable();                 \
++              switch (sizeof(*(ptr))) {                               \
++              case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;      \
++              case 2: __fn(__pu_val, __pu_ptr, __err, 2); break;      \
++              case 4: __fn(__pu_val, __pu_ptr, __err, 4); break;      \
++              case 8: __fn(__pu_val, __pu_ptr, __err, 8); break;      \
++              default: __err = __put_user_bad(); break;               \
++              }                                                       \
++              uaccess_restore(__ua_flags);                            \
++      } while (0)
++
++#define put_user(x, ptr)                                              \
++({                                                                    \
++      int __pu_err = 0;                                               \
++      __put_user_switch((x), (ptr), __pu_err, __put_user_check);      \
++      __pu_err;                                                       \
++})
++
+ #define __put_user(x, ptr)                                            \
+ ({                                                                    \
+       long __pu_err = 0;                                              \
+-      __put_user_err((x), (ptr), __pu_err);                           \
++      __put_user_switch((x), (ptr), __pu_err, __put_user_nocheck);    \
+       __pu_err;                                                       \
+ })
+ #define __put_user_error(x, ptr, err)                                 \
+ ({                                                                    \
+-      __put_user_err((x), (ptr), err);                                \
++      __put_user_switch((x), (ptr), (err), __put_user_nocheck);       \
+       (void) 0;                                                       \
+ })
+-#define __put_user_err(x, ptr, err)                                   \
+-do {                                                                  \
+-      unsigned long __pu_addr = (unsigned long)(ptr);                 \
+-      unsigned int __ua_flags;                                        \
+-      __typeof__(*(ptr)) __pu_val = (x);                              \
+-      __chk_user_ptr(ptr);                                            \
+-      might_fault();                                                  \
+-      __ua_flags = uaccess_save_and_enable();                         \
+-      switch (sizeof(*(ptr))) {                                       \
+-      case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);  break;  \
+-      case 2: __put_user_asm_half(__pu_val, __pu_addr, err);  break;  \
+-      case 4: __put_user_asm_word(__pu_val, __pu_addr, err);  break;  \
+-      case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break;  \
+-      default: __put_user_bad();                                      \
+-      }                                                               \
+-      uaccess_restore(__ua_flags);                                    \
+-} while (0)
++#define __put_user_nocheck(x, __pu_ptr, __err, __size)                        \
++      do {                                                            \
++              unsigned long __pu_addr = (unsigned long)__pu_ptr;      \
++              __put_user_nocheck_##__size(x, __pu_addr, __err);       \
++      } while (0)
++
++#define __put_user_nocheck_1 __put_user_asm_byte
++#define __put_user_nocheck_2 __put_user_asm_half
++#define __put_user_nocheck_4 __put_user_asm_word
++#define __put_user_nocheck_8 __put_user_asm_dword
+ #define __put_user_asm(x, __pu_addr, err, instr)              \
+       __asm__ __volatile__(                                   \
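
The deduplication above hinges on one preprocessor trick: the size switch is written once (__put_user_switch) and the accessor family is passed in as a macro name, so the checked and unchecked paths share the dispatch. A small user-space model of that token-pasting pattern, with plain stores standing in for the asm accessors; all names are invented for the sketch:

#include <stdio.h>

#define put_nocheck_1(x, p) (*(unsigned char  *)(p) = (x))
#define put_nocheck_2(x, p) (*(unsigned short *)(p) = (x))
#define put_nocheck_4(x, p) (*(unsigned int   *)(p) = (x))
#define put_nocheck(x, p, size) put_nocheck_##size(x, p)

/* One switch, shared by every accessor family passed in as __fn. */
#define put_switch(x, ptr, __fn)                        \
        do {                                            \
                switch (sizeof(*(ptr))) {               \
                case 1: __fn((x), (ptr), 1); break;     \
                case 2: __fn((x), (ptr), 2); break;     \
                case 4: __fn((x), (ptr), 4); break;     \
                }                                       \
        } while (0)

int main(void)
{
        unsigned short s = 0;
        put_switch(0xbeef, &s, put_nocheck);
        printf("0x%x\n", s);  /* 0xbeef */
        return 0;
}
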
diff --git a/queue-4.4/arm-use-__inttype-in-get_user.patch b/queue-4.4/arm-use-__inttype-in-get_user.patch
new file mode 100644 (file)
index 0000000..fbf518f
--- /dev/null
@@ -0,0 +1,54 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:37 +0100
+Subject: ARM: use __inttype() in get_user()
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-34-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit d09fbb327d670737ab40fd8bbb0765ae06b8b739 upstream.
+
+Borrow the x86 implementation of __inttype() to use in get_user() to
+select an integer type suitable to temporarily hold the result value.
+This is necessary to avoid propagating the volatile nature of the
+result argument, which can cause the following warning:
+
+lib/iov_iter.c:413:5: warning: optimization may eliminate reads and/or writes to register variables [-Wvolatile-register-var]
+
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/uaccess.h |    9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -123,6 +123,13 @@ static inline void set_fs(mm_segment_t f
+       flag; })
+ /*
++ * This is a type: either unsigned long, if the argument fits into
++ * that type, or otherwise unsigned long long.
++ */
++#define __inttype(x) \
++      __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
++
++/*
+  * Single-value transfer routines.  They automatically use the right
+  * size if we just have the right pointer type.  Note that the functions
+  * which read from user space (*get_*) need to take care not to leak
+@@ -191,7 +198,7 @@ extern int __get_user_64t_4(void *);
+       ({                                                              \
+               unsigned long __limit = current_thread_info()->addr_limit - 1; \
+               register const typeof(*(p)) __user *__p asm("r0") = (p);\
+-              register typeof(x) __r2 asm("r2");                      \
++              register __inttype(x) __r2 asm("r2");                   \
+               register unsigned long __l asm("r1") = __limit;         \
+               register int __e asm("r0");                             \
+               unsigned int __ua_flags = uaccess_save_and_enable();    \
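
The macro is easy to demonstrate standalone: __builtin_choose_expr picks 0UL or 0ULL by size, and __typeof__ of that literal yields a plain unsigned integer type, so qualifiers such as volatile on the argument never reach the register temporary. A quick demo, assuming GCC/Clang builtins:

#include <stdio.h>

#define __inttype(x) \
        __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

int main(void)
{
        volatile int v = 42;
        long long big = 7;

        __inttype(v) a = v;      /* unsigned long: volatile is stripped */
        __inttype(big) b = big;  /* unsigned long long where long long is
                                    wider than long, e.g. 32-bit ARM */
        printf("%zu %zu\n", sizeof(a), sizeof(b));
        return 0;
}
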
diff --git a/queue-4.4/arm-vfp-use-__copy_from_user-when-restoring-vfp-state.patch b/queue-4.4/arm-vfp-use-__copy_from_user-when-restoring-vfp-state.patch
new file mode 100644 (file)
index 0000000..153fa15
--- /dev/null
@@ -0,0 +1,133 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:35 +0100
+Subject: ARM: vfp: use __copy_from_user() when restoring VFP state
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-32-ardb@kernel.org>
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+Commit 42019fc50dfadb219f9e6ddf4c354f3837057d80 upstream.
+
+__get_user_error() is used as a fast accessor to make copying structure
+members in the signal handling path as efficient as possible.  However,
+with software PAN and the recent Spectre variant 1, the efficiency is
+reduced as these are no longer fast accessors.
+
+In the case of software PAN, it has to switch the domain register around
+each access, and with Spectre variant 1, it would have to repeat the
+access_ok() check for each access.
+
+Use __copy_from_user() rather than __get_user_err() for individual
+members when restoring VFP state.
+
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David A. Long <dave.long@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/thread_info.h |    4 ++--
+ arch/arm/kernel/signal.c           |   17 ++++++++---------
+ arch/arm/vfp/vfpmodule.c           |   17 +++++++----------
+ 3 files changed, 17 insertions(+), 21 deletions(-)
+
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -126,8 +126,8 @@ struct user_vfp_exc;
+ extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
+                                          struct user_vfp_exc __user *);
+-extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+-                                  struct user_vfp_exc __user *);
++extern int vfp_restore_user_hwstate(struct user_vfp *,
++                                  struct user_vfp_exc *);
+ #endif
+ /*
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -107,21 +107,20 @@ static int preserve_vfp_context(struct v
+       return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
+ }
+-static int restore_vfp_context(struct vfp_sigframe __user *frame)
++static int restore_vfp_context(struct vfp_sigframe __user *auxp)
+ {
+-      unsigned long magic;
+-      unsigned long size;
+-      int err = 0;
++      struct vfp_sigframe frame;
++      int err;
+-      __get_user_error(magic, &frame->magic, err);
+-      __get_user_error(size, &frame->size, err);
++      err = __copy_from_user(&frame, (char __user *) auxp, sizeof(frame));
+       if (err)
+-              return -EFAULT;
+-      if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
++              return err;
++
++      if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
+               return -EINVAL;
+-      return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
++      return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
+ }
+ #endif
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -601,13 +601,11 @@ int vfp_preserve_user_clear_hwstate(stru
+ }
+ /* Sanitise and restore the current VFP state from the provided structures. */
+-int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
+-                           struct user_vfp_exc __user *ufp_exc)
++int vfp_restore_user_hwstate(struct user_vfp *ufp, struct user_vfp_exc *ufp_exc)
+ {
+       struct thread_info *thread = current_thread_info();
+       struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+       unsigned long fpexc;
+-      int err = 0;
+       /* Disable VFP to avoid corrupting the new thread state. */
+       vfp_flush_hwstate(thread);
+@@ -616,17 +614,16 @@ int vfp_restore_user_hwstate(struct user
+        * Copy the floating point registers. There can be unused
+        * registers see asm/hwcap.h for details.
+        */
+-      err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
+-                              sizeof(hwstate->fpregs));
++      memcpy(&hwstate->fpregs, &ufp->fpregs, sizeof(hwstate->fpregs));
+       /*
+        * Copy the status and control register.
+        */
+-      __get_user_error(hwstate->fpscr, &ufp->fpscr, err);
++      hwstate->fpscr = ufp->fpscr;
+       /*
+        * Sanitise and restore the exception registers.
+        */
+-      __get_user_error(fpexc, &ufp_exc->fpexc, err);
++      fpexc = ufp_exc->fpexc;
+       /* Ensure the VFP is enabled. */
+       fpexc |= FPEXC_EN;
+@@ -635,10 +632,10 @@ int vfp_restore_user_hwstate(struct user
+       fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+       hwstate->fpexc = fpexc;
+-      __get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+-      __get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
++      hwstate->fpinst = ufp_exc->fpinst;
++      hwstate->fpinst2 = ufp_exc->fpinst2;
+-      return err ? -EFAULT : 0;
++      return 0;
+ }
+ /*
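
The pattern the patch adopts, one guarded bulk copy into a kernel-stack struct followed by plain reads of the local fields, can be modelled in user space. In this sketch memcpy stands in for __copy_from_user, and the struct layout and values are invented for illustration:

#include <stdio.h>
#include <string.h>

#define VFP_MAGIC 0x56465001u

struct vfp_sigframe { unsigned int magic, size, fpscr; };

static int restore(const void *user_frame)
{
        struct vfp_sigframe frame;

        memcpy(&frame, user_frame, sizeof(frame));  /* one bulk copy */
        if (frame.magic != VFP_MAGIC || frame.size != sizeof(frame))
                return -1;                          /* validate, then consume */
        printf("fpscr=%#x\n", frame.fpscr);
        return 0;
}

int main(void)
{
        struct vfp_sigframe f = { VFP_MAGIC, sizeof(f), 0x1000 };
        return restore(&f);
}
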
diff --git a/queue-4.4/arm64-kvm-report-smccc_arch_workaround_1-bp-hardening-support.patch b/queue-4.4/arm64-kvm-report-smccc_arch_workaround_1-bp-hardening-support.patch
new file mode 100644 (file)
index 0000000..decb022
--- /dev/null
@@ -0,0 +1,49 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:13 +0100
+Subject: arm64: KVM: Report SMCCC_ARCH_WORKAROUND_1 BP hardening support
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Mark Rutland <mark.rutland@arm.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-10-ardb@kernel.org>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 6167ec5c9145cdf493722dfd80a5d48bafc4a18a upstream.
+
+A new feature of SMCCC 1.1 is that it offers firmware-based CPU
+workarounds. In particular, SMCCC_ARCH_WORKAROUND_1 provides
+BP hardening for CVE-2017-5715.
+
+If the host has some mitigation for this issue, report that
+we deal with it using SMCCC_ARCH_WORKAROUND_1, as we apply the
+host workaround on every guest exit.
+
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+[v4.9: account for files moved to virt/ upstream]
+Signed-off-by: Mark Rutland <mark.rutland@arm.com> [v4.9 backport]
+Tested-by: Greg Hackmann <ghackmann@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+[ardb: restrict to include/linux/arm-smccc.h]
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/arm-smccc.h |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -70,6 +70,11 @@
+                          ARM_SMCCC_SMC_32,                            \
+                          0, 1)
++#define ARM_SMCCC_ARCH_WORKAROUND_1                                   \
++      ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
++                         ARM_SMCCC_SMC_32,                            \
++                         0, 0x8000)
++
+ #ifndef __ASSEMBLY__
+ #include <linux/linkage.h>
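
For reference, the function ID defined above follows the SMCCC encoding: bit 31 marks a fast call, bit 30 selects the 64-bit convention, bits 29:24 carry the owner, and bits 15:0 the function number, which yields 0x80008000 for ARCH_WORKAROUND_1. A quick standalone check, with a macro mirroring ARM_SMCCC_CALL_VAL from include/linux/arm-smccc.h:

#include <stdio.h>

#define SMCCC_FAST_CALL 1u
#define SMCCC_SMC_32    0u
#define SMCCC_CALL_VAL(type, convention, owner, func)           \
        (((type) << 31) | ((convention) << 30) |                \
         (((owner) & 0x3fu) << 24) | ((func) & 0xffffu))

int main(void)
{
        unsigned int id = SMCCC_CALL_VAL(SMCCC_FAST_CALL, SMCCC_SMC_32,
                                         0u, 0x8000u);
        printf("ARM_SMCCC_ARCH_WORKAROUND_1 = %#x\n", id);  /* 0x80008000 */
        return 0;
}
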
diff --git a/queue-4.4/dmaengine-qcom-bam_dma-fix-resource-leak.patch b/queue-4.4/dmaengine-qcom-bam_dma-fix-resource-leak.patch
new file mode 100644 (file)
index 0000000..231b227
--- /dev/null
@@ -0,0 +1,54 @@
+From 7667819385457b4aeb5fac94f67f52ab52cc10d5 Mon Sep 17 00:00:00 2001
+From: Jeffrey Hugo <jeffrey.l.hugo@gmail.com>
+Date: Thu, 17 Oct 2019 08:26:06 -0700
+Subject: dmaengine: qcom: bam_dma: Fix resource leak
+
+From: Jeffrey Hugo <jeffrey.l.hugo@gmail.com>
+
+commit 7667819385457b4aeb5fac94f67f52ab52cc10d5 upstream.
+
+bam_dma_terminate_all() will leak resources if any of the transactions are
+committed to the hardware (present in the desc fifo), and not complete.
+Since bam_dma_terminate_all() does not cause the hardware to be updated,
+the hardware will still operate on any previously committed transactions.
+This can cause memory corruption if the memory for the transaction has been
+reassigned, and will cause a sync issue between the BAM and its client(s).
+
+Fix this by properly updating the hardware in bam_dma_terminate_all().
+
+Fixes: e7c0fe2a5c84 ("dmaengine: add Qualcomm BAM dma driver")
+Signed-off-by: Jeffrey Hugo <jeffrey.l.hugo@gmail.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20191017152606.34120-1-jeffrey.l.hugo@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/qcom_bam_dma.c |   14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/drivers/dma/qcom_bam_dma.c
++++ b/drivers/dma/qcom_bam_dma.c
+@@ -671,7 +671,21 @@ static int bam_dma_terminate_all(struct
+       /* remove all transactions, including active transaction */
+       spin_lock_irqsave(&bchan->vc.lock, flag);
++      /*
++       * If we have transactions queued, then some might be committed to the
++       * hardware in the desc fifo.  The only way to reset the desc fifo is
++       * to do a hardware reset (either by pipe or the entire block).
++       * bam_chan_init_hw() will trigger a pipe reset, and also reinit the
++       * pipe.  If the pipe is left disabled (default state after pipe reset)
++       * and is accessed by a connected hardware engine, a fatal error in
++       * the BAM will occur.  There is a small window where this could happen
++       * with bam_chan_init_hw(), but it is assumed that the caller has
++       * stopped activity on any attached hardware engine.  Make sure to do
++       * this first so that the BAM hardware doesn't cause memory corruption
++       * by accessing freed resources.
++       */
+       if (bchan->curr_txd) {
++              bam_chan_init_hw(bchan, bchan->curr_txd->dir);
+               list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
+               bchan->curr_txd = NULL;
+       }
diff --git a/queue-4.4/firmware-psci-expose-psci-conduit.patch b/queue-4.4/firmware-psci-expose-psci-conduit.patch
new file mode 100644 (file)
index 0000000..bd45f44
--- /dev/null
@@ -0,0 +1,114 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:14 +0100
+Subject: firmware/psci: Expose PSCI conduit
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Mark Rutland <mark.rutland@arm.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-11-ardb@kernel.org>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit 09a8d6d48499f93e2abde691f5800081cd858726 upstream.
+
+In order to call into the firmware to apply workarounds, it is
+useful to find out whether we're using HVC or SMC. Let's expose
+this through the psci_ops.
+
+Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mark Rutland <mark.rutland@arm.com> [v4.9 backport]
+Tested-by: Greg Hackmann <ghackmann@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/psci.c |   28 +++++++++++++++++++++++-----
+ include/linux/psci.h    |    7 +++++++
+ 2 files changed, 30 insertions(+), 5 deletions(-)
+
+--- a/drivers/firmware/psci.c
++++ b/drivers/firmware/psci.c
+@@ -55,7 +55,9 @@ bool psci_tos_resident_on(int cpu)
+       return cpu == resident_cpu;
+ }
+-struct psci_operations psci_ops;
++struct psci_operations psci_ops = {
++      .conduit = PSCI_CONDUIT_NONE,
++};
+ typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+                               unsigned long, unsigned long);
+@@ -206,6 +208,22 @@ static unsigned long psci_migrate_info_u
+                             0, 0, 0);
+ }
++static void set_conduit(enum psci_conduit conduit)
++{
++      switch (conduit) {
++      case PSCI_CONDUIT_HVC:
++              invoke_psci_fn = __invoke_psci_fn_hvc;
++              break;
++      case PSCI_CONDUIT_SMC:
++              invoke_psci_fn = __invoke_psci_fn_smc;
++              break;
++      default:
++              WARN(1, "Unexpected PSCI conduit %d\n", conduit);
++      }
++
++      psci_ops.conduit = conduit;
++}
++
+ static int get_set_conduit_method(struct device_node *np)
+ {
+       const char *method;
+@@ -218,9 +236,9 @@ static int get_set_conduit_method(struct
+       }
+       if (!strcmp("hvc", method)) {
+-              invoke_psci_fn = __invoke_psci_fn_hvc;
++              set_conduit(PSCI_CONDUIT_HVC);
+       } else if (!strcmp("smc", method)) {
+-              invoke_psci_fn = __invoke_psci_fn_smc;
++              set_conduit(PSCI_CONDUIT_SMC);
+       } else {
+               pr_warn("invalid \"method\" property: %s\n", method);
+               return -EINVAL;
+@@ -480,9 +498,9 @@ int __init psci_acpi_init(void)
+       pr_info("probing for conduit method from ACPI.\n");
+       if (acpi_psci_use_hvc())
+-              invoke_psci_fn = __invoke_psci_fn_hvc;
++              set_conduit(PSCI_CONDUIT_HVC);
+       else
+-              invoke_psci_fn = __invoke_psci_fn_smc;
++              set_conduit(PSCI_CONDUIT_SMC);
+       return psci_probe();
+ }
+--- a/include/linux/psci.h
++++ b/include/linux/psci.h
+@@ -24,6 +24,12 @@ bool psci_tos_resident_on(int cpu);
+ bool psci_power_state_loses_context(u32 state);
+ bool psci_power_state_is_valid(u32 state);
++enum psci_conduit {
++      PSCI_CONDUIT_NONE,
++      PSCI_CONDUIT_SMC,
++      PSCI_CONDUIT_HVC,
++};
++
+ struct psci_operations {
+       int (*cpu_suspend)(u32 state, unsigned long entry_point);
+       int (*cpu_off)(u32 state);
+@@ -32,6 +38,7 @@ struct psci_operations {
+       int (*affinity_info)(unsigned long target_affinity,
+                       unsigned long lowest_affinity_level);
+       int (*migrate_info_type)(void);
++      enum psci_conduit conduit;
+ };
+ extern struct psci_operations psci_ops;
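
The design choice here is small but deliberate: the conduit is recorded at the single point where the invoke hook is installed, so later consumers (such as the Spectre v2 firmware mitigation) can ask "HVC or SMC?" without re-parsing DT or ACPI. A user-space model of that plumbing; names mirror the patch but the invoke functions are stand-ins:

#include <stdio.h>

enum psci_conduit { PSCI_CONDUIT_NONE, PSCI_CONDUIT_SMC, PSCI_CONDUIT_HVC };

static enum psci_conduit conduit = PSCI_CONDUIT_NONE;
static void (*invoke_fn)(void);

static void invoke_hvc(void) { puts("would issue hvc #0"); }
static void invoke_smc(void) { puts("would issue smc #0"); }

static void set_conduit(enum psci_conduit c)
{
        invoke_fn = (c == PSCI_CONDUIT_HVC) ? invoke_hvc : invoke_smc;
        conduit = c;  /* recorded once, queryable later */
}

int main(void)
{
        set_conduit(PSCI_CONDUIT_HVC);  /* as if DT said method = "hvc" */
        invoke_fn();
        printf("conduit = %d\n", conduit);
        return 0;
}
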
diff --git a/queue-4.4/firmware-psci-expose-smccc-version-through-psci_ops.patch b/queue-4.4/firmware-psci-expose-smccc-version-through-psci_ops.patch
new file mode 100644 (file)
index 0000000..d540500
--- /dev/null
@@ -0,0 +1,103 @@
+From foo@baz Fri 08 Nov 2019 02:08:46 PM CET
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Fri,  8 Nov 2019 13:35:15 +0100
+Subject: firmware/psci: Expose SMCCC version through psci_ops
+To: stable@vger.kernel.org
+Cc: linus.walleij@linaro.org, rmk+kernel@armlinux.org.uk, Mark Rutland <mark.rutland@arm.com>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20191108123554.29004-12-ardb@kernel.org>
+
+From: Marc Zyngier <marc.zyngier@arm.com>
+
+commit e78eef554a912ef6c1e0bbf97619dafbeae3339f upstream.
+
+Since PSCI 1.0 allows the SMCCC version to be (indirectly) probed,
+let's do that at boot time, and expose the version of the calling
+convention as part of the psci_ops structure.
+
+Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mark Rutland <mark.rutland@arm.com> [v4.9 backport]
+Tested-by: Greg Hackmann <ghackmann@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/psci.c |   27 +++++++++++++++++++++++++++
+ include/linux/psci.h    |    6 ++++++
+ 2 files changed, 33 insertions(+)
+
+--- a/drivers/firmware/psci.c
++++ b/drivers/firmware/psci.c
+@@ -57,6 +57,7 @@ bool psci_tos_resident_on(int cpu)
+ struct psci_operations psci_ops = {
+       .conduit = PSCI_CONDUIT_NONE,
++      .smccc_version = SMCCC_VERSION_1_0,
+ };
+ typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+@@ -339,6 +340,31 @@ static void __init psci_init_migrate(voi
+       pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
+ }
++static void __init psci_init_smccc(void)
++{
++      u32 ver = ARM_SMCCC_VERSION_1_0;
++      int feature;
++
++      feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID);
++
++      if (feature != PSCI_RET_NOT_SUPPORTED) {
++              u32 ret;
++              ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
++              if (ret == ARM_SMCCC_VERSION_1_1) {
++                      psci_ops.smccc_version = SMCCC_VERSION_1_1;
++                      ver = ret;
++              }
++      }
++
++      /*
++       * Conveniently, the SMCCC and PSCI versions are encoded the
++       * same way. No, this isn't accidental.
++       */
++      pr_info("SMC Calling Convention v%d.%d\n",
++              PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
++
++}
++
+ static void __init psci_0_2_set_functions(void)
+ {
+       pr_info("Using standard PSCI v0.2 function IDs\n");
+@@ -385,6 +411,7 @@ static int __init psci_probe(void)
+       psci_init_migrate();
+       if (PSCI_VERSION_MAJOR(ver) >= 1) {
++              psci_init_smccc();
+               psci_init_cpu_suspend();
+               psci_init_system_suspend();
+       }
+--- a/include/linux/psci.h
++++ b/include/linux/psci.h
+@@ -30,6 +30,11 @@ enum psci_conduit {
+       PSCI_CONDUIT_HVC,
+ };
++enum smccc_version {
++      SMCCC_VERSION_1_0,
++      SMCCC_VERSION_1_1,
++};
++
+ struct psci_operations {
+       int (*cpu_suspend)(u32 state, unsigned long entry_point);
+       int (*cpu_off)(u32 state);
+@@ -39,6 +44,7 @@ struct psci_operations {
+                       unsigned long lowest_affinity_level);
+       int (*migrate_info_type)(void);
+       enum psci_conduit conduit;
++      enum smccc_version smccc_version;
+ };
+ extern struct psci_operations psci_ops;
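
The point of exposing smccc_version is the gate it enables: callers probe once at boot and only attempt SMCCC 1.1 fast calls when the firmware reported them. A user-space model of that probe-then-gate flow; the two fake_* functions stand in for psci_features() and the SMCCC_VERSION call, and 0x10001 is the v1.1 encoding (major << 16 | minor, as in PSCI):

#include <stdio.h>

enum smccc_version { SMCCC_VERSION_1_0, SMCCC_VERSION_1_1 };

static enum smccc_version smccc_version = SMCCC_VERSION_1_0;

/* Stand-ins for psci_features() and the SMCCC_VERSION firmware call. */
static int fake_version_call_supported(void) { return 1; }
static unsigned int fake_version_call(void)  { return 0x10001; }  /* v1.1 */

static void probe_smccc(void)
{
        if (fake_version_call_supported() && fake_version_call() == 0x10001)
                smccc_version = SMCCC_VERSION_1_1;
}

int main(void)
{
        probe_smccc();
        puts(smccc_version == SMCCC_VERSION_1_1
             ? "SMCCC 1.1: fast workaround calls may be probed"
             : "SMCCC 1.0: assume no ARCH_WORKAROUND_1");
        return 0;
}
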
diff --git a/queue-4.4/series b/queue-4.4/series
index 149bf4fda2b071a6385ae81d8fe0ff56b23343e5..689481bc35d40ebbf5bfff82b1a46cde5b44b7ac 100644
--- a/queue-4.4/series
@@ -20,3 +20,54 @@ vxlan-check-tun_info-options_len-properly.patch
 net-mlx4_core-dynamically-set-guaranteed-amount-of-counters-per-vf.patch
 inet-stop-leaking-jiffies-on-the-wire.patch
 net-flow_dissector-switch-to-siphash.patch
+dmaengine-qcom-bam_dma-fix-resource-leak.patch
+arm-8051-1-put_user-fix-possible-data-corruption-in-put_user.patch
+arm-8478-2-arm-arm64-add-arm-smccc.patch
+arm-8479-2-add-implementation-for-arm-smccc.patch
+arm-8480-2-arm64-add-implementation-for-arm-smccc.patch
+arm-8481-2-drivers-psci-replace-psci-firmware-calls.patch
+arm-uaccess-remove-put_user-code-duplication.patch
+arm-move-system-register-accessors-to-asm-cp15.h.patch
+arm-arm64-kvm-advertise-smccc-v1.1.patch
+arm64-kvm-report-smccc_arch_workaround_1-bp-hardening-support.patch
+firmware-psci-expose-psci-conduit.patch
+firmware-psci-expose-smccc-version-through-psci_ops.patch
+arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch
+arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch
+arm-arm64-smccc-add-smccc-specific-return-codes.patch
+arm-arm64-smccc-1.1-make-return-values-unsigned-long.patch
+arm-arm64-smccc-1.1-handle-function-result-as-parameters.patch
+arm-add-more-cpu-part-numbers-for-cortex-and-brahma-b15-cpus.patch
+arm-bugs-prepare-processor-bug-infrastructure.patch
+arm-bugs-hook-processor-bug-checking-into-smp-and-suspend-paths.patch
+arm-bugs-add-support-for-per-processor-bug-checking.patch
+arm-spectre-add-kconfig-symbol-for-cpus-vulnerable-to-spectre.patch
+arm-spectre-v2-harden-branch-predictor-on-context-switches.patch
+arm-spectre-v2-add-cortex-a8-and-a15-validation-of-the-ibe-bit.patch
+arm-spectre-v2-harden-user-aborts-in-kernel-space.patch
+arm-spectre-v2-add-firmware-based-hardening.patch
+arm-spectre-v2-warn-about-incorrect-context-switching-functions.patch
+arm-spectre-v1-add-speculation-barrier-csdb-macros.patch
+arm-spectre-v1-add-array_index_mask_nospec-implementation.patch
+arm-spectre-v1-fix-syscall-entry.patch
+arm-signal-copy-registers-using-__copy_from_user.patch
+arm-vfp-use-__copy_from_user-when-restoring-vfp-state.patch
+arm-oabi-compat-copy-semops-using-__copy_from_user.patch
+arm-use-__inttype-in-get_user.patch
+arm-spectre-v1-use-get_user-for-__get_user.patch
+arm-spectre-v1-mitigate-user-accesses.patch
+arm-8789-1-signal-copy-registers-using-__copy_to_user.patch
+arm-8791-1-vfp-use-__copy_to_user-when-saving-vfp-state.patch
+arm-8792-1-oabi-compat-copy-oabi-events-using-__copy_to_user.patch
+arm-8793-1-signal-replace-__put_user_error-with-__put_user.patch
+arm-8794-1-uaccess-prevent-speculative-use-of-the-current-addr_limit.patch
+arm-8795-1-spectre-v1.1-use-put_user-for-__put_user.patch
+arm-8796-1-spectre-v1-v1.1-provide-helpers-for-address-sanitization.patch
+arm-8810-1-vfp-fix-wrong-assignement-to-ufp_exc.patch
+arm-make-lookup_processor_type-non-__init.patch
+arm-split-out-processor-lookup.patch
+arm-clean-up-per-processor-check_bugs-method-call.patch
+arm-add-proc_vtable-and-proc_table-macros.patch
+arm-spectre-v2-per-cpu-vtables-to-work-around-big.little-systems.patch
+arm-ensure-that-processor-vtables-is-not-lost-after-boot.patch
+arm-fix-the-cockup-in-the-previous-patch.patch